/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2024 by Delphix. All rights reserved.
 * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
 * Copyright (c) 2012 by Cyril Plisko. All rights reserved.
 * Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 * Copyright (c) 2021, Klara Inc.
 * Copyright [2021] Hewlett Packard Enterprise Development LP
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread_pool.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>

#include <math.h>

#include <libzfs.h>
#include <libzutil.h>

#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

#include "statcommon.h"

libzfs_handle_t *g_zfs;

static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);

static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);

static int zpool_do_checkpoint(int, char **);

static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);

static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);

static int zpool_do_reguid(int, char **);

static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);

static int zpool_do_initialize(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_resilver(int, char **);
static int zpool_do_trim(int, char **);

static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);

static int zpool_do_upgrade(int, char **);

static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);

static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);

static int zpool_do_sync(int, char **);

static int zpool_do_version(int, char **);

static int zpool_do_wait(int, char **);

static int zpool_do_help(int argc, char **argv);

static zpool_compat_status_t zpool_do_load_compat(
    const char *, boolean_t *);

enum zpool_options {
	ZPOOL_OPTION_POWER = 1024,
	ZPOOL_OPTION_ALLOW_INUSE,
	ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH,
	ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH
};

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */

#ifdef DEBUG
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif

typedef enum {
	HELP_ADD,
	HELP_ATTACH,
	HELP_CLEAR,
	HELP_CREATE,
	HELP_CHECKPOINT,
	HELP_DESTROY,
	HELP_DETACH,
	HELP_EXPORT,
	HELP_HISTORY,
	HELP_IMPORT,
	HELP_IOSTAT,
	HELP_LABELCLEAR,
	HELP_LIST,
	HELP_OFFLINE,
	HELP_ONLINE,
	HELP_REPLACE,
	HELP_REMOVE,
	HELP_INITIALIZE,
	HELP_SCRUB,
	HELP_RESILVER,
	HELP_TRIM,
	HELP_STATUS,
	HELP_UPGRADE,
	HELP_EVENTS,
	HELP_GET,
	HELP_SET,
	HELP_SPLIT,
	HELP_SYNC,
	HELP_REGUID,
	HELP_REOPEN,
	HELP_VERSION,
	HELP_WAIT
} zpool_help_t;


/*
 * Flags for stats to display with "zpool iostat"
 */
enum iostat_type {
	IOS_DEFAULT = 0,
	IOS_LATENCY = 1,
	IOS_QUEUES = 2,
	IOS_L_HISTO = 3,
	IOS_RQ_HISTO = 4,
	IOS_COUNT,	/* always last element */
};

/* iostat_type entries as bitmasks */
#define	IOS_DEFAULT_M	(1ULL << IOS_DEFAULT)
#define	IOS_LATENCY_M	(1ULL << IOS_LATENCY)
#define	IOS_QUEUES_M	(1ULL << IOS_QUEUES)
#define	IOS_L_HISTO_M	(1ULL << IOS_L_HISTO)
#define	IOS_RQ_HISTO_M	(1ULL << IOS_RQ_HISTO)

/* Mask of all the histo bits */
#define	IOS_ANYHISTO_M	(IOS_L_HISTO_M | IOS_RQ_HISTO_M)

/*
 * Lookup table for iostat flags to nvlist names.  Basically a list
 * of all the nvlists a flag requires.  Also specifies the order in
 * which data gets printed in zpool iostat.
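 * Each per-type list below is terminated by a NULL entry so it can be
 * walked without a separate count.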
 */
static const char *vsx_type_to_nvlist[IOS_COUNT][15] = {
	[IOS_L_HISTO] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_LATENCY] = {
	    ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
	    ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
	    NULL},
	[IOS_QUEUES] = {
	    ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
	    ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
	    NULL},
	[IOS_RQ_HISTO] = {
	    ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
	    ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
	    ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
	    ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
	    NULL},
};


/*
 * Given a cb->cb_flags with a histogram bit set, return the iostat_type.
 * Right now, only one histo bit is ever set at one time, so we can
 * just do a highbit64(a)
 */
#define	IOS_HISTO_IDX(a)	(highbit64(a & IOS_ANYHISTO_M) - 1)

typedef struct zpool_command {
	const char	*name;
	int		(*func)(int, char **);
	zpool_help_t	usage;
} zpool_command_t;

/*
 * Master command table.  Each ZFS command has a name, associated function, and
 * usage message.  The usage messages need to be internationalized, so we have
 * to have a function to return the usage message based on a command index.
 *
 * These commands are organized according to how they are displayed in the usage
 * message.  An empty command (one with a NULL name) indicates an empty line in
 * the generic usage message.
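 * usage() below iterates over this table to build the full help output.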
 */
static zpool_command_t command_table[] = {
	{ "version",	zpool_do_version,	HELP_VERSION	},
	{ NULL },
	{ "create",	zpool_do_create,	HELP_CREATE	},
	{ "destroy",	zpool_do_destroy,	HELP_DESTROY	},
	{ NULL },
	{ "add",	zpool_do_add,		HELP_ADD	},
	{ "remove",	zpool_do_remove,	HELP_REMOVE	},
	{ NULL },
	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR	},
	{ NULL },
	{ "checkpoint",	zpool_do_checkpoint,	HELP_CHECKPOINT	},
	{ NULL },
	{ "list",	zpool_do_list,		HELP_LIST	},
	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT	},
	{ "status",	zpool_do_status,	HELP_STATUS	},
	{ NULL },
	{ "online",	zpool_do_online,	HELP_ONLINE	},
	{ "offline",	zpool_do_offline,	HELP_OFFLINE	},
	{ "clear",	zpool_do_clear,		HELP_CLEAR	},
	{ "reopen",	zpool_do_reopen,	HELP_REOPEN	},
	{ NULL },
	{ "attach",	zpool_do_attach,	HELP_ATTACH	},
	{ "detach",	zpool_do_detach,	HELP_DETACH	},
	{ "replace",	zpool_do_replace,	HELP_REPLACE	},
	{ "split",	zpool_do_split,		HELP_SPLIT	},
	{ NULL },
	{ "initialize",	zpool_do_initialize,	HELP_INITIALIZE	},
	{ "resilver",	zpool_do_resilver,	HELP_RESILVER	},
	{ "scrub",	zpool_do_scrub,		HELP_SCRUB	},
	{ "trim",	zpool_do_trim,		HELP_TRIM	},
	{ NULL },
	{ "import",	zpool_do_import,	HELP_IMPORT	},
	{ "export",	zpool_do_export,	HELP_EXPORT	},
	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE	},
	{ "reguid",	zpool_do_reguid,	HELP_REGUID	},
	{ NULL },
	{ "history",	zpool_do_history,	HELP_HISTORY	},
	{ "events",	zpool_do_events,	HELP_EVENTS	},
	{ NULL },
	{ "get",	zpool_do_get,		HELP_GET	},
	{ "set",	zpool_do_set,		HELP_SET	},
	{ "sync",	zpool_do_sync,		HELP_SYNC	},
	{ NULL },
	{ "wait",	zpool_do_wait,		HELP_WAIT	},
};

#define	NCOMMAND	(ARRAY_SIZE(command_table))

#define	VDEV_ALLOC_CLASS_LOGS	"logs"

static zpool_command_t *current_command;
static zfs_type_t current_prop_type = (ZFS_TYPE_POOL | ZFS_TYPE_VDEV);
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;

static const char *
get_usage(zpool_help_t idx)
{
	switch (idx) {
	case HELP_ADD:
		return (gettext("\tadd [-afgLnP] [-o property=value] "
		    "<pool> <vdev> ...\n"));
	case HELP_ATTACH:
		return (gettext("\tattach [-fsw] [-o property=value] "
		    "<pool> <device> <new-device>\n"));
	case HELP_CLEAR:
		return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
	case HELP_CREATE:
		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
		    "\t [-O file-system-property=value] ... \n"
		    "\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
	case HELP_CHECKPOINT:
		return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
	case HELP_DESTROY:
		return (gettext("\tdestroy [-f] <pool>\n"));
	case HELP_DETACH:
		return (gettext("\tdetach <pool> <device>\n"));
	case HELP_EXPORT:
		return (gettext("\texport [-af] <pool> ...\n"));
	case HELP_HISTORY:
		return (gettext("\thistory [-il] [<pool>] ...\n"));
	case HELP_IMPORT:
		return (gettext("\timport [-d dir] [-D]\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]] -a\n"
		    "\timport [-o mntopts] [-o property=value] ... \n"
		    "\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
		    "[-R root] [-F [-n]]\n"
		    "\t [--rewind-to-checkpoint] <pool | id> [newpool]\n"));
	case HELP_IOSTAT:
		return (gettext("\tiostat [[[-c [script1,script2,...]"
		    "[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
		    "\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
		    " [[-n] interval [count]]\n"));
	case HELP_LABELCLEAR:
		return (gettext("\tlabelclear [-f] <vdev>\n"));
	case HELP_LIST:
		return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
		    "[-T d|u] [pool] ... \n"
		    "\t [interval [count]]\n"));
	case HELP_OFFLINE:
		return (gettext("\toffline [--power]|[[-f][-t]] <pool> "
		    "<device> ...\n"));
	case HELP_ONLINE:
		return (gettext("\tonline [--power][-e] <pool> <device> "
		    "...\n"));
	case HELP_REPLACE:
		return (gettext("\treplace [-fsw] [-o property=value] "
		    "<pool> <device> [new-device]\n"));
	case HELP_REMOVE:
		return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
	case HELP_REOPEN:
		return (gettext("\treopen [-n] <pool>\n"));
	case HELP_INITIALIZE:
		return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
		    "[<device> ...]\n"));
	case HELP_SCRUB:
		return (gettext("\tscrub [-s | -p] [-w] [-e] <pool> ...\n"));
	case HELP_RESILVER:
		return (gettext("\tresilver <pool> ...\n"));
	case HELP_TRIM:
		return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
		    "[<device> ...]\n"));
	case HELP_STATUS:
		return (gettext("\tstatus [--power] [-c [script1,script2,...]] "
		    "[-DegiLpPstvx] [-T d|u] [pool] ...\n"
		    "\t [interval [count]]\n"));
	case HELP_UPGRADE:
		return (gettext("\tupgrade\n"
		    "\tupgrade -v\n"
		    "\tupgrade [-V version] <-a | pool ...>\n"));
	case HELP_EVENTS:
		return (gettext("\tevents [-vHf [pool] | -c]\n"));
	case HELP_GET:
		return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
		    "<\"all\" | property[,...]> <pool> ...\n"));
	case HELP_SET:
		return (gettext("\tset <property=value> <pool>\n"
		    "\tset <vdev_property=value> <pool> <vdev>\n"));
	case HELP_SPLIT:
		return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
		    "\t [-o property=value] <pool> <newpool> "
		    "[<device> ...]\n"));
	case HELP_REGUID:
		return (gettext("\treguid <pool>\n"));
	case HELP_SYNC:
		return (gettext("\tsync [pool] ...\n"));
	case HELP_VERSION:
		return (gettext("\tversion\n"));
	case HELP_WAIT:
		return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
		    "<pool> [interval]\n"));
	default:
		__builtin_unreachable();
	}
}

static void
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
{
	uint_t children = 0;
	nvlist_t **child;
	uint_t i;

	(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (children == 0) {
		char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
		    VDEV_NAME_PATH);

		if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
		    strcmp(path, VDEV_TYPE_HOLE) != 0)
			fnvlist_add_boolean(res, path);

		free(path);
		return;
	}

	for (i = 0; i < children; i++) {
		zpool_collect_leaves(zhp, child[i], res);
	}
}

/*
 * Callback routine that will print out a pool property value.
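 * Called via zprop_iter() from usage() when listing the supported pool
 * properties.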
 */
static int
print_pool_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", zpool_prop_to_name(prop));

	if (zpool_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (zpool_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Callback routine that will print out a vdev property value.
 */
static int
print_vdev_prop_cb(int prop, void *cb)
{
	FILE *fp = cb;

	(void) fprintf(fp, "\t%-19s ", vdev_prop_to_name(prop));

	if (vdev_prop_readonly(prop))
		(void) fprintf(fp, " NO ");
	else
		(void) fprintf(fp, " YES ");

	if (vdev_prop_values(prop) == NULL)
		(void) fprintf(fp, "-\n");
	else
		(void) fprintf(fp, "%s\n", vdev_prop_values(prop));

	return (ZPROP_CONT);
}

/*
 * Given a leaf vdev name like 'L5' return its VDEV_CONFIG_PATH like
 * '/dev/disk/by-vdev/L5'.
 */
static const char *
vdev_name_to_path(zpool_handle_t *zhp, char *vdev)
{
	nvlist_t *vdev_nv = zpool_find_vdev(zhp, vdev, NULL, NULL, NULL);
	if (vdev_nv == NULL) {
		return (NULL);
	}
	return (fnvlist_lookup_string(vdev_nv, ZPOOL_CONFIG_PATH));
}

static int
zpool_power_on(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_TRUE));
}

static int
zpool_power_on_and_disk_wait(zpool_handle_t *zhp, char *vdev)
{
	int rc;

	rc = zpool_power_on(zhp, vdev);
	if (rc != 0)
		return (rc);

	zpool_disk_wait(vdev_name_to_path(zhp, vdev));

	return (0);
}

static int
zpool_power_on_pool_and_wait_for_devices(zpool_handle_t *zhp)
{
	nvlist_t *nv;
	const char *path = NULL;
	int rc;

	/* Power up all the devices first */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		if (path != NULL) {
			rc = zpool_power_on(zhp, (char *)path);
			if (rc != 0) {
				return (rc);
			}
		}
	}

	/*
	 * Wait for their devices to show up.  Since we powered them on
	 * at roughly the same time, they should all come online around
	 * the same time.
	 */
	FOR_EACH_REAL_LEAF_VDEV(zhp, nv) {
		path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		zpool_disk_wait(path);
	}

	return (0);
}

static int
zpool_power_off(zpool_handle_t *zhp, char *vdev)
{
	return (zpool_power(zhp, vdev, B_FALSE));
}

/*
 * Display usage message.  If we're inside a command, display only the usage for
 * that command.  Otherwise, iterate over the entire command table and display
 * a complete usage message.
 */
static __attribute__((noreturn)) void
usage(boolean_t requested)
{
	FILE *fp = requested ? stdout : stderr;

	if (current_command == NULL) {
		int i;

		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
		(void) fprintf(fp,
		    gettext("where 'command' is one of the following:\n\n"));

		for (i = 0; i < NCOMMAND; i++) {
			if (command_table[i].name == NULL)
				(void) fprintf(fp, "\n");
			else
				(void) fprintf(fp, "%s",
				    get_usage(command_table[i].usage));
		}

		(void) fprintf(fp,
		    gettext("\nFor further help on a command or topic, "
		    "run: %s\n"), "zpool help [<topic>]");
	} else {
		(void) fprintf(fp, gettext("usage:\n"));
		(void) fprintf(fp, "%s", get_usage(current_command->usage));
	}

	if (current_command != NULL &&
	    current_prop_type != (ZFS_TYPE_POOL | ZFS_TYPE_VDEV) &&
	    ((strcmp(current_command->name, "set") == 0) ||
	    (strcmp(current_command->name, "get") == 0) ||
	    (strcmp(current_command->name, "list") == 0))) {

		(void) fprintf(fp, "%s",
		    gettext("\nthe following properties are supported:\n"));

		(void) fprintf(fp, "\n\t%-19s %s %s\n\n",
		    "PROPERTY", "EDIT", "VALUES");

		/* Iterate over all properties */
		if (current_prop_type == ZFS_TYPE_POOL) {
			(void) zprop_iter(print_pool_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);

			(void) fprintf(fp, "\t%-19s ", "feature@...");
			(void) fprintf(fp, "YES "
			    "disabled | enabled | active\n");

			(void) fprintf(fp, gettext("\nThe feature@ properties "
			    "must be appended with a feature name.\n"
			    "See zpool-features(7).\n"));
		} else if (current_prop_type == ZFS_TYPE_VDEV) {
			(void) zprop_iter(print_vdev_prop_cb, fp, B_FALSE,
			    B_TRUE, current_prop_type);
		}
	}

	/*
	 * See comments at end of main().
	 */
	if (getenv("ZFS_ABORT") != NULL) {
		(void) printf("dumping core by request\n");
		abort();
	}

	exit(requested ? 0 : 2);
}

/*
 * zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
 * Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
 * if none specified.
 *
 * -c	Cancel. Ends active initializing.
 * -s	Suspend. Initializing can then be restarted with no flags.
 * -u	Uninitialize. Clears initialization state.
 * -w	Wait. Blocks until initializing has completed.
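 *
 * -c, -s, and -u are mutually exclusive; -w may only be combined with the
 * default start action.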
 */
int
zpool_do_initialize(int argc, char **argv)
{
	int c;
	char *poolname;
	zpool_handle_t *zhp;
	nvlist_t *vdevs;
	int err = 0;
	boolean_t wait = B_FALSE;

	struct option long_options[] = {
		{"cancel",	no_argument,		NULL, 'c'},
		{"suspend",	no_argument,		NULL, 's'},
		{"uninit",	no_argument,		NULL, 'u'},
		{"wait",	no_argument,		NULL, 'w'},
		{0, 0, 0, 0}
	};

	pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
	while ((c = getopt_long(argc, argv, "csuw", long_options,
	    NULL)) != -1) {
		switch (c) {
		case 'c':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_CANCEL) {
				(void) fprintf(stderr, gettext("-c cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_CANCEL;
			break;
		case 's':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_SUSPEND) {
				(void) fprintf(stderr, gettext("-s cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_SUSPEND;
			break;
		case 'u':
			if (cmd_type != POOL_INITIALIZE_START &&
			    cmd_type != POOL_INITIALIZE_UNINIT) {
				(void) fprintf(stderr, gettext("-u cannot be "
				    "combined with other options\n"));
				usage(B_FALSE);
			}
			cmd_type = POOL_INITIALIZE_UNINIT;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			if (optopt != 0) {
				(void) fprintf(stderr,
				    gettext("invalid option '%c'\n"), optopt);
			} else {
				(void) fprintf(stderr,
				    gettext("invalid option '%s'\n"),
				    argv[optind - 1]);
			}
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
		return (-1);
	}

	if (wait && (cmd_type != POOL_INITIALIZE_START)) {
		(void) fprintf(stderr, gettext("-w cannot be used with -c, -s, "
		    "or -u\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];
	zhp = zpool_open(g_zfs, poolname);
	if (zhp == NULL)
		return (-1);

	vdevs = fnvlist_alloc();
	if (argc == 1) {
		/* no individual leaf vdevs specified, so add them all */
		nvlist_t *config = zpool_get_config(zhp, NULL);
		nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
		    ZPOOL_CONFIG_VDEV_TREE);
		zpool_collect_leaves(zhp, nvroot, vdevs);
	} else {
		for (int i = 1; i < argc; i++) {
			fnvlist_add_boolean(vdevs, argv[i]);
		}
	}

	if (wait)
		err = zpool_initialize_wait(zhp, cmd_type, vdevs);
	else
		err = zpool_initialize(zhp, cmd_type, vdevs);

	fnvlist_free(vdevs);
	zpool_close(zhp);

	return (err);
}

/*
 * print a pool vdev config for dry runs
 */
static void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
    const char *match, int name_flags)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	boolean_t printed = B_FALSE;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0) {
		if (name != NULL)
			(void) printf("\t%*s%s\n", indent, "", name);
		return;
	}

	for (c = 0; c < children; c++) {
		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
		const char *class = "";

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
		    &is_hole);

		if (is_hole == B_TRUE) {
			continue;
		}

		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
		    &is_log);
		if (is_log)
			class = VDEV_ALLOC_BIAS_LOG;
		(void) nvlist_lookup_string(child[c],
		    ZPOOL_CONFIG_ALLOCATION_BIAS, &class);
		if (strcmp(match, class) != 0)
			continue;

		if (!printed && name != NULL) {
			(void) printf("\t%*s%s\n", indent, "", name);
			printed = B_TRUE;
		}
		vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
		print_vdev_tree(zhp, vname, child[c], indent + 2, "",
		    name_flags);
		free(vname);
	}
}

/*
 * Print the list of l2cache devices for dry runs.
 */
static void
print_cache_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "cache");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

/*
 * Print the list of spares for dry runs.
 */
static void
print_spare_list(nvlist_t *nv, int indent)
{
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0 && children > 0) {
		(void) printf("\t%*s%s\n", indent, "", "spares");
	} else {
		return;
	}
	for (c = 0; c < children; c++) {
		char *vname;

		vname = zpool_vdev_name(g_zfs, NULL, child[c], 0);
		(void) printf("\t%*s%s\n", indent + 2, "", vname);
		free(vname);
	}
}

static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
	nvpair_t *nvp;
	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
	    nvp = nvlist_next_nvpair(proplist, nvp)) {
		if (zpool_prop_feature(nvpair_name(nvp)))
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Add a property pair (name, string-value) into a property nvlist.
 */
static int
add_prop_list(const char *propname, const char *propval, nvlist_t **props,
    boolean_t poolprop)
{
	zpool_prop_t prop = ZPOOL_PROP_INVAL;
	nvlist_t *proplist;
	const char *normnm;
	const char *strval;

	if (*props == NULL &&
	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory\n"));
		return (1);
	}

	proplist = *props;

	if (poolprop) {
		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
		const char *cname =
		    zpool_prop_to_name(ZPOOL_PROP_COMPATIBILITY);

		if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
		    (!zpool_prop_feature(propname) &&
		    !zpool_prop_vdev(propname))) {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid pool or vdev property\n"), propname);
			return (2);
		}

		/*
		 * feature@ properties and version should not be specified
		 * at the same time.
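		 * The same restriction applies to 'version' combined with a
		 * non-"legacy" 'compatibility' setting (checked just below).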
		 */
		if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    prop_list_contains_feature(proplist))) {
			(void) fprintf(stderr, gettext("'feature@' and "
			    "'version' properties cannot be specified "
			    "together\n"));
			return (2);
		}

		/*
		 * if version is specified, only "legacy" compatibility
		 * may be requested
		 */
		if ((prop == ZPOOL_PROP_COMPATIBILITY &&
		    strcmp(propval, ZPOOL_COMPAT_LEGACY) != 0 &&
		    nvlist_exists(proplist, vname)) ||
		    (prop == ZPOOL_PROP_VERSION &&
		    nvlist_exists(proplist, cname) &&
		    strcmp(fnvlist_lookup_string(proplist, cname),
		    ZPOOL_COMPAT_LEGACY) != 0)) {
			(void) fprintf(stderr, gettext("when 'version' is "
			    "specified, the 'compatibility' feature may only "
			    "be set to '" ZPOOL_COMPAT_LEGACY "'\n"));
			return (2);
		}

		if (zpool_prop_feature(propname) || zpool_prop_vdev(propname))
			normnm = propname;
		else
			normnm = zpool_prop_to_name(prop);
	} else {
		zfs_prop_t fsprop = zfs_name_to_prop(propname);

		if (zfs_prop_valid_for_type(fsprop, ZFS_TYPE_FILESYSTEM,
		    B_FALSE)) {
			normnm = zfs_prop_to_name(fsprop);
		} else if (zfs_prop_user(propname) ||
		    zfs_prop_userquota(propname)) {
			normnm = propname;
		} else {
			(void) fprintf(stderr, gettext("property '%s' is "
			    "not a valid filesystem property\n"), propname);
			return (2);
		}
	}

	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
	    prop != ZPOOL_PROP_CACHEFILE) {
		(void) fprintf(stderr, gettext("property '%s' "
		    "specified multiple times\n"), propname);
		return (2);
	}

	if (nvlist_add_string(proplist, normnm, propval) != 0) {
		(void) fprintf(stderr, gettext("internal "
		    "error: out of memory\n"));
		return (1);
	}

	return (0);
}

/*
 * Set a default property pair (name, string-value) in a property nvlist
 */
static int
add_prop_list_default(const char *propname, const char *propval,
    nvlist_t **props)
{
	const char *pval;

	if (nvlist_lookup_string(*props, propname, &pval) == 0)
		return (0);

	return (add_prop_list(propname, propval, props, B_TRUE));
}

/*
 * zpool add [-afgLnP] [-o property=value] <pool> <vdev> ...
 *
 *	-a	Disable the ashift validation checks
 *	-f	Force addition of devices, even if they appear in use
 *	-g	Display guid for individual vdev name.
 *	-L	Follow links when resolving vdev path name.
 *	-n	Do not add the devices, but display the resulting layout if
 *		they were to be added.
 *	-o	Set property=value.
 *	-P	Display full path for vdev name.
 *
 * Adds the given vdevs to 'pool'.  As with create, the bulk of this work is
 * handled by make_root_vdev(), which constructs the nvlist needed to pass to
 * libzfs.
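 * The --allow-in-use, --allow-replication-mismatch, and
 * --allow-ashift-mismatch long options disable the corresponding individual
 * checks that '-f' disables all at once.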
 */
int
zpool_do_add(int argc, char **argv)
{
	boolean_t check_replication = B_TRUE;
	boolean_t check_inuse = B_TRUE;
	boolean_t dryrun = B_FALSE;
	boolean_t check_ashift = B_TRUE;
	boolean_t force = B_FALSE;
	int name_flags = 0;
	int c;
	nvlist_t *nvroot;
	char *poolname;
	int ret;
	zpool_handle_t *zhp;
	nvlist_t *config;
	nvlist_t *props = NULL;
	char *propval;

	struct option long_options[] = {
		{"allow-in-use", no_argument, NULL, ZPOOL_OPTION_ALLOW_INUSE},
		{"allow-replication-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH},
		{"allow-ashift-mismatch", no_argument, NULL,
		    ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH},
		{0, 0, 0, 0}
	};

	/* check options */
	while ((c = getopt_long(argc, argv, "fgLno:P", long_options, NULL))
	    != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'g':
			name_flags |= VDEV_NAME_GUID;
			break;
		case 'L':
			name_flags |= VDEV_NAME_FOLLOW_LINKS;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				usage(B_FALSE);
			}
			*propval = '\0';
			propval++;

			if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
			    (add_prop_list(optarg, propval, &props, B_TRUE)))
				usage(B_FALSE);
			break;
		case 'P':
			name_flags |= VDEV_NAME_PATH;
			break;
		case ZPOOL_OPTION_ALLOW_INUSE:
			check_inuse = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_REPLICATION_MISMATCH:
			check_replication = B_FALSE;
			break;
		case ZPOOL_OPTION_ALLOW_ASHIFT_MISMATCH:
			check_ashift = B_FALSE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		usage(B_FALSE);
	}

	if (force) {
		if (!check_inuse || !check_replication || !check_ashift) {
			(void) fprintf(stderr, gettext("'-f' option is not "
			    "allowed with '--allow-replication-mismatch', "
			    "'--allow-ashift-mismatch', or "
			    "'--allow-in-use'\n"));
			usage(B_FALSE);
		}
		check_inuse = B_FALSE;
		check_replication = B_FALSE;
		check_ashift = B_FALSE;
	}

	poolname = argv[0];

	argc--;
	argv++;

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
		    poolname);
		zpool_close(zhp);
		return (1);
	}

	/* unless manually specified use "ashift" pool property (if set) */
	if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
		int intval;
		zprop_source_t src;
		char strval[ZPOOL_MAXPROPLEN];

		intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
		if (src != ZPROP_SRC_DEFAULT) {
			(void) sprintf(strval, "%" PRId32, intval);
			verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
			    &props, B_TRUE) == 0);
		}
	}

	/* pass off to make_root_vdev for processing */
	nvroot = make_root_vdev(zhp, props, !check_inuse,
	    check_replication, B_FALSE, dryrun, argc, argv);
	if (nvroot == NULL) {
		zpool_close(zhp);
		return (1);
	}

	if (dryrun) {
		nvlist_t *poolnvroot;
		nvlist_t **l2child, **sparechild;
		uint_t l2children, sparechildren, c;
		char *vname;
		boolean_t hadcache = B_FALSE, hadspare = B_FALSE;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &poolnvroot) == 0);

		(void) printf(gettext("would update '%s' to the following "
		    "configuration:\n\n"), zpool_get_name(zhp));

		/* print original main pool and new tree */
		print_vdev_tree(zhp, poolname, poolnvroot, 0, "",
		    name_flags | VDEV_NAME_TYPE_ID);
		print_vdev_tree(zhp, NULL, nvroot, 0, "", name_flags);

		/* print other classes: 'dedup', 'special', and 'log' */
		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_DEDUP)) {
			print_vdev_tree(zhp, "dedup", nvroot, 0,
			    VDEV_ALLOC_BIAS_DEDUP, name_flags);
		}

		if (zfs_special_devs(poolnvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		} else if (zfs_special_devs(nvroot, VDEV_ALLOC_BIAS_SPECIAL)) {
			print_vdev_tree(zhp, "special", nvroot, 0,
			    VDEV_ALLOC_BIAS_SPECIAL, name_flags);
		}

		if (num_logs(poolnvroot) > 0) {
			print_vdev_tree(zhp, "logs", poolnvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
			print_vdev_tree(zhp, NULL, nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		} else if (num_logs(nvroot) > 0) {
			print_vdev_tree(zhp, "logs", nvroot, 0,
			    VDEV_ALLOC_BIAS_LOG, name_flags);
		}

		/* Do the same for the caches */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			hadcache = B_TRUE;
			(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2child, &l2children) == 0 && l2children) {
			if (!hadcache)
				(void) printf(gettext("\tcache\n"));
			for (c = 0; c < l2children; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    l2child[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		/* And finally the spares */
		if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			hadspare = B_TRUE;
			(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &sparechild, &sparechildren) == 0 && sparechildren > 0) {
			if (!hadspare)
				(void) printf(gettext("\tspares\n"));
			for (c = 0; c < sparechildren; c++) {
				vname = zpool_vdev_name(g_zfs, NULL,
				    sparechild[c], name_flags);
				(void) printf("\t %s\n", vname);
				free(vname);
			}
		}

		ret = 0;
	} else {
		ret = (zpool_add(zhp, nvroot, check_ashift) != 0);
	}

	nvlist_free(props);
	nvlist_free(nvroot);
	zpool_close(zhp);

	return (ret);
}

/*
 * zpool remove [-npsw] <pool> <vdev> ...
 *
 * Removes the given vdev from the pool.
 */
int
zpool_do_remove(int argc, char **argv)
{
	char *poolname;
	int i, ret = 0;
	zpool_handle_t *zhp = NULL;
	boolean_t stop = B_FALSE;
	int c;
	boolean_t noop = B_FALSE;
	boolean_t parsable = B_FALSE;
	boolean_t wait = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "npsw")) != -1) {
		switch (c) {
		case 'n':
			noop = B_TRUE;
			break;
		case 'p':
			parsable = B_TRUE;
			break;
		case 's':
			stop = B_TRUE;
			break;
		case 'w':
			wait = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		usage(B_FALSE);
	}

	poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
		return (1);

	if (stop && noop) {
		zpool_close(zhp);
		(void) fprintf(stderr, gettext("stop request ignored\n"));
		return (0);
	}

	if (stop) {
		if (argc > 1) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
		if (zpool_vdev_remove_cancel(zhp) != 0)
			ret = 1;
		if (wait) {
			(void) fprintf(stderr, gettext("invalid option "
			    "combination: -w cannot be used with -s\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc < 2) {
			(void) fprintf(stderr, gettext("missing device\n"));
			usage(B_FALSE);
		}

		for (i = 1; i < argc; i++) {
			if (noop) {
				uint64_t size;

				if (zpool_vdev_indirect_size(zhp, argv[i],
				    &size) != 0) {
					ret = 1;
					break;
				}
				if (parsable) {
					(void) printf("%s %llu\n",
					    argv[i], (unsigned long long)size);
				} else {
					char valstr[32];
					zfs_nicenum(size, valstr,
					    sizeof (valstr));
					(void) printf("Memory that will be "
					    "used after removing %s: %s\n",
					    argv[i], valstr);
				}
			} else {
				if (zpool_vdev_remove(zhp, argv[i]) != 0)
					ret = 1;
			}
		}

		if (ret == 0 && wait)
			ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
	}
	zpool_close(zhp);

	return (ret);
}

/*
 * Return 1 if a vdev is active (being used in a pool)
 * Return 0 if a vdev is inactive (offlined or faulted, or not in active pool)
 *
 * This is useful for checking if a disk in an active pool is offlined or
 * faulted.
 */
static int
vdev_is_active(char *vdev_path)
{
	int fd;
	fd = open(vdev_path, O_EXCL);
	if (fd < 0) {
		return (1);	/* can't open O_EXCL - disk is active */
	}

	close(fd);
	return (0);	/* disk is inactive in the pool */
}

/*
 * zpool labelclear [-f] <vdev>
 *
 *	-f	Force clearing the label for the vdevs which are members of
 *		the exported or foreign pools.
 *
 * Verifies that the vdev is not active and zeros out the label information
 * on the device.
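 * The vdev may be given as an absolute path or as a short device name, in
 * which case the usual device directories are searched.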
 */
int
zpool_do_labelclear(int argc, char **argv)
{
	char vdev[MAXPATHLEN];
	char *name = NULL;
	int c, fd = -1, ret = 0;
	nvlist_t *config;
	pool_state_t state;
	boolean_t inuse = B_FALSE;
	boolean_t force = B_FALSE;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		default:
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* get vdev name */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing vdev name\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	(void) strlcpy(vdev, argv[0], sizeof (vdev));

	/*
	 * If we cannot open an absolute path, we quit.
	 * Otherwise if the provided vdev name doesn't point to a file,
	 * try prepending expected disk paths and partition numbers.
	 */
	if ((fd = open(vdev, O_RDWR)) < 0) {
		int error;
		if (vdev[0] == '/') {
			(void) fprintf(stderr, gettext("failed to open "
			    "%s: %s\n"), vdev, strerror(errno));
			return (1);
		}

		error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
		if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
			if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
				error = ENOENT;
		}

		if (error || ((fd = open(vdev, O_RDWR)) < 0)) {
			if (errno == ENOENT) {
				(void) fprintf(stderr, gettext(
				    "failed to find device %s, try "
				    "specifying absolute path instead\n"),
				    argv[0]);
				return (1);
			}

			(void) fprintf(stderr, gettext("failed to open %s:"
			    " %s\n"), vdev, strerror(errno));
			return (1);
		}
	}

	/*
	 * Flush all dirty pages for the block device.  This should not be
	 * fatal when the device does not support BLKFLSBUF as would be the
	 * case for a file vdev.
	 */
	if ((zfs_dev_flush(fd) != 0) && (errno != ENOTTY))
		(void) fprintf(stderr, gettext("failed to invalidate "
		    "cache for %s: %s\n"), vdev, strerror(errno));

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) fprintf(stderr,
		    gettext("failed to read label from %s\n"), vdev);
		ret = 1;
		goto errout;
	}
	nvlist_free(config);

	ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to check state for %s\n"), vdev);
		ret = 1;
		goto errout;
	}

	if (!inuse)
		goto wipe_label;

	switch (state) {
	default:
	case POOL_STATE_ACTIVE:
	case POOL_STATE_SPARE:
	case POOL_STATE_L2CACHE:
		/*
		 * We allow the user to call 'zpool offline -f'
		 * on an offlined disk in an active pool. We can check if
		 * the disk is online by calling vdev_is_active().
		 */
		if (force && !vdev_is_active(vdev))
			break;

		(void) fprintf(stderr, gettext(
		    "%s is a member (%s) of pool \"%s\""),
		    vdev, zpool_pool_state_to_name(state), name);

		if (force) {
			(void) fprintf(stderr, gettext(
			    ". Offline the disk first to clear its label."));
		}
		printf("\n");
		ret = 1;
		goto errout;

	case POOL_STATE_EXPORTED:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of exported pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_POTENTIALLY_ACTIVE:
		if (force)
			break;
		(void) fprintf(stderr, gettext(
		    "use '-f' to override the following error:\n"
		    "%s is a member of potentially active pool \"%s\"\n"),
		    vdev, name);
		ret = 1;
		goto errout;

	case POOL_STATE_DESTROYED:
		/* inuse should never be set for a destroyed pool */
		assert(0);
		break;
	}

wipe_label:
	ret = zpool_clear_label(fd);
	if (ret != 0) {
		(void) fprintf(stderr,
		    gettext("failed to clear label for %s\n"), vdev);
	}

errout:
	free(name);
	(void) close(fd);

	return (ret);
}

/*
 * zpool create [-fnd] [-o property=value] ...
 *		[-O file-system-property=value] ...
 *		[-R root] [-m mountpoint] <pool> <dev> ...
 *
 *	-f	Force creation, even if devices appear in use
 *	-n	Do not create the pool, but display the resulting layout if it
 *		were to be created.
 *	-R	Create a pool under an alternate root
 *	-m	Set default mountpoint for the root dataset.  By default it's
 *		'/<pool>'
 *	-o	Set property=value.
 *	-o	Set feature@feature=enabled|disabled.
 *	-d	Don't automatically enable all supported pool features
 *		(individual features can be enabled with -o).
 *	-O	Set fsproperty=value in the pool's root file system
 *
 * Creates the named pool according to the given vdev specification.  The
 * bulk of the vdev processing is done in make_root_vdev() in zpool_vdev.c.
 * Once we get the nvlist back from make_root_vdev(), we either print out the
 * contents (if '-n' was specified), or pass it to libzfs to do the creation.
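 *
 * A temporary pool name may also be supplied with -t; it sets the 'tname'
 * property and is used instead of <pool> when mounting the new root dataset.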
 */
int
zpool_do_create(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	boolean_t dryrun = B_FALSE;
	boolean_t enable_pool_features = B_TRUE;

	int c;
	nvlist_t *nvroot = NULL;
	char *poolname;
	char *tname = NULL;
	int ret = 1;
	char *altroot = NULL;
	char *compat = NULL;
	char *mountpoint = NULL;
	nvlist_t *fsprops = NULL;
	nvlist_t *props = NULL;
	char *propval;

	/* check options */
	while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'd':
			enable_pool_features = B_FALSE;
			break;
		case 'R':
			altroot = optarg;
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			break;
		case 'm':
			/* Equivalent to -O mountpoint=optarg */
			mountpoint = optarg;
			break;
		case 'o':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -o option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			if (add_prop_list(optarg, propval, &props, B_TRUE))
				goto errout;

			/*
			 * If the user is creating a pool that doesn't support
			 * feature flags, don't enable any features.
			 */
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
				char *end;
				u_longlong_t ver;

				ver = strtoull(propval, &end, 10);
				if (*end == '\0' &&
				    ver < SPA_VERSION_FEATURES) {
					enable_pool_features = B_FALSE;
				}
			}
			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
				altroot = propval;
			if (zpool_name_to_prop(optarg) ==
			    ZPOOL_PROP_COMPATIBILITY)
				compat = propval;
			break;
		case 'O':
			if ((propval = strchr(optarg, '=')) == NULL) {
				(void) fprintf(stderr, gettext("missing "
				    "'=' for -O option\n"));
				goto errout;
			}
			*propval = '\0';
			propval++;

			/*
			 * Mountpoints are checked and then added later.
			 * Uniquely among properties, they can be specified
			 * more than once, to avoid conflict with -m.
			 */
			if (0 == strcmp(optarg,
			    zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
				mountpoint = propval;
			} else if (add_prop_list(optarg, propval, &fsprops,
			    B_FALSE)) {
				goto errout;
			}
			break;
		case 't':
			/*
			 * Sanity check temporary pool name.
			 */
			if (strchr(optarg, '/') != NULL) {
				(void) fprintf(stderr, gettext("cannot create "
				    "'%s': invalid character '/' in temporary "
				    "name\n"), optarg);
				(void) fprintf(stderr, gettext("use 'zfs "
				    "create' to create a dataset\n"));
				goto errout;
			}

			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
				goto errout;
			if (add_prop_list_default(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props))
				goto errout;
			tname = optarg;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			goto badusage;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			goto badusage;
		}
	}

	argc -= optind;
	argv += optind;

	/* get pool name and check number of arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool name argument\n"));
		goto badusage;
	}
	if (argc < 2) {
		(void) fprintf(stderr, gettext("missing vdev specification\n"));
		goto badusage;
	}

	poolname = argv[0];

	/*
	 * As a special case, check for use of '/' in the name, and direct the
	 * user to use 'zfs create' instead.
	 */
	if (strchr(poolname, '/') != NULL) {
		(void) fprintf(stderr, gettext("cannot create '%s': invalid "
		    "character '/' in pool name\n"), poolname);
		(void) fprintf(stderr, gettext("use 'zfs create' to "
		    "create a dataset\n"));
		goto errout;
	}

	/* pass off to make_root_vdev for bulk processing */
	nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
	    argc - 1, argv + 1);
	if (nvroot == NULL)
		goto errout;

	/* make_root_vdev() allows 0 toplevel children if there are spares */
	if (!zfs_allocatable_devs(nvroot)) {
		(void) fprintf(stderr, gettext("invalid vdev "
		    "specification: at least one toplevel vdev must be "
		    "specified\n"));
		goto errout;
	}

	if (altroot != NULL && altroot[0] != '/') {
		(void) fprintf(stderr, gettext("invalid alternate root '%s': "
		    "must be an absolute path\n"), altroot);
		goto errout;
	}

	/*
	 * Check the validity of the mountpoint and direct the user to use the
	 * '-m' mountpoint option if it looks like it's in use.
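	 * A directory containing anything other than '.' and '..' is treated
	 * as in use.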
	 */
	if (mountpoint == NULL ||
	    (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
	    strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
		char buf[MAXPATHLEN];
		DIR *dirp;

		if (mountpoint && mountpoint[0] != '/') {
			(void) fprintf(stderr, gettext("invalid mountpoint "
			    "'%s': must be an absolute path, 'legacy', or "
			    "'none'\n"), mountpoint);
			goto errout;
		}

		if (mountpoint == NULL) {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s/%s",
				    altroot, poolname);
			else
				(void) snprintf(buf, sizeof (buf), "/%s",
				    poolname);
		} else {
			if (altroot != NULL)
				(void) snprintf(buf, sizeof (buf), "%s%s",
				    altroot, mountpoint);
			else
				(void) snprintf(buf, sizeof (buf), "%s",
				    mountpoint);
		}

		if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
			(void) fprintf(stderr, gettext("mountpoint '%s' : "
			    "%s\n"), buf, strerror(errno));
			(void) fprintf(stderr, gettext("use '-m' "
			    "option to provide a different default\n"));
			goto errout;
		} else if (dirp) {
			int count = 0;

			while (count < 3 && readdir(dirp) != NULL)
				count++;
			(void) closedir(dirp);

			if (count > 2) {
				(void) fprintf(stderr, gettext("mountpoint "
				    "'%s' exists and is not empty\n"), buf);
				(void) fprintf(stderr, gettext("use '-m' "
				    "option to provide a "
				    "different default\n"));
				goto errout;
			}
		}
	}

	/*
	 * Now that the mountpoint's validity has been checked, ensure that
	 * the property is set appropriately prior to creating the pool.
	 */
	if (mountpoint != NULL) {
		ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    mountpoint, &fsprops, B_FALSE);
		if (ret != 0)
			goto errout;
	}

	ret = 1;
	if (dryrun) {
		/*
		 * For a dry run invocation, print out a basic message and run
		 * through all the vdevs in the list and print out in an
		 * appropriate hierarchy.
		 */
		(void) printf(gettext("would create '%s' with the "
		    "following layout:\n\n"), poolname);

		print_vdev_tree(NULL, poolname, nvroot, 0, "", 0);
		print_vdev_tree(NULL, "dedup", nvroot, 0,
		    VDEV_ALLOC_BIAS_DEDUP, 0);
		print_vdev_tree(NULL, "special", nvroot, 0,
		    VDEV_ALLOC_BIAS_SPECIAL, 0);
		print_vdev_tree(NULL, "logs", nvroot, 0,
		    VDEV_ALLOC_BIAS_LOG, 0);
		print_cache_list(nvroot, 0);
		print_spare_list(nvroot, 0);

		ret = 0;
	} else {
		/*
		 * Load in feature set.
		 * Note: if compatibility property not given, we'll have
		 * NULL, which means 'all features'.
		 */
		boolean_t requested_features[SPA_FEATURES];
		if (zpool_do_load_compat(compat, requested_features) !=
		    ZPOOL_COMPATIBILITY_OK)
			goto errout;

		/*
		 * props contains list of features to enable.
		 * For each feature:
		 *  - remove it if feature@name=disabled
		 *  - leave it there if feature@name=enabled
		 *  - add it if:
		 *    - enable_pool_features (ie: no '-d' or '-o version')
		 *    - it's supported by the kernel module
		 *    - it's in the requested feature set
		 *  - warn if it's enabled but not in compat
		 */
		for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
			char propname[MAXPATHLEN];
			const char *propval;
			zfeature_info_t *feat = &spa_feature_table[i];

			(void) snprintf(propname, sizeof (propname),
			    "feature@%s", feat->fi_uname);

			if (!nvlist_lookup_string(props, propname, &propval)) {
				if (strcmp(propval,
				    ZFS_FEATURE_DISABLED) == 0) {
					(void) nvlist_remove_all(props,
					    propname);
				} else if (strcmp(propval,
				    ZFS_FEATURE_ENABLED) == 0 &&
				    !requested_features[i]) {
					(void) fprintf(stderr, gettext(
					    "Warning: feature \"%s\" enabled "
					    "but is not in specified "
					    "'compatibility' feature set.\n"),
					    feat->fi_uname);
				}
			} else if (
			    enable_pool_features &&
			    feat->fi_zfs_mod_supported &&
			    requested_features[i]) {
				ret = add_prop_list(propname,
				    ZFS_FEATURE_ENABLED, &props, B_TRUE);
				if (ret != 0)
					goto errout;
			}
		}

		ret = 1;
		if (zpool_create(g_zfs, poolname,
		    nvroot, props, fsprops) == 0) {
			zfs_handle_t *pool = zfs_open(g_zfs,
			    tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
			if (pool != NULL) {
				if (zfs_mount(pool, NULL, 0) == 0) {
					ret = zfs_share(pool, NULL);
					zfs_commit_shares(NULL);
				}
				zfs_close(pool);
			}
		} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
			(void) fprintf(stderr, gettext("pool name may have "
			    "been omitted\n"));
		}
	}

errout:
	nvlist_free(nvroot);
	nvlist_free(fsprops);
	nvlist_free(props);
	return (ret);
badusage:
	nvlist_free(fsprops);
	nvlist_free(props);
	usage(B_FALSE);
	return (2);
}

/*
 * zpool destroy <pool>
 *
 *	-f	Forcefully unmount any datasets
 *
 * Destroy the given pool.  Automatically unmounts any datasets in the pool.
 */
int
zpool_do_destroy(int argc, char **argv)
{
	boolean_t force = B_FALSE;
	int c;
	char *pool;
	zpool_handle_t *zhp;
	int ret;

	/* check options */
	while ((c = getopt(argc, argv, "f")) != -1) {
		switch (c) {
		case 'f':
			force = B_TRUE;
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* check arguments */
	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing pool argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	pool = argv[0];

	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
		/*
		 * As a special case, check for use of '/' in the name, and
		 * direct the user to use 'zfs destroy' instead.
2008 */ 2009 if (strchr(pool, '/') != NULL) 2010 (void) fprintf(stderr, gettext("use 'zfs destroy' to " 2011 "destroy a dataset\n")); 2012 return (1); 2013 } 2014 2015 if (zpool_disable_datasets(zhp, force) != 0) { 2016 (void) fprintf(stderr, gettext("could not destroy '%s': " 2017 "could not unmount datasets\n"), zpool_get_name(zhp)); 2018 zpool_close(zhp); 2019 return (1); 2020 } 2021 2022 /* The history must be logged as part of the export */ 2023 log_history = B_FALSE; 2024 2025 ret = (zpool_destroy(zhp, history_str) != 0); 2026 2027 zpool_close(zhp); 2028 2029 return (ret); 2030 } 2031 2032 typedef struct export_cbdata { 2033 boolean_t force; 2034 boolean_t hardforce; 2035 } export_cbdata_t; 2036 2037 /* 2038 * Export one pool 2039 */ 2040 static int 2041 zpool_export_one(zpool_handle_t *zhp, void *data) 2042 { 2043 export_cbdata_t *cb = data; 2044 2045 if (zpool_disable_datasets(zhp, cb->force) != 0) 2046 return (1); 2047 2048 /* The history must be logged as part of the export */ 2049 log_history = B_FALSE; 2050 2051 if (cb->hardforce) { 2052 if (zpool_export_force(zhp, history_str) != 0) 2053 return (1); 2054 } else if (zpool_export(zhp, cb->force, history_str) != 0) { 2055 return (1); 2056 } 2057 2058 return (0); 2059 } 2060 2061 /* 2062 * zpool export [-f] <pool> ... 2063 * 2064 * -a Export all pools 2065 * -f Forcefully unmount datasets 2066 * 2067 * Export the given pools. By default, the command will attempt to cleanly 2068 * unmount any active datasets within the pool. If the '-f' flag is specified, 2069 * then the datasets will be forcefully unmounted. 2070 */ 2071 int 2072 zpool_do_export(int argc, char **argv) 2073 { 2074 export_cbdata_t cb; 2075 boolean_t do_all = B_FALSE; 2076 boolean_t force = B_FALSE; 2077 boolean_t hardforce = B_FALSE; 2078 int c, ret; 2079 2080 /* check options */ 2081 while ((c = getopt(argc, argv, "afF")) != -1) { 2082 switch (c) { 2083 case 'a': 2084 do_all = B_TRUE; 2085 break; 2086 case 'f': 2087 force = B_TRUE; 2088 break; 2089 case 'F': 2090 hardforce = B_TRUE; 2091 break; 2092 case '?': 2093 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 2094 optopt); 2095 usage(B_FALSE); 2096 } 2097 } 2098 2099 cb.force = force; 2100 cb.hardforce = hardforce; 2101 argc -= optind; 2102 argv += optind; 2103 2104 if (do_all) { 2105 if (argc != 0) { 2106 (void) fprintf(stderr, gettext("too many arguments\n")); 2107 usage(B_FALSE); 2108 } 2109 2110 return (for_each_pool(argc, argv, B_TRUE, NULL, 2111 ZFS_TYPE_POOL, B_FALSE, zpool_export_one, &cb)); 2112 } 2113 2114 /* check arguments */ 2115 if (argc < 1) { 2116 (void) fprintf(stderr, gettext("missing pool argument\n")); 2117 usage(B_FALSE); 2118 } 2119 2120 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 2121 B_FALSE, zpool_export_one, &cb); 2122 2123 return (ret); 2124 } 2125 2126 /* 2127 * Given a vdev configuration, determine the maximum width needed for the device 2128 * name column. 
2129 */ 2130 static int 2131 max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max, 2132 int name_flags) 2133 { 2134 static const char *const subtypes[] = 2135 {ZPOOL_CONFIG_SPARES, ZPOOL_CONFIG_L2CACHE, ZPOOL_CONFIG_CHILDREN}; 2136 2137 char *name = zpool_vdev_name(g_zfs, zhp, nv, name_flags); 2138 max = MAX(strlen(name) + depth, max); 2139 free(name); 2140 2141 nvlist_t **child; 2142 uint_t children; 2143 for (size_t i = 0; i < ARRAY_SIZE(subtypes); ++i) 2144 if (nvlist_lookup_nvlist_array(nv, subtypes[i], 2145 &child, &children) == 0) 2146 for (uint_t c = 0; c < children; ++c) 2147 max = MAX(max_width(zhp, child[c], depth + 2, 2148 max, name_flags), max); 2149 2150 return (max); 2151 } 2152 2153 typedef struct spare_cbdata { 2154 uint64_t cb_guid; 2155 zpool_handle_t *cb_zhp; 2156 } spare_cbdata_t; 2157 2158 static boolean_t 2159 find_vdev(nvlist_t *nv, uint64_t search) 2160 { 2161 uint64_t guid; 2162 nvlist_t **child; 2163 uint_t c, children; 2164 2165 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 && 2166 search == guid) 2167 return (B_TRUE); 2168 2169 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2170 &child, &children) == 0) { 2171 for (c = 0; c < children; c++) 2172 if (find_vdev(child[c], search)) 2173 return (B_TRUE); 2174 } 2175 2176 return (B_FALSE); 2177 } 2178 2179 static int 2180 find_spare(zpool_handle_t *zhp, void *data) 2181 { 2182 spare_cbdata_t *cbp = data; 2183 nvlist_t *config, *nvroot; 2184 2185 config = zpool_get_config(zhp, NULL); 2186 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2187 &nvroot) == 0); 2188 2189 if (find_vdev(nvroot, cbp->cb_guid)) { 2190 cbp->cb_zhp = zhp; 2191 return (1); 2192 } 2193 2194 zpool_close(zhp); 2195 return (0); 2196 } 2197 2198 typedef struct status_cbdata { 2199 int cb_count; 2200 int cb_name_flags; 2201 int cb_namewidth; 2202 boolean_t cb_allpools; 2203 boolean_t cb_verbose; 2204 boolean_t cb_literal; 2205 boolean_t cb_explain; 2206 boolean_t cb_first; 2207 boolean_t cb_dedup_stats; 2208 boolean_t cb_print_unhealthy; 2209 boolean_t cb_print_status; 2210 boolean_t cb_print_slow_ios; 2211 boolean_t cb_print_vdev_init; 2212 boolean_t cb_print_vdev_trim; 2213 vdev_cmd_data_list_t *vcdl; 2214 boolean_t cb_print_power; 2215 } status_cbdata_t; 2216 2217 /* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */ 2218 static boolean_t 2219 is_blank_str(const char *str) 2220 { 2221 for (; str != NULL && *str != '\0'; ++str) 2222 if (!isblank(*str)) 2223 return (B_FALSE); 2224 return (B_TRUE); 2225 } 2226 2227 /* Print command output lines for specific vdev in a specific pool */ 2228 static void 2229 zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, const char *path) 2230 { 2231 vdev_cmd_data_t *data; 2232 int i, j; 2233 const char *val; 2234 2235 for (i = 0; i < vcdl->count; i++) { 2236 if ((strcmp(vcdl->data[i].path, path) != 0) || 2237 (strcmp(vcdl->data[i].pool, pool) != 0)) { 2238 /* Not the vdev we're looking for */ 2239 continue; 2240 } 2241 2242 data = &vcdl->data[i]; 2243 /* Print out all the output values for this vdev */ 2244 for (j = 0; j < vcdl->uniq_cols_cnt; j++) { 2245 val = NULL; 2246 /* Does this vdev have values for this column? */ 2247 for (int k = 0; k < data->cols_cnt; k++) { 2248 if (strcmp(data->cols[k], 2249 vcdl->uniq_cols[j]) == 0) { 2250 /* yes it does, record the value */ 2251 val = data->lines[k]; 2252 break; 2253 } 2254 } 2255 /* 2256 * Mark empty values with dashes to make output 2257 * awk-able. 
2258 */ 2259 if (val == NULL || is_blank_str(val)) 2260 val = "-"; 2261 2262 printf("%*s", vcdl->uniq_cols_width[j], val); 2263 if (j < vcdl->uniq_cols_cnt - 1) 2264 fputs(" ", stdout); 2265 } 2266 2267 /* Print out any values that aren't in a column at the end */ 2268 for (j = data->cols_cnt; j < data->lines_cnt; j++) { 2269 /* Did we have any columns? If so print a spacer. */ 2270 if (vcdl->uniq_cols_cnt > 0) 2271 fputs(" ", stdout); 2272 2273 val = data->lines[j]; 2274 fputs(val ?: "", stdout); 2275 } 2276 break; 2277 } 2278 } 2279 2280 /* 2281 * Print vdev initialization status for leaves 2282 */ 2283 static void 2284 print_status_initialize(vdev_stat_t *vs, boolean_t verbose) 2285 { 2286 if (verbose) { 2287 if ((vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE || 2288 vs->vs_initialize_state == VDEV_INITIALIZE_SUSPENDED || 2289 vs->vs_initialize_state == VDEV_INITIALIZE_COMPLETE) && 2290 !vs->vs_scan_removing) { 2291 char zbuf[1024]; 2292 char tbuf[256]; 2293 2294 time_t t = vs->vs_initialize_action_time; 2295 int initialize_pct = 100; 2296 if (vs->vs_initialize_state != 2297 VDEV_INITIALIZE_COMPLETE) { 2298 initialize_pct = (vs->vs_initialize_bytes_done * 2299 100 / (vs->vs_initialize_bytes_est + 1)); 2300 } 2301 2302 (void) ctime_r(&t, tbuf); 2303 tbuf[24] = 0; 2304 2305 switch (vs->vs_initialize_state) { 2306 case VDEV_INITIALIZE_SUSPENDED: 2307 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2308 gettext("suspended, started at"), tbuf); 2309 break; 2310 case VDEV_INITIALIZE_ACTIVE: 2311 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2312 gettext("started at"), tbuf); 2313 break; 2314 case VDEV_INITIALIZE_COMPLETE: 2315 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2316 gettext("completed at"), tbuf); 2317 break; 2318 } 2319 2320 (void) printf(gettext(" (%d%% initialized%s)"), 2321 initialize_pct, zbuf); 2322 } else { 2323 (void) printf(gettext(" (uninitialized)")); 2324 } 2325 } else if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) { 2326 (void) printf(gettext(" (initializing)")); 2327 } 2328 } 2329 2330 /* 2331 * Print vdev TRIM status for leaves 2332 */ 2333 static void 2334 print_status_trim(vdev_stat_t *vs, boolean_t verbose) 2335 { 2336 if (verbose) { 2337 if ((vs->vs_trim_state == VDEV_TRIM_ACTIVE || 2338 vs->vs_trim_state == VDEV_TRIM_SUSPENDED || 2339 vs->vs_trim_state == VDEV_TRIM_COMPLETE) && 2340 !vs->vs_scan_removing) { 2341 char zbuf[1024]; 2342 char tbuf[256]; 2343 2344 time_t t = vs->vs_trim_action_time; 2345 int trim_pct = 100; 2346 if (vs->vs_trim_state != VDEV_TRIM_COMPLETE) { 2347 trim_pct = (vs->vs_trim_bytes_done * 2348 100 / (vs->vs_trim_bytes_est + 1)); 2349 } 2350 2351 (void) ctime_r(&t, tbuf); 2352 tbuf[24] = 0; 2353 2354 switch (vs->vs_trim_state) { 2355 case VDEV_TRIM_SUSPENDED: 2356 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2357 gettext("suspended, started at"), tbuf); 2358 break; 2359 case VDEV_TRIM_ACTIVE: 2360 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2361 gettext("started at"), tbuf); 2362 break; 2363 case VDEV_TRIM_COMPLETE: 2364 (void) snprintf(zbuf, sizeof (zbuf), ", %s %s", 2365 gettext("completed at"), tbuf); 2366 break; 2367 } 2368 2369 (void) printf(gettext(" (%d%% trimmed%s)"), 2370 trim_pct, zbuf); 2371 } else if (vs->vs_trim_notsup) { 2372 (void) printf(gettext(" (trim unsupported)")); 2373 } else { 2374 (void) printf(gettext(" (untrimmed)")); 2375 } 2376 } else if (vs->vs_trim_state == VDEV_TRIM_ACTIVE) { 2377 (void) printf(gettext(" (trimming)")); 2378 } 2379 } 2380 2381 /* 2382 * Return the color associated with a 
health string. This includes returning 2383 * NULL for no color change. 2384 */ 2385 static const char * 2386 health_str_to_color(const char *health) 2387 { 2388 if (strcmp(health, gettext("FAULTED")) == 0 || 2389 strcmp(health, gettext("SUSPENDED")) == 0 || 2390 strcmp(health, gettext("UNAVAIL")) == 0) { 2391 return (ANSI_RED); 2392 } 2393 2394 if (strcmp(health, gettext("OFFLINE")) == 0 || 2395 strcmp(health, gettext("DEGRADED")) == 0 || 2396 strcmp(health, gettext("REMOVED")) == 0) { 2397 return (ANSI_YELLOW); 2398 } 2399 2400 return (NULL); 2401 } 2402 2403 /* 2404 * Called for each leaf vdev. Returns 0 if the vdev is healthy. 2405 * A vdev is unhealthy if any of the following are true: 2406 * 1) there are read, write, or checksum errors, 2407 * 2) its state is not ONLINE, or 2408 * 3) slow IO reporting was requested (-s) and there are slow IOs. 2409 */ 2410 static int 2411 vdev_health_check_cb(void *hdl_data, nvlist_t *nv, void *data) 2412 { 2413 status_cbdata_t *cb = data; 2414 vdev_stat_t *vs; 2415 uint_t vsc; 2416 (void) hdl_data; 2417 2418 if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2419 (uint64_t **)&vs, &vsc) != 0) 2420 return (1); 2421 2422 if (vs->vs_checksum_errors || vs->vs_read_errors || 2423 vs->vs_write_errors || vs->vs_state != VDEV_STATE_HEALTHY) 2424 return (1); 2425 2426 if (cb->cb_print_slow_ios && vs->vs_slow_ios) 2427 return (1); 2428 2429 return (0); 2430 } 2431 2432 /* 2433 * Print out configuration state as requested by status_callback. 2434 */ 2435 static void 2436 print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name, 2437 nvlist_t *nv, int depth, boolean_t isspare, vdev_rebuild_stat_t *vrs) 2438 { 2439 nvlist_t **child, *root; 2440 uint_t c, i, vsc, children; 2441 pool_scan_stat_t *ps = NULL; 2442 vdev_stat_t *vs; 2443 char rbuf[6], wbuf[6], cbuf[6]; 2444 char *vname; 2445 uint64_t notpresent; 2446 spare_cbdata_t spare_cb; 2447 const char *state; 2448 const char *type; 2449 const char *path = NULL; 2450 const char *rcolor = NULL, *wcolor = NULL, *ccolor = NULL, 2451 *scolor = NULL; 2452 2453 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2454 &child, &children) != 0) 2455 children = 0; 2456 2457 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2458 (uint64_t **)&vs, &vsc) == 0); 2459 2460 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2461 2462 if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2463 return; 2464 2465 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 2466 2467 if (isspare) { 2468 /* 2469 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for 2470 * online drives. 2471 */ 2472 if (vs->vs_aux == VDEV_AUX_SPARED) 2473 state = gettext("INUSE"); 2474 else if (vs->vs_state == VDEV_STATE_HEALTHY) 2475 state = gettext("AVAIL"); 2476 } 2477 2478 /* 2479 * If '-e' is specified then top-level vdevs and their children 2480 * can be pruned if all of their leaves are healthy. 
2481 */ 2482 if (cb->cb_print_unhealthy && depth > 0 && 2483 for_each_vdev_in_nvlist(nv, vdev_health_check_cb, cb) == 0) { 2484 return; 2485 } 2486 2487 printf_color(health_str_to_color(state), 2488 "\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth, 2489 name, state); 2490 2491 if (!isspare) { 2492 if (vs->vs_read_errors) 2493 rcolor = ANSI_RED; 2494 2495 if (vs->vs_write_errors) 2496 wcolor = ANSI_RED; 2497 2498 if (vs->vs_checksum_errors) 2499 ccolor = ANSI_RED; 2500 2501 if (vs->vs_slow_ios) 2502 scolor = ANSI_BLUE; 2503 2504 if (cb->cb_literal) { 2505 fputc(' ', stdout); 2506 printf_color(rcolor, "%5llu", 2507 (u_longlong_t)vs->vs_read_errors); 2508 fputc(' ', stdout); 2509 printf_color(wcolor, "%5llu", 2510 (u_longlong_t)vs->vs_write_errors); 2511 fputc(' ', stdout); 2512 printf_color(ccolor, "%5llu", 2513 (u_longlong_t)vs->vs_checksum_errors); 2514 } else { 2515 zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf)); 2516 zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf)); 2517 zfs_nicenum(vs->vs_checksum_errors, cbuf, 2518 sizeof (cbuf)); 2519 fputc(' ', stdout); 2520 printf_color(rcolor, "%5s", rbuf); 2521 fputc(' ', stdout); 2522 printf_color(wcolor, "%5s", wbuf); 2523 fputc(' ', stdout); 2524 printf_color(ccolor, "%5s", cbuf); 2525 } 2526 if (cb->cb_print_slow_ios) { 2527 if (children == 0) { 2528 /* Only leaf vdevs have slow IOs */ 2529 zfs_nicenum(vs->vs_slow_ios, rbuf, 2530 sizeof (rbuf)); 2531 } else { 2532 snprintf(rbuf, sizeof (rbuf), "-"); 2533 } 2534 2535 if (cb->cb_literal) 2536 printf_color(scolor, " %5llu", 2537 (u_longlong_t)vs->vs_slow_ios); 2538 else 2539 printf_color(scolor, " %5s", rbuf); 2540 } 2541 if (cb->cb_print_power) { 2542 if (children == 0) { 2543 /* Only leaf vdevs have physical slots */ 2544 switch (zpool_power_current_state(zhp, (char *) 2545 fnvlist_lookup_string(nv, 2546 ZPOOL_CONFIG_PATH))) { 2547 case 0: 2548 printf_color(ANSI_RED, " %5s", 2549 gettext("off")); 2550 break; 2551 case 1: 2552 printf(" %5s", gettext("on")); 2553 break; 2554 default: 2555 printf(" %5s", "-"); 2556 } 2557 } else { 2558 printf(" %5s", "-"); 2559 } 2560 } 2561 } 2562 2563 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 2564 &notpresent) == 0) { 2565 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); 2566 (void) printf(" %s %s", gettext("was"), path); 2567 } else if (vs->vs_aux != 0) { 2568 (void) printf(" "); 2569 color_start(ANSI_RED); 2570 switch (vs->vs_aux) { 2571 case VDEV_AUX_OPEN_FAILED: 2572 (void) printf(gettext("cannot open")); 2573 break; 2574 2575 case VDEV_AUX_BAD_GUID_SUM: 2576 (void) printf(gettext("missing device")); 2577 break; 2578 2579 case VDEV_AUX_NO_REPLICAS: 2580 (void) printf(gettext("insufficient replicas")); 2581 break; 2582 2583 case VDEV_AUX_VERSION_NEWER: 2584 (void) printf(gettext("newer version")); 2585 break; 2586 2587 case VDEV_AUX_UNSUP_FEAT: 2588 (void) printf(gettext("unsupported feature(s)")); 2589 break; 2590 2591 case VDEV_AUX_ASHIFT_TOO_BIG: 2592 (void) printf(gettext("unsupported minimum blocksize")); 2593 break; 2594 2595 case VDEV_AUX_SPARED: 2596 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 2597 &spare_cb.cb_guid) == 0); 2598 if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) { 2599 if (strcmp(zpool_get_name(spare_cb.cb_zhp), 2600 zpool_get_name(zhp)) == 0) 2601 (void) printf(gettext("currently in " 2602 "use")); 2603 else 2604 (void) printf(gettext("in use by " 2605 "pool '%s'"), 2606 zpool_get_name(spare_cb.cb_zhp)); 2607 zpool_close(spare_cb.cb_zhp); 2608 } else { 2609 (void)
printf(gettext("currently in use")); 2610 } 2611 break; 2612 2613 case VDEV_AUX_ERR_EXCEEDED: 2614 if (vs->vs_read_errors + vs->vs_write_errors + 2615 vs->vs_checksum_errors == 0 && children == 0 && 2616 vs->vs_slow_ios > 0) { 2617 (void) printf(gettext("too many slow I/Os")); 2618 } else { 2619 (void) printf(gettext("too many errors")); 2620 } 2621 break; 2622 2623 case VDEV_AUX_IO_FAILURE: 2624 (void) printf(gettext("experienced I/O failures")); 2625 break; 2626 2627 case VDEV_AUX_BAD_LOG: 2628 (void) printf(gettext("bad intent log")); 2629 break; 2630 2631 case VDEV_AUX_EXTERNAL: 2632 (void) printf(gettext("external device fault")); 2633 break; 2634 2635 case VDEV_AUX_SPLIT_POOL: 2636 (void) printf(gettext("split into new pool")); 2637 break; 2638 2639 case VDEV_AUX_ACTIVE: 2640 (void) printf(gettext("currently in use")); 2641 break; 2642 2643 case VDEV_AUX_CHILDREN_OFFLINE: 2644 (void) printf(gettext("all children offline")); 2645 break; 2646 2647 case VDEV_AUX_BAD_LABEL: 2648 (void) printf(gettext("invalid label")); 2649 break; 2650 2651 default: 2652 (void) printf(gettext("corrupted data")); 2653 break; 2654 } 2655 color_end(); 2656 } else if (children == 0 && !isspare && 2657 getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") == NULL && 2658 VDEV_STAT_VALID(vs_physical_ashift, vsc) && 2659 vs->vs_configured_ashift < vs->vs_physical_ashift) { 2660 (void) printf( 2661 gettext(" block size: %dB configured, %dB native"), 2662 1 << vs->vs_configured_ashift, 1 << vs->vs_physical_ashift); 2663 } 2664 2665 if (vs->vs_scan_removing != 0) { 2666 (void) printf(gettext(" (removing)")); 2667 } else if (VDEV_STAT_VALID(vs_noalloc, vsc) && vs->vs_noalloc != 0) { 2668 (void) printf(gettext(" (non-allocating)")); 2669 } 2670 2671 /* The root vdev has the scrub/resilver stats */ 2672 root = fnvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2673 ZPOOL_CONFIG_VDEV_TREE); 2674 (void) nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_SCAN_STATS, 2675 (uint64_t **)&ps, &c); 2676 2677 /* 2678 * If you force fault a drive that's resilvering, its scan stats can 2679 * get frozen in time, giving the false impression that it's 2680 * being resilvered. That's why we check the state to see if the vdev 2681 * is healthy before reporting "resilvering" or "repairing". 2682 */ 2683 if (ps != NULL && ps->pss_state == DSS_SCANNING && children == 0 && 2684 vs->vs_state == VDEV_STATE_HEALTHY) { 2685 if (vs->vs_scan_processed != 0) { 2686 (void) printf(gettext(" (%s)"), 2687 (ps->pss_func == POOL_SCAN_RESILVER) ? 2688 "resilvering" : "repairing"); 2689 } else if (vs->vs_resilver_deferred) { 2690 (void) printf(gettext(" (awaiting resilver)")); 2691 } 2692 } 2693 2694 /* The top-level vdevs have the rebuild stats */ 2695 if (vrs != NULL && vrs->vrs_state == VDEV_REBUILD_ACTIVE && 2696 children == 0 && vs->vs_state == VDEV_STATE_HEALTHY) { 2697 if (vs->vs_rebuild_processed != 0) { 2698 (void) printf(gettext(" (resilvering)")); 2699 } 2700 } 2701 2702 if (cb->vcdl != NULL) { 2703 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { 2704 printf(" "); 2705 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 2706 } 2707 } 2708 2709 /* Display vdev initialization and trim status for leaves. 
*/ 2710 if (children == 0) { 2711 print_status_initialize(vs, cb->cb_print_vdev_init); 2712 print_status_trim(vs, cb->cb_print_vdev_trim); 2713 } 2714 2715 (void) printf("\n"); 2716 2717 for (c = 0; c < children; c++) { 2718 uint64_t islog = B_FALSE, ishole = B_FALSE; 2719 2720 /* Don't print logs or holes here */ 2721 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2722 &islog); 2723 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2724 &ishole); 2725 if (islog || ishole) 2726 continue; 2727 /* Only print normal classes here */ 2728 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 2729 continue; 2730 2731 /* Provide vdev_rebuild_stats to children if available */ 2732 if (vrs == NULL) { 2733 (void) nvlist_lookup_uint64_array(nv, 2734 ZPOOL_CONFIG_REBUILD_STATS, 2735 (uint64_t **)&vrs, &i); 2736 } 2737 2738 vname = zpool_vdev_name(g_zfs, zhp, child[c], 2739 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2740 print_status_config(zhp, cb, vname, child[c], depth + 2, 2741 isspare, vrs); 2742 free(vname); 2743 } 2744 } 2745 2746 /* 2747 * Print the configuration of an exported pool. Iterate over all vdevs in the 2748 * pool, printing out the name and status for each one. 2749 */ 2750 static void 2751 print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv, 2752 int depth) 2753 { 2754 nvlist_t **child; 2755 uint_t c, children; 2756 vdev_stat_t *vs; 2757 const char *type; 2758 char *vname; 2759 2760 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0); 2761 if (strcmp(type, VDEV_TYPE_MISSING) == 0 || 2762 strcmp(type, VDEV_TYPE_HOLE) == 0) 2763 return; 2764 2765 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 2766 (uint64_t **)&vs, &c) == 0); 2767 2768 (void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name); 2769 (void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux)); 2770 2771 if (vs->vs_aux != 0) { 2772 (void) printf(" "); 2773 2774 switch (vs->vs_aux) { 2775 case VDEV_AUX_OPEN_FAILED: 2776 (void) printf(gettext("cannot open")); 2777 break; 2778 2779 case VDEV_AUX_BAD_GUID_SUM: 2780 (void) printf(gettext("missing device")); 2781 break; 2782 2783 case VDEV_AUX_NO_REPLICAS: 2784 (void) printf(gettext("insufficient replicas")); 2785 break; 2786 2787 case VDEV_AUX_VERSION_NEWER: 2788 (void) printf(gettext("newer version")); 2789 break; 2790 2791 case VDEV_AUX_UNSUP_FEAT: 2792 (void) printf(gettext("unsupported feature(s)")); 2793 break; 2794 2795 case VDEV_AUX_ERR_EXCEEDED: 2796 (void) printf(gettext("too many errors")); 2797 break; 2798 2799 case VDEV_AUX_ACTIVE: 2800 (void) printf(gettext("currently in use")); 2801 break; 2802 2803 case VDEV_AUX_CHILDREN_OFFLINE: 2804 (void) printf(gettext("all children offline")); 2805 break; 2806 2807 case VDEV_AUX_BAD_LABEL: 2808 (void) printf(gettext("invalid label")); 2809 break; 2810 2811 default: 2812 (void) printf(gettext("corrupted data")); 2813 break; 2814 } 2815 } 2816 (void) printf("\n"); 2817 2818 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2819 &child, &children) != 0) 2820 return; 2821 2822 for (c = 0; c < children; c++) { 2823 uint64_t is_log = B_FALSE; 2824 2825 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2826 &is_log); 2827 if (is_log) 2828 continue; 2829 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 2830 continue; 2831 2832 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2833 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2834 print_import_config(cb, vname, child[c], depth + 2); 2835 free(vname); 2836 } 2837 2838 if 
(nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2839 &child, &children) == 0) { 2840 (void) printf(gettext("\tcache\n")); 2841 for (c = 0; c < children; c++) { 2842 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2843 cb->cb_name_flags); 2844 (void) printf("\t %s\n", vname); 2845 free(vname); 2846 } 2847 } 2848 2849 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2850 &child, &children) == 0) { 2851 (void) printf(gettext("\tspares\n")); 2852 for (c = 0; c < children; c++) { 2853 vname = zpool_vdev_name(g_zfs, NULL, child[c], 2854 cb->cb_name_flags); 2855 (void) printf("\t %s\n", vname); 2856 free(vname); 2857 } 2858 } 2859 } 2860 2861 /* 2862 * Print specialized class vdevs. 2863 * 2864 * These are recorded as top level vdevs in the main pool child array 2865 * but with "is_log" set to 1 or an "alloc_bias" string. We use either 2866 * print_status_config() or print_import_config() to print the top level 2867 * class vdevs then any of their children (eg mirrored slogs) are printed 2868 * recursively - which works because only the top level vdev is marked. 2869 */ 2870 static void 2871 print_class_vdevs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv, 2872 const char *class) 2873 { 2874 uint_t c, children; 2875 nvlist_t **child; 2876 boolean_t printed = B_FALSE; 2877 2878 assert(zhp != NULL || !cb->cb_verbose); 2879 2880 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, 2881 &children) != 0) 2882 return; 2883 2884 for (c = 0; c < children; c++) { 2885 uint64_t is_log = B_FALSE; 2886 const char *bias = NULL; 2887 const char *type = NULL; 2888 2889 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2890 &is_log); 2891 2892 if (is_log) { 2893 bias = (char *)VDEV_ALLOC_CLASS_LOGS; 2894 } else { 2895 (void) nvlist_lookup_string(child[c], 2896 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 2897 (void) nvlist_lookup_string(child[c], 2898 ZPOOL_CONFIG_TYPE, &type); 2899 } 2900 2901 if (bias == NULL || strcmp(bias, class) != 0) 2902 continue; 2903 if (!is_log && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 2904 continue; 2905 2906 if (!printed) { 2907 (void) printf("\t%s\t\n", gettext(class)); 2908 printed = B_TRUE; 2909 } 2910 2911 char *name = zpool_vdev_name(g_zfs, zhp, child[c], 2912 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 2913 if (cb->cb_print_status) 2914 print_status_config(zhp, cb, name, child[c], 2, 2915 B_FALSE, NULL); 2916 else 2917 print_import_config(cb, name, child[c], 2); 2918 free(name); 2919 } 2920 } 2921 2922 /* 2923 * Display the status for the given pool. 
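 * (Descriptive note, not in the original comment: this is the 'zpool import'
 * view of a discovered pool configuration. It prints the pool name, id and
 * state, an explanatory status/action message, and the vdev layout, and
 * returns the import status reason.)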
2924 */ 2925 static int 2926 show_import(nvlist_t *config, boolean_t report_error) 2927 { 2928 uint64_t pool_state; 2929 vdev_stat_t *vs; 2930 const char *name; 2931 uint64_t guid; 2932 uint64_t hostid = 0; 2933 const char *msgid; 2934 const char *hostname = "unknown"; 2935 nvlist_t *nvroot, *nvinfo; 2936 zpool_status_t reason; 2937 zpool_errata_t errata; 2938 const char *health; 2939 uint_t vsc; 2940 const char *comment; 2941 status_cbdata_t cb = { 0 }; 2942 2943 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 2944 &name) == 0); 2945 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 2946 &guid) == 0); 2947 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 2948 &pool_state) == 0); 2949 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2950 &nvroot) == 0); 2951 2952 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 2953 (uint64_t **)&vs, &vsc) == 0); 2954 health = zpool_state_to_name(vs->vs_state, vs->vs_aux); 2955 2956 reason = zpool_import_status(config, &msgid, &errata); 2957 2958 /* 2959 * If we're importing using a cachefile, then we won't report any 2960 * errors unless we are in the scan phase of the import. 2961 */ 2962 if (reason != ZPOOL_STATUS_OK && !report_error) 2963 return (reason); 2964 2965 (void) printf(gettext(" pool: %s\n"), name); 2966 (void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid); 2967 (void) printf(gettext(" state: %s"), health); 2968 if (pool_state == POOL_STATE_DESTROYED) 2969 (void) printf(gettext(" (DESTROYED)")); 2970 (void) printf("\n"); 2971 2972 switch (reason) { 2973 case ZPOOL_STATUS_MISSING_DEV_R: 2974 case ZPOOL_STATUS_MISSING_DEV_NR: 2975 case ZPOOL_STATUS_BAD_GUID_SUM: 2976 printf_color(ANSI_BOLD, gettext("status: ")); 2977 printf_color(ANSI_YELLOW, gettext("One or more devices are " 2978 "missing from the system.\n")); 2979 break; 2980 2981 case ZPOOL_STATUS_CORRUPT_LABEL_R: 2982 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 2983 printf_color(ANSI_BOLD, gettext("status: ")); 2984 printf_color(ANSI_YELLOW, gettext("One or more devices contains" 2985 " corrupted data.\n")); 2986 break; 2987 2988 case ZPOOL_STATUS_CORRUPT_DATA: 2989 (void) printf( 2990 gettext(" status: The pool data is corrupted.\n")); 2991 break; 2992 2993 case ZPOOL_STATUS_OFFLINE_DEV: 2994 printf_color(ANSI_BOLD, gettext("status: ")); 2995 printf_color(ANSI_YELLOW, gettext("One or more devices " 2996 "are offlined.\n")); 2997 break; 2998 2999 case ZPOOL_STATUS_CORRUPT_POOL: 3000 printf_color(ANSI_BOLD, gettext("status: ")); 3001 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 3002 "corrupted.\n")); 3003 break; 3004 3005 case ZPOOL_STATUS_VERSION_OLDER: 3006 printf_color(ANSI_BOLD, gettext("status: ")); 3007 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 3008 "a legacy on-disk version.\n")); 3009 break; 3010 3011 case ZPOOL_STATUS_VERSION_NEWER: 3012 printf_color(ANSI_BOLD, gettext("status: ")); 3013 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 3014 "an incompatible version.\n")); 3015 break; 3016 3017 case ZPOOL_STATUS_FEAT_DISABLED: 3018 printf_color(ANSI_BOLD, gettext("status: ")); 3019 printf_color(ANSI_YELLOW, gettext("Some supported " 3020 "features are not enabled on the pool.\n\t" 3021 "(Note that they may be intentionally disabled " 3022 "if the\n\t'compatibility' property is set.)\n")); 3023 break; 3024 3025 case ZPOOL_STATUS_COMPATIBILITY_ERR: 3026 printf_color(ANSI_BOLD, gettext("status: ")); 3027 printf_color(ANSI_YELLOW, gettext("Error reading or parsing " 3028 "the 
file(s) indicated by the 'compatibility'\n" 3029 "property.\n")); 3030 break; 3031 3032 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 3033 printf_color(ANSI_BOLD, gettext("status: ")); 3034 printf_color(ANSI_YELLOW, gettext("One or more features " 3035 "are enabled on the pool despite not being\n" 3036 "requested by the 'compatibility' property.\n")); 3037 break; 3038 3039 case ZPOOL_STATUS_UNSUP_FEAT_READ: 3040 printf_color(ANSI_BOLD, gettext("status: ")); 3041 printf_color(ANSI_YELLOW, gettext("The pool uses the following " 3042 "feature(s) not supported on this system:\n")); 3043 color_start(ANSI_YELLOW); 3044 zpool_print_unsup_feat(config); 3045 color_end(); 3046 break; 3047 3048 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 3049 printf_color(ANSI_BOLD, gettext("status: ")); 3050 printf_color(ANSI_YELLOW, gettext("The pool can only be " 3051 "accessed in read-only mode on this system. It\n\tcannot be" 3052 " accessed in read-write mode because it uses the " 3053 "following\n\tfeature(s) not supported on this system:\n")); 3054 color_start(ANSI_YELLOW); 3055 zpool_print_unsup_feat(config); 3056 color_end(); 3057 break; 3058 3059 case ZPOOL_STATUS_HOSTID_ACTIVE: 3060 printf_color(ANSI_BOLD, gettext("status: ")); 3061 printf_color(ANSI_YELLOW, gettext("The pool is currently " 3062 "imported by another system.\n")); 3063 break; 3064 3065 case ZPOOL_STATUS_HOSTID_REQUIRED: 3066 printf_color(ANSI_BOLD, gettext("status: ")); 3067 printf_color(ANSI_YELLOW, gettext("The pool has the " 3068 "multihost property on. It cannot\n\tbe safely imported " 3069 "when the system hostid is not set.\n")); 3070 break; 3071 3072 case ZPOOL_STATUS_HOSTID_MISMATCH: 3073 printf_color(ANSI_BOLD, gettext("status: ")); 3074 printf_color(ANSI_YELLOW, gettext("The pool was last accessed " 3075 "by another system.\n")); 3076 break; 3077 3078 case ZPOOL_STATUS_FAULTED_DEV_R: 3079 case ZPOOL_STATUS_FAULTED_DEV_NR: 3080 printf_color(ANSI_BOLD, gettext("status: ")); 3081 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3082 "faulted.\n")); 3083 break; 3084 3085 case ZPOOL_STATUS_BAD_LOG: 3086 printf_color(ANSI_BOLD, gettext("status: ")); 3087 printf_color(ANSI_YELLOW, gettext("An intent log record cannot " 3088 "be read.\n")); 3089 break; 3090 3091 case ZPOOL_STATUS_RESILVERING: 3092 case ZPOOL_STATUS_REBUILDING: 3093 printf_color(ANSI_BOLD, gettext("status: ")); 3094 printf_color(ANSI_YELLOW, gettext("One or more devices were " 3095 "being resilvered.\n")); 3096 break; 3097 3098 case ZPOOL_STATUS_ERRATA: 3099 printf_color(ANSI_BOLD, gettext("status: ")); 3100 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 3101 errata); 3102 break; 3103 3104 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 3105 printf_color(ANSI_BOLD, gettext("status: ")); 3106 printf_color(ANSI_YELLOW, gettext("One or more devices are " 3107 "configured to use a non-native block size.\n" 3108 "\tExpect reduced performance.\n")); 3109 break; 3110 3111 default: 3112 /* 3113 * No other status can be seen when importing pools. 3114 */ 3115 assert(reason == ZPOOL_STATUS_OK); 3116 } 3117 3118 /* 3119 * Print out an action according to the overall state of the pool. 
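 * (Descriptive note, not in the original comment: healthy pools get a plain
 * "can be imported" hint, degraded pools a warning about reduced fault
 * tolerance, and any other state an explanation of why the import cannot
 * proceed.)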
3120 */ 3121 if (vs->vs_state == VDEV_STATE_HEALTHY) { 3122 if (reason == ZPOOL_STATUS_VERSION_OLDER || 3123 reason == ZPOOL_STATUS_FEAT_DISABLED) { 3124 (void) printf(gettext(" action: The pool can be " 3125 "imported using its name or numeric identifier, " 3126 "though\n\tsome features will not be available " 3127 "without an explicit 'zpool upgrade'.\n")); 3128 } else if (reason == ZPOOL_STATUS_COMPATIBILITY_ERR) { 3129 (void) printf(gettext(" action: The pool can be " 3130 "imported using its name or numeric\n\tidentifier, " 3131 "though the file(s) indicated by its " 3132 "'compatibility'\n\tproperty cannot be parsed at " 3133 "this time.\n")); 3134 } else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) { 3135 (void) printf(gettext(" action: The pool can be " 3136 "imported using its name or numeric " 3137 "identifier and\n\tthe '-f' flag.\n")); 3138 } else if (reason == ZPOOL_STATUS_ERRATA) { 3139 switch (errata) { 3140 case ZPOOL_ERRATA_NONE: 3141 break; 3142 3143 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 3144 (void) printf(gettext(" action: The pool can " 3145 "be imported using its name or numeric " 3146 "identifier,\n\thowever there is a compat" 3147 "ibility issue which should be corrected" 3148 "\n\tby running 'zpool scrub'\n")); 3149 break; 3150 3151 case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY: 3152 (void) printf(gettext(" action: The pool can" 3153 "not be imported with this version of ZFS " 3154 "due to\n\tan active asynchronous destroy. " 3155 "Revert to an earlier version\n\tand " 3156 "allow the destroy to complete before " 3157 "updating.\n")); 3158 break; 3159 3160 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 3161 (void) printf(gettext(" action: Existing " 3162 "encrypted datasets contain an on-disk " 3163 "incompatibility, which\n\tneeds to be " 3164 "corrected. Backup these datasets to new " 3165 "encrypted datasets\n\tand destroy the " 3166 "old ones.\n")); 3167 break; 3168 3169 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 3170 (void) printf(gettext(" action: Existing " 3171 "encrypted snapshots and bookmarks contain " 3172 "an on-disk\n\tincompatibility. This may " 3173 "cause on-disk corruption if they are used" 3174 "\n\twith 'zfs recv'. To correct the " 3175 "issue, enable the bookmark_v2 feature.\n\t" 3176 "No additional action is needed if there " 3177 "are no encrypted snapshots or\n\t" 3178 "bookmarks. If preserving the encrypted " 3179 "snapshots and bookmarks is\n\trequired, " 3180 "use a non-raw send to backup and restore " 3181 "them. Alternately,\n\tthey may be removed" 3182 " to resolve the incompatibility.\n")); 3183 break; 3184 default: 3185 /* 3186 * All errata must contain an action message. 3187 */ 3188 assert(0); 3189 } 3190 } else { 3191 (void) printf(gettext(" action: The pool can be " 3192 "imported using its name or numeric " 3193 "identifier.\n")); 3194 } 3195 } else if (vs->vs_state == VDEV_STATE_DEGRADED) { 3196 (void) printf(gettext(" action: The pool can be imported " 3197 "despite missing or damaged devices. The\n\tfault " 3198 "tolerance of the pool may be compromised if imported.\n")); 3199 } else { 3200 switch (reason) { 3201 case ZPOOL_STATUS_VERSION_NEWER: 3202 (void) printf(gettext(" action: The pool cannot be " 3203 "imported. Access the pool on a system running " 3204 "newer\n\tsoftware, or recreate the pool from " 3205 "backup.\n")); 3206 break; 3207 case ZPOOL_STATUS_UNSUP_FEAT_READ: 3208 printf_color(ANSI_BOLD, gettext("action: ")); 3209 printf_color(ANSI_YELLOW, gettext("The pool cannot be " 3210 "imported. 
Access the pool on a system that " 3211 "supports\n\tthe required feature(s), or recreate " 3212 "the pool from backup.\n")); 3213 break; 3214 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 3215 printf_color(ANSI_BOLD, gettext("action: ")); 3216 printf_color(ANSI_YELLOW, gettext("The pool cannot be " 3217 "imported in read-write mode. Import the pool " 3218 "with\n" 3219 "\t\"-o readonly=on\", access the pool on a system " 3220 "that supports the\n\trequired feature(s), or " 3221 "recreate the pool from backup.\n")); 3222 break; 3223 case ZPOOL_STATUS_MISSING_DEV_R: 3224 case ZPOOL_STATUS_MISSING_DEV_NR: 3225 case ZPOOL_STATUS_BAD_GUID_SUM: 3226 (void) printf(gettext(" action: The pool cannot be " 3227 "imported. Attach the missing\n\tdevices and try " 3228 "again.\n")); 3229 break; 3230 case ZPOOL_STATUS_HOSTID_ACTIVE: 3231 VERIFY0(nvlist_lookup_nvlist(config, 3232 ZPOOL_CONFIG_LOAD_INFO, &nvinfo)); 3233 3234 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3235 hostname = fnvlist_lookup_string(nvinfo, 3236 ZPOOL_CONFIG_MMP_HOSTNAME); 3237 3238 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3239 hostid = fnvlist_lookup_uint64(nvinfo, 3240 ZPOOL_CONFIG_MMP_HOSTID); 3241 3242 (void) printf(gettext(" action: The pool must be " 3243 "exported from %s (hostid=%"PRIx64")\n\tbefore it " 3244 "can be safely imported.\n"), hostname, hostid); 3245 break; 3246 case ZPOOL_STATUS_HOSTID_REQUIRED: 3247 (void) printf(gettext(" action: Set a unique system " 3248 "hostid with the zgenhostid(8) command.\n")); 3249 break; 3250 default: 3251 (void) printf(gettext(" action: The pool cannot be " 3252 "imported due to damaged devices or data.\n")); 3253 } 3254 } 3255 3256 /* Print the comment attached to the pool. */ 3257 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 3258 (void) printf(gettext("comment: %s\n"), comment); 3259 3260 /* 3261 * If the state is "closed" or "can't open", and the aux state 3262 * is "corrupt data": 3263 */ 3264 if (((vs->vs_state == VDEV_STATE_CLOSED) || 3265 (vs->vs_state == VDEV_STATE_CANT_OPEN)) && 3266 (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) { 3267 if (pool_state == POOL_STATE_DESTROYED) 3268 (void) printf(gettext("\tThe pool was destroyed, " 3269 "but can be imported using the '-Df' flags.\n")); 3270 else if (pool_state != POOL_STATE_EXPORTED) 3271 (void) printf(gettext("\tThe pool may be active on " 3272 "another system, but can be imported using\n\t" 3273 "the '-f' flag.\n")); 3274 } 3275 3276 if (msgid != NULL) { 3277 (void) printf(gettext( 3278 " see: https://openzfs.github.io/openzfs-docs/msg/%s\n"), 3279 msgid); 3280 } 3281 3282 (void) printf(gettext(" config:\n\n")); 3283 3284 cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name), 3285 VDEV_NAME_TYPE_ID); 3286 if (cb.cb_namewidth < 10) 3287 cb.cb_namewidth = 10; 3288 3289 print_import_config(&cb, name, nvroot, 0); 3290 3291 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_DEDUP); 3292 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 3293 print_class_vdevs(NULL, &cb, nvroot, VDEV_ALLOC_CLASS_LOGS); 3294 3295 if (reason == ZPOOL_STATUS_BAD_GUID_SUM) { 3296 (void) printf(gettext("\n\tAdditional devices are known to " 3297 "be part of this pool, though their\n\texact " 3298 "configuration cannot be determined.\n")); 3299 } 3300 return (0); 3301 } 3302 3303 static boolean_t 3304 zfs_force_import_required(nvlist_t *config) 3305 { 3306 uint64_t state; 3307 uint64_t hostid = 0; 3308 nvlist_t *nvinfo; 3309 3310 state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE); 3311 nvinfo = 
fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3312 3313 /* 3314 * The hostid on LOAD_INFO comes from the MOS label via 3315 * spa_tryimport(). If its not there then we're likely talking to an 3316 * older kernel, so use the top one, which will be from the label 3317 * discovered in zpool_find_import(), or if a cachefile is in use, the 3318 * local hostid. 3319 */ 3320 if (nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_HOSTID, &hostid) != 0) 3321 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, 3322 &hostid); 3323 3324 if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid()) 3325 return (B_TRUE); 3326 3327 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) { 3328 mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo, 3329 ZPOOL_CONFIG_MMP_STATE); 3330 3331 if (mmp_state != MMP_STATE_INACTIVE) 3332 return (B_TRUE); 3333 } 3334 3335 return (B_FALSE); 3336 } 3337 3338 /* 3339 * Perform the import for the given configuration. This passes the heavy 3340 * lifting off to zpool_import_props(), and then mounts the datasets contained 3341 * within the pool. 3342 */ 3343 static int 3344 do_import(nvlist_t *config, const char *newname, const char *mntopts, 3345 nvlist_t *props, int flags) 3346 { 3347 int ret = 0; 3348 int ms_status = 0; 3349 zpool_handle_t *zhp; 3350 const char *name; 3351 uint64_t version; 3352 3353 name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME); 3354 version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION); 3355 3356 if (!SPA_VERSION_IS_SUPPORTED(version)) { 3357 (void) fprintf(stderr, gettext("cannot import '%s': pool " 3358 "is formatted using an unsupported ZFS version\n"), name); 3359 return (1); 3360 } else if (zfs_force_import_required(config) && 3361 !(flags & ZFS_IMPORT_ANY_HOST)) { 3362 mmp_state_t mmp_state = MMP_STATE_INACTIVE; 3363 nvlist_t *nvinfo; 3364 3365 nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO); 3366 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) 3367 mmp_state = fnvlist_lookup_uint64(nvinfo, 3368 ZPOOL_CONFIG_MMP_STATE); 3369 3370 if (mmp_state == MMP_STATE_ACTIVE) { 3371 const char *hostname = "<unknown>"; 3372 uint64_t hostid = 0; 3373 3374 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME)) 3375 hostname = fnvlist_lookup_string(nvinfo, 3376 ZPOOL_CONFIG_MMP_HOSTNAME); 3377 3378 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID)) 3379 hostid = fnvlist_lookup_uint64(nvinfo, 3380 ZPOOL_CONFIG_MMP_HOSTID); 3381 3382 (void) fprintf(stderr, gettext("cannot import '%s': " 3383 "pool is imported on %s (hostid: " 3384 "0x%"PRIx64")\nExport the pool on the other " 3385 "system, then run 'zpool import'.\n"), 3386 name, hostname, hostid); 3387 } else if (mmp_state == MMP_STATE_NO_HOSTID) { 3388 (void) fprintf(stderr, gettext("Cannot import '%s': " 3389 "pool has the multihost property on and the\n" 3390 "system's hostid is not set. 
Set a unique hostid " 3391 "with the zgenhostid(8) command.\n"), name); 3392 } else { 3393 const char *hostname = "<unknown>"; 3394 time_t timestamp = 0; 3395 uint64_t hostid = 0; 3396 3397 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTNAME)) 3398 hostname = fnvlist_lookup_string(nvinfo, 3399 ZPOOL_CONFIG_HOSTNAME); 3400 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME)) 3401 hostname = fnvlist_lookup_string(config, 3402 ZPOOL_CONFIG_HOSTNAME); 3403 3404 if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP)) 3405 timestamp = fnvlist_lookup_uint64(config, 3406 ZPOOL_CONFIG_TIMESTAMP); 3407 3408 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_HOSTID)) 3409 hostid = fnvlist_lookup_uint64(nvinfo, 3410 ZPOOL_CONFIG_HOSTID); 3411 else if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID)) 3412 hostid = fnvlist_lookup_uint64(config, 3413 ZPOOL_CONFIG_HOSTID); 3414 3415 (void) fprintf(stderr, gettext("cannot import '%s': " 3416 "pool was previously in use from another system.\n" 3417 "Last accessed by %s (hostid=%"PRIx64") at %s" 3418 "The pool can be imported, use 'zpool import -f' " 3419 "to import the pool.\n"), name, hostname, 3420 hostid, ctime(&timestamp)); 3421 } 3422 3423 return (1); 3424 } 3425 3426 if (zpool_import_props(g_zfs, config, newname, props, flags) != 0) 3427 return (1); 3428 3429 if (newname != NULL) 3430 name = newname; 3431 3432 if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL) 3433 return (1); 3434 3435 /* 3436 * Loading keys is best effort. We don't want to return immediately 3437 * if it fails but we do want to give the error to the caller. 3438 */ 3439 if (flags & ZFS_IMPORT_LOAD_KEYS && 3440 zfs_crypto_attempt_load_keys(g_zfs, name) != 0) 3441 ret = 1; 3442 3443 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL && 3444 !(flags & ZFS_IMPORT_ONLY)) { 3445 ms_status = zpool_enable_datasets(zhp, mntopts, 0); 3446 if (ms_status == EZFS_SHAREFAILED) { 3447 (void) fprintf(stderr, gettext("Import was " 3448 "successful, but unable to share some datasets\n")); 3449 } else if (ms_status == EZFS_MOUNTFAILED) { 3450 (void) fprintf(stderr, gettext("Import was " 3451 "successful, but unable to mount some datasets\n")); 3452 } 3453 } 3454 3455 zpool_close(zhp); 3456 return (ret); 3457 } 3458 3459 typedef struct import_parameters { 3460 nvlist_t *ip_config; 3461 const char *ip_mntopts; 3462 nvlist_t *ip_props; 3463 int ip_flags; 3464 int *ip_err; 3465 } import_parameters_t; 3466 3467 static void 3468 do_import_task(void *arg) 3469 { 3470 import_parameters_t *ip = arg; 3471 *ip->ip_err |= do_import(ip->ip_config, NULL, ip->ip_mntopts, 3472 ip->ip_props, ip->ip_flags); 3473 free(ip); 3474 } 3475 3476 3477 static int 3478 import_pools(nvlist_t *pools, nvlist_t *props, char *mntopts, int flags, 3479 char *orig_name, char *new_name, importargs_t *import) 3480 { 3481 nvlist_t *config = NULL; 3482 nvlist_t *found_config = NULL; 3483 uint64_t pool_state; 3484 boolean_t pool_specified = (import->poolname != NULL || 3485 import->guid != 0); 3486 3487 3488 tpool_t *tp = NULL; 3489 if (import->do_all) { 3490 tp = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 3491 0, NULL); 3492 } 3493 3494 /* 3495 * At this point we have a list of import candidate configs. Even if 3496 * we were searching by pool name or guid, we still need to 3497 * post-process the list to deal with pool state and possible 3498 * duplicate names.
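 * (Descriptive note, not in the original comment: destroyed pools are skipped
 * unless -D was given, the load policy is attached to each candidate config,
 * and with -a each candidate import is dispatched to the thread pool created
 * above.)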
3499 */ 3500 int err = 0; 3501 nvpair_t *elem = NULL; 3502 boolean_t first = B_TRUE; 3503 while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) { 3504 3505 verify(nvpair_value_nvlist(elem, &config) == 0); 3506 3507 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 3508 &pool_state) == 0); 3509 if (!import->do_destroyed && 3510 pool_state == POOL_STATE_DESTROYED) 3511 continue; 3512 if (import->do_destroyed && 3513 pool_state != POOL_STATE_DESTROYED) 3514 continue; 3515 3516 verify(nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY, 3517 import->policy) == 0); 3518 3519 if (!pool_specified) { 3520 if (first) 3521 first = B_FALSE; 3522 else if (!import->do_all) 3523 (void) fputc('\n', stdout); 3524 3525 if (import->do_all) { 3526 import_parameters_t *ip = safe_malloc( 3527 sizeof (import_parameters_t)); 3528 3529 ip->ip_config = config; 3530 ip->ip_mntopts = mntopts; 3531 ip->ip_props = props; 3532 ip->ip_flags = flags; 3533 ip->ip_err = &err; 3534 3535 (void) tpool_dispatch(tp, do_import_task, 3536 (void *)ip); 3537 } else { 3538 /* 3539 * If we're importing from cachefile, then 3540 * we don't want to report errors until we 3541 * are in the scan phase of the import. If 3542 * we get an error, then we return that error 3543 * to invoke the scan phase. 3544 */ 3545 if (import->cachefile && !import->scan) 3546 err = show_import(config, B_FALSE); 3547 else 3548 (void) show_import(config, B_TRUE); 3549 } 3550 } else if (import->poolname != NULL) { 3551 const char *name; 3552 3553 /* 3554 * We are searching for a pool based on name. 3555 */ 3556 verify(nvlist_lookup_string(config, 3557 ZPOOL_CONFIG_POOL_NAME, &name) == 0); 3558 3559 if (strcmp(name, import->poolname) == 0) { 3560 if (found_config != NULL) { 3561 (void) fprintf(stderr, gettext( 3562 "cannot import '%s': more than " 3563 "one matching pool\n"), 3564 import->poolname); 3565 (void) fprintf(stderr, gettext( 3566 "import by numeric ID instead\n")); 3567 err = B_TRUE; 3568 } 3569 found_config = config; 3570 } 3571 } else { 3572 uint64_t guid; 3573 3574 /* 3575 * Search for a pool by guid. 3576 */ 3577 verify(nvlist_lookup_uint64(config, 3578 ZPOOL_CONFIG_POOL_GUID, &guid) == 0); 3579 3580 if (guid == import->guid) 3581 found_config = config; 3582 } 3583 } 3584 if (import->do_all) { 3585 tpool_wait(tp); 3586 tpool_destroy(tp); 3587 } 3588 3589 /* 3590 * If we were searching for a specific pool, verify that we found a 3591 * pool, and then do the import. 3592 */ 3593 if (pool_specified && err == 0) { 3594 if (found_config == NULL) { 3595 (void) fprintf(stderr, gettext("cannot import '%s': " 3596 "no such pool available\n"), orig_name); 3597 err = B_TRUE; 3598 } else { 3599 err |= do_import(found_config, new_name, 3600 mntopts, props, flags); 3601 } 3602 } 3603 3604 /* 3605 * If we were just looking for pools, report an error if none were 3606 * found. 
3607 */ 3608 if (!pool_specified && first) 3609 (void) fprintf(stderr, 3610 gettext("no pools available to import\n")); 3611 return (err); 3612 } 3613 3614 typedef struct target_exists_args { 3615 const char *poolname; 3616 uint64_t poolguid; 3617 } target_exists_args_t; 3618 3619 static int 3620 name_or_guid_exists(zpool_handle_t *zhp, void *data) 3621 { 3622 target_exists_args_t *args = data; 3623 nvlist_t *config = zpool_get_config(zhp, NULL); 3624 int found = 0; 3625 3626 if (config == NULL) 3627 return (0); 3628 3629 if (args->poolname != NULL) { 3630 const char *pool_name; 3631 3632 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3633 &pool_name) == 0); 3634 if (strcmp(pool_name, args->poolname) == 0) 3635 found = 1; 3636 } else { 3637 uint64_t pool_guid; 3638 3639 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3640 &pool_guid) == 0); 3641 if (pool_guid == args->poolguid) 3642 found = 1; 3643 } 3644 zpool_close(zhp); 3645 3646 return (found); 3647 } 3648 /* 3649 * zpool checkpoint <pool> 3650 * checkpoint --discard <pool> 3651 * 3652 * -d Discard the checkpoint from a checkpointed 3653 * --discard pool. 3654 * 3655 * -w Wait for discarding a checkpoint to complete. 3656 * --wait 3657 * 3658 * Checkpoints the specified pool, by taking a "snapshot" of its 3659 * current state. A pool can only have one checkpoint at a time. 3660 */ 3661 int 3662 zpool_do_checkpoint(int argc, char **argv) 3663 { 3664 boolean_t discard, wait; 3665 char *pool; 3666 zpool_handle_t *zhp; 3667 int c, err; 3668 3669 struct option long_options[] = { 3670 {"discard", no_argument, NULL, 'd'}, 3671 {"wait", no_argument, NULL, 'w'}, 3672 {0, 0, 0, 0} 3673 }; 3674 3675 discard = B_FALSE; 3676 wait = B_FALSE; 3677 while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) { 3678 switch (c) { 3679 case 'd': 3680 discard = B_TRUE; 3681 break; 3682 case 'w': 3683 wait = B_TRUE; 3684 break; 3685 case '?': 3686 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 3687 optopt); 3688 usage(B_FALSE); 3689 } 3690 } 3691 3692 if (wait && !discard) { 3693 (void) fprintf(stderr, gettext("--wait only valid when " 3694 "--discard also specified\n")); 3695 usage(B_FALSE); 3696 } 3697 3698 argc -= optind; 3699 argv += optind; 3700 3701 if (argc < 1) { 3702 (void) fprintf(stderr, gettext("missing pool argument\n")); 3703 usage(B_FALSE); 3704 } 3705 3706 if (argc > 1) { 3707 (void) fprintf(stderr, gettext("too many arguments\n")); 3708 usage(B_FALSE); 3709 } 3710 3711 pool = argv[0]; 3712 3713 if ((zhp = zpool_open(g_zfs, pool)) == NULL) { 3714 /* As a special case, check for use of '/' in the name */ 3715 if (strchr(pool, '/') != NULL) 3716 (void) fprintf(stderr, gettext("'zpool checkpoint' " 3717 "doesn't work on datasets. To save the state " 3718 "of a dataset from a specific point in time " 3719 "please use 'zfs snapshot'\n")); 3720 return (1); 3721 } 3722 3723 if (discard) { 3724 err = (zpool_discard_checkpoint(zhp) != 0); 3725 if (err == 0 && wait) 3726 err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD); 3727 } else { 3728 err = (zpool_checkpoint(zhp) != 0); 3729 } 3730 3731 zpool_close(zhp); 3732 3733 return (err); 3734 } 3735 3736 #define CHECKPOINT_OPT 1024 3737 3738 /* 3739 * zpool import [-d dir] [-D] 3740 * import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l] 3741 * [-d dir | -c cachefile | -s] [-f] -a 3742 * import [-o mntopts] [-o prop=value] ... 
[-R root] [-D] [-l] 3743 * [-d dir | -c cachefile | -s] [-f] [-n] [-F] <pool | id> 3744 * [newpool] 3745 * 3746 * -c Read pool information from a cachefile instead of searching 3747 * devices. If importing from a cachefile config fails, then 3748 * fall back to searching for devices only in the directories that 3749 * exist in the cachefile. 3750 * 3751 * -d Scan in a specific directory, other than /dev/. More than 3752 * one directory can be specified using multiple '-d' options. 3753 * 3754 * -D Scan for previously destroyed pools or import all or only 3755 * specified destroyed pools. 3756 * 3757 * -R Temporarily import the pool, with all mountpoints relative to 3758 * the given root. The pool will remain exported when the machine 3759 * is rebooted. 3760 * 3761 * -V Import even in the presence of faulted vdevs. This is an 3762 * intentionally undocumented option for testing purposes, and 3763 * treats the pool configuration as complete, leaving any bad 3764 * vdevs in the FAULTED state. In other words, it does a verbatim 3765 * import. 3766 * 3767 * -f Force import, even if it appears that the pool is active. 3768 * 3769 * -F Attempt rewind if necessary. 3770 * 3771 * -n See if rewind would work, but don't actually rewind. 3772 * 3773 * -N Import the pool but don't mount datasets. 3774 * 3775 * -T Specify a starting txg to use for import. This option is 3776 * intentionally undocumented and intended for testing purposes. 3777 * 3778 * -a Import all pools found. 3779 * 3780 * -l Load encryption keys while importing. 3781 * 3782 * -o Set property=value and/or temporary mount options (without '='). 3783 * 3784 * -s Scan using the default search path; the libblkid cache will 3785 * not be consulted. 3786 * 3787 * --rewind-to-checkpoint 3788 * Import the pool and revert to the checkpoint. 3789 * 3790 * The import command scans for pools to import, and imports pools based on pool 3791 * name and GUID. The pool can also be renamed as part of the import process.
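 *
 * Illustrative examples (not part of the original comment; pool names are
 * placeholders, shown only to make the option summary above concrete):
 *
 *	zpool import                          list importable pools found on the
 *	                                      default search paths
 *	zpool import -d /dev/disk/by-id tank  import 'tank', searching only the
 *	                                      given directory
 *	zpool import -D -f tank               force-import the previously
 *	                                      destroyed pool 'tank'
 *	zpool import tank newtank             import 'tank', renaming it to
 *	                                      'newtank'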
3792 */ 3793 int 3794 zpool_do_import(int argc, char **argv) 3795 { 3796 char **searchdirs = NULL; 3797 char *env, *envdup = NULL; 3798 int nsearch = 0; 3799 int c; 3800 int err = 0; 3801 nvlist_t *pools = NULL; 3802 boolean_t do_all = B_FALSE; 3803 boolean_t do_destroyed = B_FALSE; 3804 char *mntopts = NULL; 3805 uint64_t searchguid = 0; 3806 char *searchname = NULL; 3807 char *propval; 3808 nvlist_t *policy = NULL; 3809 nvlist_t *props = NULL; 3810 int flags = ZFS_IMPORT_NORMAL; 3811 uint32_t rewind_policy = ZPOOL_NO_REWIND; 3812 boolean_t dryrun = B_FALSE; 3813 boolean_t do_rewind = B_FALSE; 3814 boolean_t xtreme_rewind = B_FALSE; 3815 boolean_t do_scan = B_FALSE; 3816 boolean_t pool_exists = B_FALSE; 3817 uint64_t txg = -1ULL; 3818 char *cachefile = NULL; 3819 importargs_t idata = { 0 }; 3820 char *endptr; 3821 3822 struct option long_options[] = { 3823 {"rewind-to-checkpoint", no_argument, NULL, CHECKPOINT_OPT}, 3824 {0, 0, 0, 0} 3825 }; 3826 3827 /* check options */ 3828 while ((c = getopt_long(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX", 3829 long_options, NULL)) != -1) { 3830 switch (c) { 3831 case 'a': 3832 do_all = B_TRUE; 3833 break; 3834 case 'c': 3835 cachefile = optarg; 3836 break; 3837 case 'd': 3838 searchdirs = safe_realloc(searchdirs, 3839 (nsearch + 1) * sizeof (char *)); 3840 searchdirs[nsearch++] = optarg; 3841 break; 3842 case 'D': 3843 do_destroyed = B_TRUE; 3844 break; 3845 case 'f': 3846 flags |= ZFS_IMPORT_ANY_HOST; 3847 break; 3848 case 'F': 3849 do_rewind = B_TRUE; 3850 break; 3851 case 'l': 3852 flags |= ZFS_IMPORT_LOAD_KEYS; 3853 break; 3854 case 'm': 3855 flags |= ZFS_IMPORT_MISSING_LOG; 3856 break; 3857 case 'n': 3858 dryrun = B_TRUE; 3859 break; 3860 case 'N': 3861 flags |= ZFS_IMPORT_ONLY; 3862 break; 3863 case 'o': 3864 if ((propval = strchr(optarg, '=')) != NULL) { 3865 *propval = '\0'; 3866 propval++; 3867 if (add_prop_list(optarg, propval, 3868 &props, B_TRUE)) 3869 goto error; 3870 } else { 3871 mntopts = optarg; 3872 } 3873 break; 3874 case 'R': 3875 if (add_prop_list(zpool_prop_to_name( 3876 ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE)) 3877 goto error; 3878 if (add_prop_list_default(zpool_prop_to_name( 3879 ZPOOL_PROP_CACHEFILE), "none", &props)) 3880 goto error; 3881 break; 3882 case 's': 3883 do_scan = B_TRUE; 3884 break; 3885 case 't': 3886 flags |= ZFS_IMPORT_TEMP_NAME; 3887 if (add_prop_list_default(zpool_prop_to_name( 3888 ZPOOL_PROP_CACHEFILE), "none", &props)) 3889 goto error; 3890 break; 3891 3892 case 'T': 3893 errno = 0; 3894 txg = strtoull(optarg, &endptr, 0); 3895 if (errno != 0 || *endptr != '\0') { 3896 (void) fprintf(stderr, 3897 gettext("invalid txg value\n")); 3898 usage(B_FALSE); 3899 } 3900 rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND; 3901 break; 3902 case 'V': 3903 flags |= ZFS_IMPORT_VERBATIM; 3904 break; 3905 case 'X': 3906 xtreme_rewind = B_TRUE; 3907 break; 3908 case CHECKPOINT_OPT: 3909 flags |= ZFS_IMPORT_CHECKPOINT; 3910 break; 3911 case ':': 3912 (void) fprintf(stderr, gettext("missing argument for " 3913 "'%c' option\n"), optopt); 3914 usage(B_FALSE); 3915 break; 3916 case '?': 3917 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 3918 optopt); 3919 usage(B_FALSE); 3920 } 3921 } 3922 3923 argc -= optind; 3924 argv += optind; 3925 3926 if (cachefile && nsearch != 0) { 3927 (void) fprintf(stderr, gettext("-c is incompatible with -d\n")); 3928 usage(B_FALSE); 3929 } 3930 3931 if (cachefile && do_scan) { 3932 (void) fprintf(stderr, gettext("-c is incompatible with -s\n")); 3933 usage(B_FALSE); 3934 } 3935 
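	/*
	 * Descriptive note, not in the original source: the remaining option
	 * sanity checks below reject key loading (-l) when combined with -N
	 * or when no import target is given, and reject the rewind modifiers
	 * -n/-X unless -F was also specified.
	 */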
3936 if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) { 3937 (void) fprintf(stderr, gettext("-l is incompatible with -N\n")); 3938 usage(B_FALSE); 3939 } 3940 3941 if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) { 3942 (void) fprintf(stderr, gettext("-l is only meaningful during " 3943 "an import\n")); 3944 usage(B_FALSE); 3945 } 3946 3947 if ((dryrun || xtreme_rewind) && !do_rewind) { 3948 (void) fprintf(stderr, 3949 gettext("-n or -X only meaningful with -F\n")); 3950 usage(B_FALSE); 3951 } 3952 if (dryrun) 3953 rewind_policy = ZPOOL_TRY_REWIND; 3954 else if (do_rewind) 3955 rewind_policy = ZPOOL_DO_REWIND; 3956 if (xtreme_rewind) 3957 rewind_policy |= ZPOOL_EXTREME_REWIND; 3958 3959 /* In the future, we can capture further policy and include it here */ 3960 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 3961 nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, txg) != 0 || 3962 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 3963 rewind_policy) != 0) 3964 goto error; 3965 3966 /* check argument count */ 3967 if (do_all) { 3968 if (argc != 0) { 3969 (void) fprintf(stderr, gettext("too many arguments\n")); 3970 usage(B_FALSE); 3971 } 3972 } else { 3973 if (argc > 2) { 3974 (void) fprintf(stderr, gettext("too many arguments\n")); 3975 usage(B_FALSE); 3976 } 3977 } 3978 3979 /* 3980 * Check for the effective uid. We do this explicitly here because 3981 * otherwise any attempt to discover pools will silently fail. 3982 */ 3983 if (argc == 0 && geteuid() != 0) { 3984 (void) fprintf(stderr, gettext("cannot " 3985 "discover pools: permission denied\n")); 3986 3987 free(searchdirs); 3988 nvlist_free(props); 3989 nvlist_free(policy); 3990 return (1); 3991 } 3992 3993 /* 3994 * Depending on the arguments given, we do one of the following: 3995 * 3996 * <none> Iterate through all pools and display information about 3997 * each one. 3998 * 3999 * -a Iterate through all pools and try to import each one. 4000 * 4001 * <id> Find the pool that corresponds to the given GUID/pool 4002 * name and import that one. 4003 * 4004 * -D Above options applies only to destroyed pools. 4005 */ 4006 if (argc != 0) { 4007 char *endptr; 4008 4009 errno = 0; 4010 searchguid = strtoull(argv[0], &endptr, 10); 4011 if (errno != 0 || *endptr != '\0') { 4012 searchname = argv[0]; 4013 searchguid = 0; 4014 } 4015 4016 /* 4017 * User specified a name or guid. Ensure it's unique. 4018 */ 4019 target_exists_args_t search = {searchname, searchguid}; 4020 pool_exists = zpool_iter(g_zfs, name_or_guid_exists, &search); 4021 } 4022 4023 /* 4024 * Check the environment for the preferred search path. 
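 * ZPOOL_IMPORT_PATH is a colon-separated list of directories and is split
 * below exactly like repeated -d options. An illustrative setting (the
 * directories are placeholders):
 *
 *	ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:/dev/disk/by-id" zpool import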
4025 */ 4026 if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) { 4027 char *dir, *tmp = NULL; 4028 4029 envdup = strdup(env); 4030 4031 for (dir = strtok_r(envdup, ":", &tmp); 4032 dir != NULL; 4033 dir = strtok_r(NULL, ":", &tmp)) { 4034 searchdirs = safe_realloc(searchdirs, 4035 (nsearch + 1) * sizeof (char *)); 4036 searchdirs[nsearch++] = dir; 4037 } 4038 } 4039 4040 idata.path = searchdirs; 4041 idata.paths = nsearch; 4042 idata.poolname = searchname; 4043 idata.guid = searchguid; 4044 idata.cachefile = cachefile; 4045 idata.scan = do_scan; 4046 idata.policy = policy; 4047 idata.do_destroyed = do_destroyed; 4048 idata.do_all = do_all; 4049 4050 libpc_handle_t lpch = { 4051 .lpc_lib_handle = g_zfs, 4052 .lpc_ops = &libzfs_config_ops, 4053 .lpc_printerr = B_TRUE 4054 }; 4055 pools = zpool_search_import(&lpch, &idata); 4056 4057 if (pools != NULL && pool_exists && 4058 (argc == 1 || strcmp(argv[0], argv[1]) == 0)) { 4059 (void) fprintf(stderr, gettext("cannot import '%s': " 4060 "a pool with that name already exists\n"), 4061 argv[0]); 4062 (void) fprintf(stderr, gettext("use the form '%s " 4063 "<pool | id> <newpool>' to give it a new name\n"), 4064 "zpool import"); 4065 err = 1; 4066 } else if (pools == NULL && pool_exists) { 4067 (void) fprintf(stderr, gettext("cannot import '%s': " 4068 "a pool with that name is already created/imported,\n"), 4069 argv[0]); 4070 (void) fprintf(stderr, gettext("and no additional pools " 4071 "with that name were found\n")); 4072 err = 1; 4073 } else if (pools == NULL) { 4074 if (argc != 0) { 4075 (void) fprintf(stderr, gettext("cannot import '%s': " 4076 "no such pool available\n"), argv[0]); 4077 } 4078 err = 1; 4079 } 4080 4081 if (err == 1) { 4082 free(searchdirs); 4083 free(envdup); 4084 nvlist_free(policy); 4085 nvlist_free(pools); 4086 nvlist_free(props); 4087 return (1); 4088 } 4089 4090 err = import_pools(pools, props, mntopts, flags, 4091 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, &idata); 4092 4093 /* 4094 * If we're using the cachefile and we failed to import, then 4095 * fallback to scanning the directory for pools that match 4096 * those in the cachefile. 4097 */ 4098 if (err != 0 && cachefile != NULL) { 4099 (void) printf(gettext("cachefile import failed, retrying\n")); 4100 4101 /* 4102 * We use the scan flag to gather the directories that exist 4103 * in the cachefile. If we need to fallback to searching for 4104 * the pool config, we will only search devices in these 4105 * directories. 4106 */ 4107 idata.scan = B_TRUE; 4108 nvlist_free(pools); 4109 pools = zpool_search_import(&lpch, &idata); 4110 4111 err = import_pools(pools, props, mntopts, flags, 4112 argc >= 1 ? argv[0] : NULL, argc >= 2 ? argv[1] : NULL, 4113 &idata); 4114 } 4115 4116 error: 4117 nvlist_free(props); 4118 nvlist_free(pools); 4119 nvlist_free(policy); 4120 free(searchdirs); 4121 free(envdup); 4122 4123 return (err ? 1 : 0); 4124 } 4125 4126 /* 4127 * zpool sync [-f] [pool] ... 4128 * 4129 * -f (undocumented) force uberblock (and config including zpool cache file) 4130 * update. 4131 * 4132 * Sync the specified pool(s). 4133 * Without arguments "zpool sync" will sync all pools. 4134 * This command initiates TXG sync(s) and will return after the TXG(s) commit. 
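 * Illustrative usage ("tank" is a placeholder pool name):
 *
 *	zpool sync		# initiate a txg sync on every imported pool
 *	zpool sync tank		# sync only the pool named tank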
4135 * 4136 */ 4137 static int 4138 zpool_do_sync(int argc, char **argv) 4139 { 4140 int ret; 4141 boolean_t force = B_FALSE; 4142 4143 /* check options */ 4144 while ((ret = getopt(argc, argv, "f")) != -1) { 4145 switch (ret) { 4146 case 'f': 4147 force = B_TRUE; 4148 break; 4149 case '?': 4150 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 4151 optopt); 4152 usage(B_FALSE); 4153 } 4154 } 4155 4156 argc -= optind; 4157 argv += optind; 4158 4159 /* if argc == 0 we will execute zpool_sync_one on all pools */ 4160 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 4161 B_FALSE, zpool_sync_one, &force); 4162 4163 return (ret); 4164 } 4165 4166 typedef struct iostat_cbdata { 4167 uint64_t cb_flags; 4168 int cb_namewidth; 4169 int cb_iteration; 4170 boolean_t cb_verbose; 4171 boolean_t cb_literal; 4172 boolean_t cb_scripted; 4173 zpool_list_t *cb_list; 4174 vdev_cmd_data_list_t *vcdl; 4175 vdev_cbdata_t cb_vdevs; 4176 } iostat_cbdata_t; 4177 4178 /* iostat labels */ 4179 typedef struct name_and_columns { 4180 const char *name; /* Column name */ 4181 unsigned int columns; /* Center name to this number of columns */ 4182 } name_and_columns_t; 4183 4184 #define IOSTAT_MAX_LABELS 15 /* Max number of labels on one line */ 4185 4186 static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] = 4187 { 4188 [IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2}, 4189 {NULL}}, 4190 [IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 4191 {"asyncq_wait", 2}, {"scrub", 1}, {"trim", 1}, {"rebuild", 1}, 4192 {NULL}}, 4193 [IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2}, 4194 {"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2}, 4195 {"trimq_write", 2}, {"rebuildq_write", 2}, {NULL}}, 4196 [IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2}, 4197 {"asyncq_wait", 2}, {NULL}}, 4198 [IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2}, 4199 {"async_read", 2}, {"async_write", 2}, {"scrub", 2}, 4200 {"trim", 2}, {"rebuild", 2}, {NULL}}, 4201 }; 4202 4203 /* Shorthand - if "columns" field not set, default to 1 column */ 4204 static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] = 4205 { 4206 [IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"}, 4207 {"write"}, {NULL}}, 4208 [IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 4209 {"write"}, {"read"}, {"write"}, {"wait"}, {"wait"}, {"wait"}, 4210 {NULL}}, 4211 [IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"}, 4212 {"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"}, 4213 {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}}, 4214 [IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"}, 4215 {"write"}, {"read"}, {"write"}, {"scrub"}, {"trim"}, {"rebuild"}, 4216 {NULL}}, 4217 [IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 4218 {"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"}, 4219 {"ind"}, {"agg"}, {NULL}}, 4220 }; 4221 4222 static const char *histo_to_title[] = { 4223 [IOS_L_HISTO] = "latency", 4224 [IOS_RQ_HISTO] = "req_size", 4225 }; 4226 4227 /* 4228 * Return the number of labels in a null-terminated name_and_columns_t 4229 * array. 4230 * 4231 */ 4232 static unsigned int 4233 label_array_len(const name_and_columns_t *labels) 4234 { 4235 int i = 0; 4236 4237 while (labels[i].name) 4238 i++; 4239 4240 return (i); 4241 } 4242 4243 /* 4244 * Return the number of strings in a null-terminated string array. 
4245 * For example: 4246 * 4247 * const char foo[] = {"bar", "baz", NULL} 4248 * 4249 * returns 2 4250 */ 4251 static uint64_t 4252 str_array_len(const char *array[]) 4253 { 4254 uint64_t i = 0; 4255 while (array[i]) 4256 i++; 4257 4258 return (i); 4259 } 4260 4261 4262 /* 4263 * Return a default column width for default/latency/queue columns. This does 4264 * not include histograms, which have their columns autosized. 4265 */ 4266 static unsigned int 4267 default_column_width(iostat_cbdata_t *cb, enum iostat_type type) 4268 { 4269 unsigned long column_width = 5; /* Normal niceprint */ 4270 static unsigned long widths[] = { 4271 /* 4272 * Choose some sane default column sizes for printing the 4273 * raw numbers. 4274 */ 4275 [IOS_DEFAULT] = 15, /* 1PB capacity */ 4276 [IOS_LATENCY] = 10, /* 1B ns = 10sec */ 4277 [IOS_QUEUES] = 6, /* 1M queue entries */ 4278 [IOS_L_HISTO] = 10, /* 1B ns = 10sec */ 4279 [IOS_RQ_HISTO] = 6, /* 1M queue entries */ 4280 }; 4281 4282 if (cb->cb_literal) 4283 column_width = widths[type]; 4284 4285 return (column_width); 4286 } 4287 4288 /* 4289 * Print the column labels, i.e: 4290 * 4291 * capacity operations bandwidth 4292 * alloc free read write read write ... 4293 * 4294 * If force_column_width is set, use it for the column width. If not set, use 4295 * the default column width. 4296 */ 4297 static void 4298 print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width, 4299 const name_and_columns_t labels[][IOSTAT_MAX_LABELS]) 4300 { 4301 int i, idx, s; 4302 int text_start, rw_column_width, spaces_to_end; 4303 uint64_t flags = cb->cb_flags; 4304 uint64_t f; 4305 unsigned int column_width = force_column_width; 4306 4307 /* For each bit set in flags */ 4308 for (f = flags; f; f &= ~(1ULL << idx)) { 4309 idx = lowbit64(f) - 1; 4310 if (!force_column_width) 4311 column_width = default_column_width(cb, idx); 4312 /* Print our top labels centered over "read write" label. */ 4313 for (i = 0; i < label_array_len(labels[idx]); i++) { 4314 const char *name = labels[idx][i].name; 4315 /* 4316 * We treat labels[][].columns == 0 as shorthand 4317 * for one column. It makes writing out the label 4318 * tables more concise. 4319 */ 4320 unsigned int columns = MAX(1, labels[idx][i].columns); 4321 unsigned int slen = strlen(name); 4322 4323 rw_column_width = (column_width * columns) + 4324 (2 * (columns - 1)); 4325 4326 text_start = (int)((rw_column_width) / columns - 4327 slen / columns); 4328 if (text_start < 0) 4329 text_start = 0; 4330 4331 printf(" "); /* Two spaces between columns */ 4332 4333 /* Space from beginning of column to label */ 4334 for (s = 0; s < text_start; s++) 4335 printf(" "); 4336 4337 printf("%s", name); 4338 4339 /* Print space after label to end of column */ 4340 spaces_to_end = rw_column_width - text_start - slen; 4341 if (spaces_to_end < 0) 4342 spaces_to_end = 0; 4343 4344 for (s = 0; s < spaces_to_end; s++) 4345 printf(" "); 4346 } 4347 } 4348 } 4349 4350 4351 /* 4352 * print_cmd_columns - Print custom column titles from -c 4353 * 4354 * If the user specified the "zpool status|iostat -c" then print their custom 4355 * column titles in the header. For example, print_cmd_columns() would print 4356 * the " col1 col2" part of this: 4357 * 4358 * $ zpool iostat -vc 'echo col1=val1; echo col2=val2' 4359 * ... 
4360 * capacity operations bandwidth 4361 * pool alloc free read write read write col1 col2 4362 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4363 * mypool 269K 1008M 0 0 107 946 4364 * mirror 269K 1008M 0 0 107 946 4365 * sdb - - 0 0 102 473 val1 val2 4366 * sdc - - 0 0 5 473 val1 val2 4367 * ---------- ----- ----- ----- ----- ----- ----- ---- ---- 4368 */ 4369 static void 4370 print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes) 4371 { 4372 int i, j; 4373 vdev_cmd_data_t *data = &vcdl->data[0]; 4374 4375 if (vcdl->count == 0 || data == NULL) 4376 return; 4377 4378 /* 4379 * Each vdev cmd should have the same column names unless the user did 4380 * something weird with their cmd. Just take the column names from the 4381 * first vdev and assume it works for all of them. 4382 */ 4383 for (i = 0; i < vcdl->uniq_cols_cnt; i++) { 4384 printf(" "); 4385 if (use_dashes) { 4386 for (j = 0; j < vcdl->uniq_cols_width[i]; j++) 4387 printf("-"); 4388 } else { 4389 printf_color(ANSI_BOLD, "%*s", vcdl->uniq_cols_width[i], 4390 vcdl->uniq_cols[i]); 4391 } 4392 } 4393 } 4394 4395 4396 /* 4397 * Utility function to print out a line of dashes like: 4398 * 4399 * -------------------------------- ----- ----- ----- ----- ----- 4400 * 4401 * ...or a dashed named-row line like: 4402 * 4403 * logs - - - - - 4404 * 4405 * @cb: iostat data 4406 * 4407 * @force_column_width If non-zero, use the value as the column width. 4408 * Otherwise use the default column widths. 4409 * 4410 * @name: Print a dashed named-row line starting 4411 * with @name. Otherwise, print a regular 4412 * dashed line. 4413 */ 4414 static void 4415 print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width, 4416 const char *name) 4417 { 4418 int i; 4419 unsigned int namewidth; 4420 uint64_t flags = cb->cb_flags; 4421 uint64_t f; 4422 int idx; 4423 const name_and_columns_t *labels; 4424 const char *title; 4425 4426 4427 if (cb->cb_flags & IOS_ANYHISTO_M) { 4428 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 4429 } else if (cb->cb_vdevs.cb_names_count) { 4430 title = "vdev"; 4431 } else { 4432 title = "pool"; 4433 } 4434 4435 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 4436 name ? 
strlen(name) : 0); 4437 4438 4439 if (name) { 4440 printf("%-*s", namewidth, name); 4441 } else { 4442 for (i = 0; i < namewidth; i++) 4443 (void) printf("-"); 4444 } 4445 4446 /* For each bit in flags */ 4447 for (f = flags; f; f &= ~(1ULL << idx)) { 4448 unsigned int column_width; 4449 idx = lowbit64(f) - 1; 4450 if (force_column_width) 4451 column_width = force_column_width; 4452 else 4453 column_width = default_column_width(cb, idx); 4454 4455 labels = iostat_bottom_labels[idx]; 4456 for (i = 0; i < label_array_len(labels); i++) { 4457 if (name) 4458 printf(" %*s-", column_width - 1, " "); 4459 else 4460 printf(" %.*s", column_width, 4461 "--------------------"); 4462 } 4463 } 4464 } 4465 4466 4467 static void 4468 print_iostat_separator_impl(iostat_cbdata_t *cb, 4469 unsigned int force_column_width) 4470 { 4471 print_iostat_dashes(cb, force_column_width, NULL); 4472 } 4473 4474 static void 4475 print_iostat_separator(iostat_cbdata_t *cb) 4476 { 4477 print_iostat_separator_impl(cb, 0); 4478 } 4479 4480 static void 4481 print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width, 4482 const char *histo_vdev_name) 4483 { 4484 unsigned int namewidth; 4485 const char *title; 4486 4487 color_start(ANSI_BOLD); 4488 4489 if (cb->cb_flags & IOS_ANYHISTO_M) { 4490 title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]; 4491 } else if (cb->cb_vdevs.cb_names_count) { 4492 title = "vdev"; 4493 } else { 4494 title = "pool"; 4495 } 4496 4497 namewidth = MAX(MAX(strlen(title), cb->cb_namewidth), 4498 histo_vdev_name ? strlen(histo_vdev_name) : 0); 4499 4500 if (histo_vdev_name) 4501 printf("%-*s", namewidth, histo_vdev_name); 4502 else 4503 printf("%*s", namewidth, ""); 4504 4505 4506 print_iostat_labels(cb, force_column_width, iostat_top_labels); 4507 printf("\n"); 4508 4509 printf("%-*s", namewidth, title); 4510 4511 print_iostat_labels(cb, force_column_width, iostat_bottom_labels); 4512 if (cb->vcdl != NULL) 4513 print_cmd_columns(cb->vcdl, 0); 4514 4515 printf("\n"); 4516 4517 print_iostat_separator_impl(cb, force_column_width); 4518 4519 if (cb->vcdl != NULL) 4520 print_cmd_columns(cb->vcdl, 1); 4521 4522 color_end(); 4523 4524 printf("\n"); 4525 } 4526 4527 static void 4528 print_iostat_header(iostat_cbdata_t *cb) 4529 { 4530 print_iostat_header_impl(cb, 0, NULL); 4531 } 4532 4533 /* 4534 * Prints a size string (i.e. 120M) with the suffix ("M") colored 4535 * by order of magnitude. Uses column_size to add padding. 4536 */ 4537 static void 4538 print_stat_color(const char *statbuf, unsigned int column_size) 4539 { 4540 fputs(" ", stdout); 4541 size_t len = strlen(statbuf); 4542 while (len < column_size) { 4543 fputc(' ', stdout); 4544 column_size--; 4545 } 4546 if (*statbuf == '0') { 4547 color_start(ANSI_GRAY); 4548 fputc('0', stdout); 4549 } else { 4550 for (; *statbuf; statbuf++) { 4551 if (*statbuf == 'K') color_start(ANSI_GREEN); 4552 else if (*statbuf == 'M') color_start(ANSI_YELLOW); 4553 else if (*statbuf == 'G') color_start(ANSI_RED); 4554 else if (*statbuf == 'T') color_start(ANSI_BOLD_BLUE); 4555 else if (*statbuf == 'P') color_start(ANSI_MAGENTA); 4556 else if (*statbuf == 'E') color_start(ANSI_CYAN); 4557 fputc(*statbuf, stdout); 4558 if (--column_size <= 0) 4559 break; 4560 } 4561 } 4562 color_end(); 4563 } 4564 4565 /* 4566 * Display a single statistic. 
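 * With the default formats the value is abbreviated by zfs_nicenum_format();
 * for example a byte count of 125829120 prints as "120M", while parsable
 * (-p) callers pass a raw format and get the exact number.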
4567 */ 4568 static void 4569 print_one_stat(uint64_t value, enum zfs_nicenum_format format, 4570 unsigned int column_size, boolean_t scripted) 4571 { 4572 char buf[64]; 4573 4574 zfs_nicenum_format(value, buf, sizeof (buf), format); 4575 4576 if (scripted) 4577 printf("\t%s", buf); 4578 else 4579 print_stat_color(buf, column_size); 4580 } 4581 4582 /* 4583 * Calculate the default vdev stats 4584 * 4585 * Subtract oldvs from newvs, apply a scaling factor, and save the resulting 4586 * stats into calcvs. 4587 */ 4588 static void 4589 calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs, 4590 vdev_stat_t *calcvs) 4591 { 4592 int i; 4593 4594 memcpy(calcvs, newvs, sizeof (*calcvs)); 4595 for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++) 4596 calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]); 4597 4598 for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++) 4599 calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]); 4600 } 4601 4602 /* 4603 * Internal representation of the extended iostats data. 4604 * 4605 * The extended iostat stats are exported in nvlists as either uint64_t arrays 4606 * or single uint64_t's. We make both look like arrays to make them easier 4607 * to process. In order to make single uint64_t's look like arrays, we set 4608 * __data to the stat data, and then set *data = &__data with count = 1. Then, 4609 * we can just use *data and count. 4610 */ 4611 struct stat_array { 4612 uint64_t *data; 4613 uint_t count; /* Number of entries in data[] */ 4614 uint64_t __data; /* Only used when data is a single uint64_t */ 4615 }; 4616 4617 static uint64_t 4618 stat_histo_max(struct stat_array *nva, unsigned int len) 4619 { 4620 uint64_t max = 0; 4621 int i; 4622 for (i = 0; i < len; i++) 4623 max = MAX(max, array64_max(nva[i].data, nva[i].count)); 4624 4625 return (max); 4626 } 4627 4628 /* 4629 * Helper function to lookup a uint64_t array or uint64_t value and store its 4630 * data as a stat_array. If the nvpair is a single uint64_t value, then we make 4631 * it look like a one element array to make it easier to process. 4632 */ 4633 static int 4634 nvpair64_to_stat_array(nvlist_t *nvl, const char *name, 4635 struct stat_array *nva) 4636 { 4637 nvpair_t *tmp; 4638 int ret; 4639 4640 verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0); 4641 switch (nvpair_type(tmp)) { 4642 case DATA_TYPE_UINT64_ARRAY: 4643 ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count); 4644 break; 4645 case DATA_TYPE_UINT64: 4646 ret = nvpair_value_uint64(tmp, &nva->__data); 4647 nva->data = &nva->__data; 4648 nva->count = 1; 4649 break; 4650 default: 4651 /* Not a uint64_t */ 4652 ret = EINVAL; 4653 break; 4654 } 4655 4656 return (ret); 4657 } 4658 4659 /* 4660 * Given a list of nvlist names, look up the extended stats in newnv and oldnv, 4661 * subtract them, and return the results in a newly allocated stat_array. 4662 * You must free the returned array after you are done with it with 4663 * free_calc_stats(). 4664 * 4665 * Additionally, you can set "oldnv" to NULL if you simply want the newnv 4666 * values. 
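 * A minimal usage sketch (assuming "names" and "len" come from one of the
 * vsx_type_to_nvlist[] tables):
 *
 *	struct stat_array *nva;
 *
 *	nva = calc_and_alloc_stats_ex(names, len, oldnv, newnv);
 *	for (int i = 0; i < len; i++)
 *		(void) printf("%llu\n", (u_longlong_t)nva[i].data[0]);
 *	free_calc_stats(nva, len);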
4667 */ 4668 static struct stat_array * 4669 calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv, 4670 nvlist_t *newnv) 4671 { 4672 nvlist_t *oldnvx = NULL, *newnvx; 4673 struct stat_array *oldnva, *newnva, *calcnva; 4674 int i, j; 4675 unsigned int alloc_size = (sizeof (struct stat_array)) * len; 4676 4677 /* Extract our extended stats nvlist from the main list */ 4678 verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX, 4679 &newnvx) == 0); 4680 if (oldnv) { 4681 verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX, 4682 &oldnvx) == 0); 4683 } 4684 4685 newnva = safe_malloc(alloc_size); 4686 oldnva = safe_malloc(alloc_size); 4687 calcnva = safe_malloc(alloc_size); 4688 4689 for (j = 0; j < len; j++) { 4690 verify(nvpair64_to_stat_array(newnvx, names[j], 4691 &newnva[j]) == 0); 4692 calcnva[j].count = newnva[j].count; 4693 alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]); 4694 calcnva[j].data = safe_malloc(alloc_size); 4695 memcpy(calcnva[j].data, newnva[j].data, alloc_size); 4696 4697 if (oldnvx) { 4698 verify(nvpair64_to_stat_array(oldnvx, names[j], 4699 &oldnva[j]) == 0); 4700 for (i = 0; i < oldnva[j].count; i++) 4701 calcnva[j].data[i] -= oldnva[j].data[i]; 4702 } 4703 } 4704 free(newnva); 4705 free(oldnva); 4706 return (calcnva); 4707 } 4708 4709 static void 4710 free_calc_stats(struct stat_array *nva, unsigned int len) 4711 { 4712 int i; 4713 for (i = 0; i < len; i++) 4714 free(nva[i].data); 4715 4716 free(nva); 4717 } 4718 4719 static void 4720 print_iostat_histo(struct stat_array *nva, unsigned int len, 4721 iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth, 4722 double scale) 4723 { 4724 int i, j; 4725 char buf[6]; 4726 uint64_t val; 4727 enum zfs_nicenum_format format; 4728 unsigned int buckets; 4729 unsigned int start_bucket; 4730 4731 if (cb->cb_literal) 4732 format = ZFS_NICENUM_RAW; 4733 else 4734 format = ZFS_NICENUM_1024; 4735 4736 /* All these histos are the same size, so just use nva[0].count */ 4737 buckets = nva[0].count; 4738 4739 if (cb->cb_flags & IOS_RQ_HISTO_M) { 4740 /* Start at 512 - req size should never be lower than this */ 4741 start_bucket = 9; 4742 } else { 4743 start_bucket = 0; 4744 } 4745 4746 for (j = start_bucket; j < buckets; j++) { 4747 /* Print histogram bucket label */ 4748 if (cb->cb_flags & IOS_L_HISTO_M) { 4749 /* Ending range of this bucket */ 4750 val = (1UL << (j + 1)) - 1; 4751 zfs_nicetime(val, buf, sizeof (buf)); 4752 } else { 4753 /* Request size (starting range of bucket) */ 4754 val = (1UL << j); 4755 zfs_nicenum(val, buf, sizeof (buf)); 4756 } 4757 4758 if (cb->cb_scripted) 4759 printf("%llu", (u_longlong_t)val); 4760 else 4761 printf("%-*s", namewidth, buf); 4762 4763 /* Print the values on the line */ 4764 for (i = 0; i < len; i++) { 4765 print_one_stat(nva[i].data[j] * scale, format, 4766 column_width, cb->cb_scripted); 4767 } 4768 printf("\n"); 4769 } 4770 } 4771 4772 static void 4773 print_solid_separator(unsigned int length) 4774 { 4775 while (length--) 4776 printf("-"); 4777 printf("\n"); 4778 } 4779 4780 static void 4781 print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv, 4782 nvlist_t *newnv, double scale, const char *name) 4783 { 4784 unsigned int column_width; 4785 unsigned int namewidth; 4786 unsigned int entire_width; 4787 enum iostat_type type; 4788 struct stat_array *nva; 4789 const char **names; 4790 unsigned int names_len; 4791 4792 /* What type of histo are we? 
*/ 4793 type = IOS_HISTO_IDX(cb->cb_flags); 4794 4795 /* Get NULL-terminated array of nvlist names for our histo */ 4796 names = vsx_type_to_nvlist[type]; 4797 names_len = str_array_len(names); /* num of names */ 4798 4799 nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv); 4800 4801 if (cb->cb_literal) { 4802 column_width = MAX(5, 4803 (unsigned int) log10(stat_histo_max(nva, names_len)) + 1); 4804 } else { 4805 column_width = 5; 4806 } 4807 4808 namewidth = MAX(cb->cb_namewidth, 4809 strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)])); 4810 4811 /* 4812 * Calculate the entire line width of what we're printing. The 4813 * +2 is for the two spaces between columns: 4814 */ 4815 /* read write */ 4816 /* ----- ----- */ 4817 /* |___| <---------- column_width */ 4818 /* */ 4819 /* |__________| <--- entire_width */ 4820 /* */ 4821 entire_width = namewidth + (column_width + 2) * 4822 label_array_len(iostat_bottom_labels[type]); 4823 4824 if (cb->cb_scripted) 4825 printf("%s\n", name); 4826 else 4827 print_iostat_header_impl(cb, column_width, name); 4828 4829 print_iostat_histo(nva, names_len, cb, column_width, 4830 namewidth, scale); 4831 4832 free_calc_stats(nva, names_len); 4833 if (!cb->cb_scripted) 4834 print_solid_separator(entire_width); 4835 } 4836 4837 /* 4838 * Calculate the average latency of a power-of-two latency histogram 4839 */ 4840 static uint64_t 4841 single_histo_average(uint64_t *histo, unsigned int buckets) 4842 { 4843 int i; 4844 uint64_t count = 0, total = 0; 4845 4846 for (i = 0; i < buckets; i++) { 4847 /* 4848 * Our buckets are power-of-two latency ranges. Use the 4849 * midpoint latency of each bucket to calculate the average. 4850 * For example: 4851 * 4852 * Bucket Midpoint 4853 * 8ns-15ns: 12ns 4854 * 16ns-31ns: 24ns 4855 * ... 4856 */ 4857 if (histo[i] != 0) { 4858 total += histo[i] * (((1UL << i) + ((1UL << i)/2))); 4859 count += histo[i]; 4860 } 4861 } 4862 4863 /* Prevent divide by zero */ 4864 return (count == 0 ? 
0 : total / count); 4865 } 4866 4867 static void 4868 print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *newnv) 4869 { 4870 const char *names[] = { 4871 ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, 4872 ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, 4873 ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, 4874 ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, 4875 ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, 4876 ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, 4877 ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, 4878 ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, 4879 ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, 4880 ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, 4881 ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE, 4882 ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE, 4883 ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE, 4884 ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE, 4885 }; 4886 4887 struct stat_array *nva; 4888 4889 unsigned int column_width = default_column_width(cb, IOS_QUEUES); 4890 enum zfs_nicenum_format format; 4891 4892 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv); 4893 4894 if (cb->cb_literal) 4895 format = ZFS_NICENUM_RAW; 4896 else 4897 format = ZFS_NICENUM_1024; 4898 4899 for (int i = 0; i < ARRAY_SIZE(names); i++) { 4900 uint64_t val = nva[i].data[0]; 4901 print_one_stat(val, format, column_width, cb->cb_scripted); 4902 } 4903 4904 free_calc_stats(nva, ARRAY_SIZE(names)); 4905 } 4906 4907 static void 4908 print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv, 4909 nvlist_t *newnv) 4910 { 4911 int i; 4912 uint64_t val; 4913 const char *names[] = { 4914 ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, 4915 ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, 4916 ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, 4917 ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, 4918 ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, 4919 ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, 4920 ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, 4921 ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, 4922 ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, 4923 ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, 4924 ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO, 4925 }; 4926 struct stat_array *nva; 4927 4928 unsigned int column_width = default_column_width(cb, IOS_LATENCY); 4929 enum zfs_nicenum_format format; 4930 4931 nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv); 4932 4933 if (cb->cb_literal) 4934 format = ZFS_NICENUM_RAWTIME; 4935 else 4936 format = ZFS_NICENUM_TIME; 4937 4938 /* Print our avg latencies on the line */ 4939 for (i = 0; i < ARRAY_SIZE(names); i++) { 4940 /* Compute average latency for a latency histo */ 4941 val = single_histo_average(nva[i].data, nva[i].count); 4942 print_one_stat(val, format, column_width, cb->cb_scripted); 4943 } 4944 free_calc_stats(nva, ARRAY_SIZE(names)); 4945 } 4946 4947 /* 4948 * Print default statistics (capacity/operations/bandwidth) 4949 */ 4950 static void 4951 print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale) 4952 { 4953 unsigned int column_width = default_column_width(cb, IOS_DEFAULT); 4954 enum zfs_nicenum_format format; 4955 char na; /* char to print for "not applicable" values */ 4956 4957 if (cb->cb_literal) { 4958 format = ZFS_NICENUM_RAW; 4959 na = '0'; 4960 } else { 4961 format = ZFS_NICENUM_1024; 4962 na = '-'; 4963 } 4964 4965 /* only toplevel vdevs have capacity stats */ 4966 if (vs->vs_space == 0) { 4967 if (cb->cb_scripted) 4968 printf("\t%c\t%c", na, na); 4969 else 4970 printf(" %*c %*c", column_width, na, column_width, 4971 na); 4972 } else { 4973 print_one_stat(vs->vs_alloc, format, column_width, 4974 cb->cb_scripted); 4975 print_one_stat(vs->vs_space - vs->vs_alloc, format, 4976 column_width, cb->cb_scripted); 4977 } 4978 4979 
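	/*
	 * Operations and bandwidth columns: the per-interval deltas in 'vs'
	 * are converted to per-second rates here via 'scale'.
	 */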
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale), 4980 format, column_width, cb->cb_scripted); 4981 print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale), 4982 format, column_width, cb->cb_scripted); 4983 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale), 4984 format, column_width, cb->cb_scripted); 4985 print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale), 4986 format, column_width, cb->cb_scripted); 4987 } 4988 4989 static const char *const class_name[] = { 4990 VDEV_ALLOC_BIAS_DEDUP, 4991 VDEV_ALLOC_BIAS_SPECIAL, 4992 VDEV_ALLOC_CLASS_LOGS 4993 }; 4994 4995 /* 4996 * Print out all the statistics for the given vdev. This can either be the 4997 * toplevel configuration, or called recursively. If 'name' is NULL, then this 4998 * is a verbose output, and we don't want to display the toplevel pool stats. 4999 * 5000 * Returns the number of stat lines printed. 5001 */ 5002 static unsigned int 5003 print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, 5004 nvlist_t *newnv, iostat_cbdata_t *cb, int depth) 5005 { 5006 nvlist_t **oldchild, **newchild; 5007 uint_t c, children, oldchildren; 5008 vdev_stat_t *oldvs, *newvs, *calcvs; 5009 vdev_stat_t zerovs = { 0 }; 5010 char *vname; 5011 int i; 5012 int ret = 0; 5013 uint64_t tdelta; 5014 double scale; 5015 5016 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) 5017 return (ret); 5018 5019 calcvs = safe_malloc(sizeof (*calcvs)); 5020 5021 if (oldnv != NULL) { 5022 verify(nvlist_lookup_uint64_array(oldnv, 5023 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0); 5024 } else { 5025 oldvs = &zerovs; 5026 } 5027 5028 /* Do we only want to see a specific vdev? */ 5029 for (i = 0; i < cb->cb_vdevs.cb_names_count; i++) { 5030 /* Yes we do. Is this the vdev? */ 5031 if (strcmp(name, cb->cb_vdevs.cb_names[i]) == 0) { 5032 /* 5033 * This is our vdev. Since it is the only vdev we 5034 * will be displaying, make depth = 0 so that it 5035 * doesn't get indented. 5036 */ 5037 depth = 0; 5038 break; 5039 } 5040 } 5041 5042 if (cb->cb_vdevs.cb_names_count && (i == cb->cb_vdevs.cb_names_count)) { 5043 /* Couldn't match the name */ 5044 goto children; 5045 } 5046 5047 5048 verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS, 5049 (uint64_t **)&newvs, &c) == 0); 5050 5051 /* 5052 * Print the vdev name unless it's is a histogram. Histograms 5053 * display the vdev name in the header itself. 5054 */ 5055 if (!(cb->cb_flags & IOS_ANYHISTO_M)) { 5056 if (cb->cb_scripted) { 5057 printf("%s", name); 5058 } else { 5059 if (strlen(name) + depth > cb->cb_namewidth) 5060 (void) printf("%*s%s", depth, "", name); 5061 else 5062 (void) printf("%*s%s%*s", depth, "", name, 5063 (int)(cb->cb_namewidth - strlen(name) - 5064 depth), ""); 5065 } 5066 } 5067 5068 /* Calculate our scaling factor */ 5069 tdelta = newvs->vs_timestamp - oldvs->vs_timestamp; 5070 if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) { 5071 /* 5072 * If we specify printing histograms with no time interval, then 5073 * print the histogram numbers over the entire lifetime of the 5074 * vdev. 
5075 */ 5076 scale = 1; 5077 } else { 5078 if (tdelta == 0) 5079 scale = 1.0; 5080 else 5081 scale = (double)NANOSEC / tdelta; 5082 } 5083 5084 if (cb->cb_flags & IOS_DEFAULT_M) { 5085 calc_default_iostats(oldvs, newvs, calcvs); 5086 print_iostat_default(calcvs, cb, scale); 5087 } 5088 if (cb->cb_flags & IOS_LATENCY_M) 5089 print_iostat_latency(cb, oldnv, newnv); 5090 if (cb->cb_flags & IOS_QUEUES_M) 5091 print_iostat_queues(cb, newnv); 5092 if (cb->cb_flags & IOS_ANYHISTO_M) { 5093 printf("\n"); 5094 print_iostat_histos(cb, oldnv, newnv, scale, name); 5095 } 5096 5097 if (cb->vcdl != NULL) { 5098 const char *path; 5099 if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH, 5100 &path) == 0) { 5101 printf(" "); 5102 zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path); 5103 } 5104 } 5105 5106 if (!(cb->cb_flags & IOS_ANYHISTO_M)) 5107 printf("\n"); 5108 5109 ret++; 5110 5111 children: 5112 5113 free(calcvs); 5114 5115 if (!cb->cb_verbose) 5116 return (ret); 5117 5118 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN, 5119 &newchild, &children) != 0) 5120 return (ret); 5121 5122 if (oldnv) { 5123 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN, 5124 &oldchild, &oldchildren) != 0) 5125 return (ret); 5126 5127 children = MIN(oldchildren, children); 5128 } 5129 5130 /* 5131 * print normal top-level devices 5132 */ 5133 for (c = 0; c < children; c++) { 5134 uint64_t ishole = B_FALSE, islog = B_FALSE; 5135 5136 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE, 5137 &ishole); 5138 5139 (void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG, 5140 &islog); 5141 5142 if (ishole || islog) 5143 continue; 5144 5145 if (nvlist_exists(newchild[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 5146 continue; 5147 5148 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5149 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 5150 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, 5151 newchild[c], cb, depth + 2); 5152 free(vname); 5153 } 5154 5155 /* 5156 * print all other top-level devices 5157 */ 5158 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 5159 boolean_t printed = B_FALSE; 5160 5161 for (c = 0; c < children; c++) { 5162 uint64_t islog = B_FALSE; 5163 const char *bias = NULL; 5164 const char *type = NULL; 5165 5166 (void) nvlist_lookup_uint64(newchild[c], 5167 ZPOOL_CONFIG_IS_LOG, &islog); 5168 if (islog) { 5169 bias = VDEV_ALLOC_CLASS_LOGS; 5170 } else { 5171 (void) nvlist_lookup_string(newchild[c], 5172 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 5173 (void) nvlist_lookup_string(newchild[c], 5174 ZPOOL_CONFIG_TYPE, &type); 5175 } 5176 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 5177 continue; 5178 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 5179 continue; 5180 5181 if (!printed) { 5182 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && 5183 !cb->cb_scripted && 5184 !cb->cb_vdevs.cb_names) { 5185 print_iostat_dashes(cb, 0, 5186 class_name[n]); 5187 } 5188 printf("\n"); 5189 printed = B_TRUE; 5190 } 5191 5192 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5193 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID); 5194 ret += print_vdev_stats(zhp, vname, oldnv ? 
5195 oldchild[c] : NULL, newchild[c], cb, depth + 2); 5196 free(vname); 5197 } 5198 } 5199 5200 /* 5201 * Include level 2 ARC devices in iostat output 5202 */ 5203 if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE, 5204 &newchild, &children) != 0) 5205 return (ret); 5206 5207 if (oldnv) { 5208 if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE, 5209 &oldchild, &oldchildren) != 0) 5210 return (ret); 5211 5212 children = MIN(oldchildren, children); 5213 } 5214 5215 if (children > 0) { 5216 if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted && 5217 !cb->cb_vdevs.cb_names) { 5218 print_iostat_dashes(cb, 0, "cache"); 5219 } 5220 printf("\n"); 5221 5222 for (c = 0; c < children; c++) { 5223 vname = zpool_vdev_name(g_zfs, zhp, newchild[c], 5224 cb->cb_vdevs.cb_name_flags); 5225 ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] 5226 : NULL, newchild[c], cb, depth + 2); 5227 free(vname); 5228 } 5229 } 5230 5231 return (ret); 5232 } 5233 5234 static int 5235 refresh_iostat(zpool_handle_t *zhp, void *data) 5236 { 5237 iostat_cbdata_t *cb = data; 5238 boolean_t missing; 5239 5240 /* 5241 * If the pool has disappeared, remove it from the list and continue. 5242 */ 5243 if (zpool_refresh_stats(zhp, &missing) != 0) 5244 return (-1); 5245 5246 if (missing) 5247 pool_list_remove(cb->cb_list, zhp); 5248 5249 return (0); 5250 } 5251 5252 /* 5253 * Callback to print out the iostats for the given pool. 5254 */ 5255 static int 5256 print_iostat(zpool_handle_t *zhp, void *data) 5257 { 5258 iostat_cbdata_t *cb = data; 5259 nvlist_t *oldconfig, *newconfig; 5260 nvlist_t *oldnvroot, *newnvroot; 5261 int ret; 5262 5263 newconfig = zpool_get_config(zhp, &oldconfig); 5264 5265 if (cb->cb_iteration == 1) 5266 oldconfig = NULL; 5267 5268 verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE, 5269 &newnvroot) == 0); 5270 5271 if (oldconfig == NULL) 5272 oldnvroot = NULL; 5273 else 5274 verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE, 5275 &oldnvroot) == 0); 5276 5277 ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, 5278 cb, 0); 5279 if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) && 5280 !cb->cb_scripted && cb->cb_verbose && 5281 !cb->cb_vdevs.cb_names_count) { 5282 print_iostat_separator(cb); 5283 if (cb->vcdl != NULL) { 5284 print_cmd_columns(cb->vcdl, 1); 5285 } 5286 printf("\n"); 5287 } 5288 5289 return (ret); 5290 } 5291 5292 static int 5293 get_columns(void) 5294 { 5295 struct winsize ws; 5296 int columns = 80; 5297 int error; 5298 5299 if (isatty(STDOUT_FILENO)) { 5300 error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws); 5301 if (error == 0) 5302 columns = ws.ws_col; 5303 } else { 5304 columns = 999; 5305 } 5306 5307 return (columns); 5308 } 5309 5310 /* 5311 * Return the required length of the pool/vdev name column. The minimum 5312 * allowed width and output formatting flags must be provided. 
5313 */ 5314 static int 5315 get_namewidth(zpool_handle_t *zhp, int min_width, int flags, boolean_t verbose) 5316 { 5317 nvlist_t *config, *nvroot; 5318 int width = min_width; 5319 5320 if ((config = zpool_get_config(zhp, NULL)) != NULL) { 5321 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5322 &nvroot) == 0); 5323 size_t poolname_len = strlen(zpool_get_name(zhp)); 5324 if (verbose == B_FALSE) { 5325 width = MAX(poolname_len, min_width); 5326 } else { 5327 width = MAX(poolname_len, 5328 max_width(zhp, nvroot, 0, min_width, flags)); 5329 } 5330 } 5331 5332 return (width); 5333 } 5334 5335 /* 5336 * Parse the input string, get the 'interval' and 'count' value if there is one. 5337 */ 5338 static void 5339 get_interval_count(int *argcp, char **argv, float *iv, 5340 unsigned long *cnt) 5341 { 5342 float interval = 0; 5343 unsigned long count = 0; 5344 int argc = *argcp; 5345 5346 /* 5347 * Determine if the last argument is an integer or a pool name 5348 */ 5349 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 5350 char *end; 5351 5352 errno = 0; 5353 interval = strtof(argv[argc - 1], &end); 5354 5355 if (*end == '\0' && errno == 0) { 5356 if (interval == 0) { 5357 (void) fprintf(stderr, gettext( 5358 "interval cannot be zero\n")); 5359 usage(B_FALSE); 5360 } 5361 /* 5362 * Ignore the last parameter 5363 */ 5364 argc--; 5365 } else { 5366 /* 5367 * If this is not a valid number, just plow on. The 5368 * user will get a more informative error message later 5369 * on. 5370 */ 5371 interval = 0; 5372 } 5373 } 5374 5375 /* 5376 * If the last argument is also an integer, then we have both a count 5377 * and an interval. 5378 */ 5379 if (argc > 0 && zfs_isnumber(argv[argc - 1])) { 5380 char *end; 5381 5382 errno = 0; 5383 count = interval; 5384 interval = strtof(argv[argc - 1], &end); 5385 5386 if (*end == '\0' && errno == 0) { 5387 if (interval == 0) { 5388 (void) fprintf(stderr, gettext( 5389 "interval cannot be zero\n")); 5390 usage(B_FALSE); 5391 } 5392 5393 /* 5394 * Ignore the last parameter 5395 */ 5396 argc--; 5397 } else { 5398 interval = 0; 5399 } 5400 } 5401 5402 *iv = interval; 5403 *cnt = count; 5404 *argcp = argc; 5405 } 5406 5407 static void 5408 get_timestamp_arg(char c) 5409 { 5410 if (c == 'u') 5411 timestamp_fmt = UDATE; 5412 else if (c == 'd') 5413 timestamp_fmt = DDATE; 5414 else 5415 usage(B_FALSE); 5416 } 5417 5418 /* 5419 * Return stat flags that are supported by all pools by both the module and 5420 * zpool iostat. "*data" should be initialized to all 0xFFs before running. 5421 * It will get ANDed down until only the flags that are supported on all pools 5422 * remain. 5423 */ 5424 static int 5425 get_stat_flags_cb(zpool_handle_t *zhp, void *data) 5426 { 5427 uint64_t *mask = data; 5428 nvlist_t *config, *nvroot, *nvx; 5429 uint64_t flags = 0; 5430 int i, j; 5431 5432 config = zpool_get_config(zhp, NULL); 5433 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 5434 &nvroot) == 0); 5435 5436 /* Default stats are always supported, but for completeness.. */ 5437 if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS)) 5438 flags |= IOS_DEFAULT_M; 5439 5440 /* Get our extended stats nvlist from the main list */ 5441 if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX, 5442 &nvx) != 0) { 5443 /* 5444 * No extended stats; they're probably running an older 5445 * module. No big deal, we support that too. 
5446 */ 5447 goto end; 5448 } 5449 5450 /* For each extended stat, make sure all its nvpairs are supported */ 5451 for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) { 5452 if (!vsx_type_to_nvlist[j][0]) 5453 continue; 5454 5455 /* Start off by assuming the flag is supported, then check */ 5456 flags |= (1ULL << j); 5457 for (i = 0; vsx_type_to_nvlist[j][i]; i++) { 5458 if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) { 5459 /* flag isn't supported */ 5460 flags = flags & ~(1ULL << j); 5461 break; 5462 } 5463 } 5464 } 5465 end: 5466 *mask = *mask & flags; 5467 return (0); 5468 } 5469 5470 /* 5471 * Return a bitmask of stats that are supported on all pools by both the module 5472 * and zpool iostat. 5473 */ 5474 static uint64_t 5475 get_stat_flags(zpool_list_t *list) 5476 { 5477 uint64_t mask = -1; 5478 5479 /* 5480 * get_stat_flags_cb() will lop off bits from "mask" until only the 5481 * flags that are supported on all pools remain. 5482 */ 5483 pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask); 5484 return (mask); 5485 } 5486 5487 /* 5488 * Return 1 if cb_data->cb_names[0] is this vdev's name, 0 otherwise. 5489 */ 5490 static int 5491 is_vdev_cb(void *zhp_data, nvlist_t *nv, void *cb_data) 5492 { 5493 uint64_t guid; 5494 vdev_cbdata_t *cb = cb_data; 5495 zpool_handle_t *zhp = zhp_data; 5496 5497 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0) 5498 return (0); 5499 5500 return (guid == zpool_vdev_path_to_guid(zhp, cb->cb_names[0])); 5501 } 5502 5503 /* 5504 * Returns 1 if cb_data->cb_names[0] is a vdev name, 0 otherwise. 5505 */ 5506 static int 5507 is_vdev(zpool_handle_t *zhp, void *cb_data) 5508 { 5509 return (for_each_vdev(zhp, is_vdev_cb, cb_data)); 5510 } 5511 5512 /* 5513 * Check if vdevs are in a pool 5514 * 5515 * Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise 5516 * return 0. If pool_name is NULL, then search all pools. 5517 */ 5518 static int 5519 are_vdevs_in_pool(int argc, char **argv, char *pool_name, 5520 vdev_cbdata_t *cb) 5521 { 5522 char **tmp_name; 5523 int ret = 0; 5524 int i; 5525 int pool_count = 0; 5526 5527 if ((argc == 0) || !*argv) 5528 return (0); 5529 5530 if (pool_name) 5531 pool_count = 1; 5532 5533 /* Temporarily hijack cb_names for a second... */ 5534 tmp_name = cb->cb_names; 5535 5536 /* Go though our list of prospective vdev names */ 5537 for (i = 0; i < argc; i++) { 5538 cb->cb_names = argv + i; 5539 5540 /* Is this name a vdev in our pools? */ 5541 ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL, 5542 ZFS_TYPE_POOL, B_FALSE, is_vdev, cb); 5543 if (!ret) { 5544 /* No match */ 5545 break; 5546 } 5547 } 5548 5549 cb->cb_names = tmp_name; 5550 5551 return (ret); 5552 } 5553 5554 static int 5555 is_pool_cb(zpool_handle_t *zhp, void *data) 5556 { 5557 char *name = data; 5558 if (strcmp(name, zpool_get_name(zhp)) == 0) 5559 return (1); 5560 5561 return (0); 5562 } 5563 5564 /* 5565 * Do we have a pool named *name? If so, return 1, otherwise 0. 5566 */ 5567 static int 5568 is_pool(char *name) 5569 { 5570 return (for_each_pool(0, NULL, B_TRUE, NULL, ZFS_TYPE_POOL, B_FALSE, 5571 is_pool_cb, name)); 5572 } 5573 5574 /* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */ 5575 static int 5576 are_all_pools(int argc, char **argv) 5577 { 5578 if ((argc == 0) || !*argv) 5579 return (0); 5580 5581 while (--argc >= 0) 5582 if (!is_pool(argv[argc])) 5583 return (0); 5584 5585 return (1); 5586 } 5587 5588 /* 5589 * Helper function to print out vdev/pool names we can't resolve. 
Used for an 5590 * error message. 5591 */ 5592 static void 5593 error_list_unresolved_vdevs(int argc, char **argv, char *pool_name, 5594 vdev_cbdata_t *cb) 5595 { 5596 int i; 5597 char *name; 5598 char *str; 5599 for (i = 0; i < argc; i++) { 5600 name = argv[i]; 5601 5602 if (is_pool(name)) 5603 str = gettext("pool"); 5604 else if (are_vdevs_in_pool(1, &name, pool_name, cb)) 5605 str = gettext("vdev in this pool"); 5606 else if (are_vdevs_in_pool(1, &name, NULL, cb)) 5607 str = gettext("vdev in another pool"); 5608 else 5609 str = gettext("unknown"); 5610 5611 fprintf(stderr, "\t%s (%s)\n", name, str); 5612 } 5613 } 5614 5615 /* 5616 * Same as get_interval_count(), but with additional checks to not misinterpret 5617 * guids as interval/count values. Assumes VDEV_NAME_GUID is set in 5618 * cb.cb_vdevs.cb_name_flags. 5619 */ 5620 static void 5621 get_interval_count_filter_guids(int *argc, char **argv, float *interval, 5622 unsigned long *count, iostat_cbdata_t *cb) 5623 { 5624 char **tmpargv = argv; 5625 int argc_for_interval = 0; 5626 5627 /* Is the last arg an interval value? Or a guid? */ 5628 if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, 5629 &cb->cb_vdevs)) { 5630 /* 5631 * The last arg is not a guid, so it's probably an 5632 * interval value. 5633 */ 5634 argc_for_interval++; 5635 5636 if (*argc >= 2 && 5637 !are_vdevs_in_pool(1, &argv[*argc - 2], NULL, 5638 &cb->cb_vdevs)) { 5639 /* 5640 * The 2nd to last arg is not a guid, so it's probably 5641 * an interval value. 5642 */ 5643 argc_for_interval++; 5644 } 5645 } 5646 5647 /* Point to our list of possible intervals */ 5648 tmpargv = &argv[*argc - argc_for_interval]; 5649 5650 *argc = *argc - argc_for_interval; 5651 get_interval_count(&argc_for_interval, tmpargv, 5652 interval, count); 5653 } 5654 5655 /* 5656 * Terminal height, in rows. Returns -1 if stdout is not connected to a TTY or 5657 * if we were unable to determine its size. 5658 */ 5659 static int 5660 terminal_height(void) 5661 { 5662 struct winsize win; 5663 5664 if (isatty(STDOUT_FILENO) == 0) 5665 return (-1); 5666 5667 if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) != -1 && win.ws_row > 0) 5668 return (win.ws_row); 5669 5670 return (-1); 5671 } 5672 5673 /* 5674 * Run one of the zpool status/iostat -c scripts with the help (-h) option and 5675 * print the result. 5676 * 5677 * name: Short name of the script ('iostat'). 5678 * path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat'); 5679 */ 5680 static void 5681 print_zpool_script_help(char *name, char *path) 5682 { 5683 char *argv[] = {path, (char *)"-h", NULL}; 5684 char **lines = NULL; 5685 int lines_cnt = 0; 5686 int rc; 5687 5688 rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines, 5689 &lines_cnt); 5690 if (rc != 0 || lines == NULL || lines_cnt <= 0) { 5691 if (lines != NULL) 5692 libzfs_free_str_array(lines, lines_cnt); 5693 return; 5694 } 5695 5696 for (int i = 0; i < lines_cnt; i++) 5697 if (!is_blank_str(lines[i])) 5698 printf(" %-14s %s\n", name, lines[i]); 5699 5700 libzfs_free_str_array(lines, lines_cnt); 5701 } 5702 5703 /* 5704 * Go though the zpool status/iostat -c scripts in the user's path, run their 5705 * help option (-h), and print out the results. 
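 * Only regular files with the user-execute bit set are considered; e.g. an
 * executable named "iostat" in the directory would be run as "iostat -h"
 * via print_zpool_script_help() above (the name is illustrative).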
5706 */ 5707 static void 5708 print_zpool_dir_scripts(char *dirpath) 5709 { 5710 DIR *dir; 5711 struct dirent *ent; 5712 char fullpath[MAXPATHLEN]; 5713 struct stat dir_stat; 5714 5715 if ((dir = opendir(dirpath)) != NULL) { 5716 /* print all the files and directories within directory */ 5717 while ((ent = readdir(dir)) != NULL) { 5718 if (snprintf(fullpath, sizeof (fullpath), "%s/%s", 5719 dirpath, ent->d_name) >= sizeof (fullpath)) { 5720 (void) fprintf(stderr, 5721 gettext("internal error: " 5722 "ZPOOL_SCRIPTS_PATH too large.\n")); 5723 exit(1); 5724 } 5725 5726 /* Print the scripts */ 5727 if (stat(fullpath, &dir_stat) == 0) 5728 if (dir_stat.st_mode & S_IXUSR && 5729 S_ISREG(dir_stat.st_mode)) 5730 print_zpool_script_help(ent->d_name, 5731 fullpath); 5732 } 5733 closedir(dir); 5734 } 5735 } 5736 5737 /* 5738 * Print out help text for all zpool status/iostat -c scripts. 5739 */ 5740 static void 5741 print_zpool_script_list(const char *subcommand) 5742 { 5743 char *dir, *sp, *tmp; 5744 5745 printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand); 5746 5747 sp = zpool_get_cmd_search_path(); 5748 if (sp == NULL) 5749 return; 5750 5751 for (dir = strtok_r(sp, ":", &tmp); 5752 dir != NULL; 5753 dir = strtok_r(NULL, ":", &tmp)) 5754 print_zpool_dir_scripts(dir); 5755 5756 free(sp); 5757 } 5758 5759 /* 5760 * Set the minimum pool/vdev name column width. The width must be at least 10, 5761 * but may be as large as the column width - 42 so it still fits on one line. 5762 * NOTE: 42 is the width of the default capacity/operations/bandwidth output 5763 */ 5764 static int 5765 get_namewidth_iostat(zpool_handle_t *zhp, void *data) 5766 { 5767 iostat_cbdata_t *cb = data; 5768 int width, available_width; 5769 5770 /* 5771 * get_namewidth() returns the maximum width of any name in that column 5772 * for any pool/vdev/device line that will be output. 5773 */ 5774 width = get_namewidth(zhp, cb->cb_namewidth, 5775 cb->cb_vdevs.cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose); 5776 5777 /* 5778 * The width we are calculating is the width of the header and also the 5779 * padding width for names that are less than maximum width. The stats 5780 * take up 42 characters, so the width available for names is: 5781 */ 5782 available_width = get_columns() - 42; 5783 5784 /* 5785 * If the maximum width fits on a screen, then great! Make everything 5786 * line up by justifying all lines to the same width. If that max 5787 * width is larger than what's available, the name plus stats won't fit 5788 * on one line, and justifying to that width would cause every line to 5789 * wrap on the screen. We only want lines with long names to wrap. 5790 * Limit the padding to what won't wrap. 5791 */ 5792 if (width > available_width) 5793 width = available_width; 5794 5795 /* 5796 * And regardless of whatever the screen width is (get_columns can 5797 * return 0 if the width is not known or less than 42 for a narrow 5798 * terminal) have the width be a minimum of 10. 5799 */ 5800 if (width < 10) 5801 width = 10; 5802 5803 /* Save the calculated width */ 5804 cb->cb_namewidth = width; 5805 5806 return (0); 5807 } 5808 5809 /* 5810 * zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy] [-n name] 5811 * [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]] 5812 * [interval [count]] 5813 * 5814 * -c CMD For each vdev, run command CMD 5815 * -g Display guid for individual vdev name. 5816 * -L Follow links when resolving vdev path name. 5817 * -P Display full path for vdev name. 
5818 * -v Display statistics for individual vdevs 5819 * -h Display help 5820 * -p Display values in parsable (exact) format. 5821 * -H Scripted mode. Don't display headers, and separate properties 5822 * by a single tab. 5823 * -l Display average latency 5824 * -q Display queue depths 5825 * -w Display latency histograms 5826 * -r Display request size histogram 5827 * -T Display a timestamp in date(1) or Unix format 5828 * -n Only print headers once 5829 * 5830 * This command can be tricky because we want to be able to deal with pool 5831 * creation/destruction as well as vdev configuration changes. The bulk of this 5832 * processing is handled by the pool_list_* routines in zpool_iter.c. We rely 5833 * on pool_list_update() to detect the addition of new pools. Configuration 5834 * changes are all handled within libzfs. 5835 */ 5836 int 5837 zpool_do_iostat(int argc, char **argv) 5838 { 5839 int c; 5840 int ret; 5841 int npools; 5842 float interval = 0; 5843 unsigned long count = 0; 5844 int winheight = 24; 5845 zpool_list_t *list; 5846 boolean_t verbose = B_FALSE; 5847 boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE; 5848 boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE; 5849 boolean_t omit_since_boot = B_FALSE; 5850 boolean_t guid = B_FALSE; 5851 boolean_t follow_links = B_FALSE; 5852 boolean_t full_name = B_FALSE; 5853 boolean_t headers_once = B_FALSE; 5854 iostat_cbdata_t cb = { 0 }; 5855 char *cmd = NULL; 5856 5857 /* Used for printing error message */ 5858 const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q', 5859 [IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'}; 5860 5861 uint64_t unsupported_flags; 5862 5863 /* check options */ 5864 while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwnH")) != -1) { 5865 switch (c) { 5866 case 'c': 5867 if (cmd != NULL) { 5868 fprintf(stderr, 5869 gettext("Can't set -c flag twice\n")); 5870 exit(1); 5871 } 5872 5873 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 5874 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 5875 fprintf(stderr, gettext( 5876 "Can't run -c, disabled by " 5877 "ZPOOL_SCRIPTS_ENABLED.\n")); 5878 exit(1); 5879 } 5880 5881 if ((getuid() <= 0 || geteuid() <= 0) && 5882 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 5883 fprintf(stderr, gettext( 5884 "Can't run -c with root privileges " 5885 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 5886 exit(1); 5887 } 5888 cmd = optarg; 5889 verbose = B_TRUE; 5890 break; 5891 case 'g': 5892 guid = B_TRUE; 5893 break; 5894 case 'L': 5895 follow_links = B_TRUE; 5896 break; 5897 case 'P': 5898 full_name = B_TRUE; 5899 break; 5900 case 'T': 5901 get_timestamp_arg(*optarg); 5902 break; 5903 case 'v': 5904 verbose = B_TRUE; 5905 break; 5906 case 'p': 5907 parsable = B_TRUE; 5908 break; 5909 case 'l': 5910 latency = B_TRUE; 5911 break; 5912 case 'q': 5913 queues = B_TRUE; 5914 break; 5915 case 'H': 5916 scripted = B_TRUE; 5917 break; 5918 case 'w': 5919 l_histo = B_TRUE; 5920 break; 5921 case 'r': 5922 rq_histo = B_TRUE; 5923 break; 5924 case 'y': 5925 omit_since_boot = B_TRUE; 5926 break; 5927 case 'n': 5928 headers_once = B_TRUE; 5929 break; 5930 case 'h': 5931 usage(B_FALSE); 5932 break; 5933 case '?': 5934 if (optopt == 'c') { 5935 print_zpool_script_list("iostat"); 5936 exit(0); 5937 } else { 5938 fprintf(stderr, 5939 gettext("invalid option '%c'\n"), optopt); 5940 } 5941 usage(B_FALSE); 5942 } 5943 } 5944 5945 argc -= optind; 5946 argv += optind; 5947 5948 cb.cb_literal = parsable; 5949 cb.cb_scripted = scripted; 5950 5951 if (guid) 5952 
cb.cb_vdevs.cb_name_flags |= VDEV_NAME_GUID; 5953 if (follow_links) 5954 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 5955 if (full_name) 5956 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_PATH; 5957 cb.cb_iteration = 0; 5958 cb.cb_namewidth = 0; 5959 cb.cb_verbose = verbose; 5960 5961 /* Get our interval and count values (if any) */ 5962 if (guid) { 5963 get_interval_count_filter_guids(&argc, argv, &interval, 5964 &count, &cb); 5965 } else { 5966 get_interval_count(&argc, argv, &interval, &count); 5967 } 5968 5969 if (argc == 0) { 5970 /* No args, so just print the defaults. */ 5971 } else if (are_all_pools(argc, argv)) { 5972 /* All the args are pool names */ 5973 } else if (are_vdevs_in_pool(argc, argv, NULL, &cb.cb_vdevs)) { 5974 /* All the args are vdevs */ 5975 cb.cb_vdevs.cb_names = argv; 5976 cb.cb_vdevs.cb_names_count = argc; 5977 argc = 0; /* No pools to process */ 5978 } else if (are_all_pools(1, argv)) { 5979 /* The first arg is a pool name */ 5980 if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 5981 &cb.cb_vdevs)) { 5982 /* ...and the rest are vdev names */ 5983 cb.cb_vdevs.cb_names = argv + 1; 5984 cb.cb_vdevs.cb_names_count = argc - 1; 5985 argc = 1; /* One pool to process */ 5986 } else { 5987 fprintf(stderr, gettext("Expected either a list of ")); 5988 fprintf(stderr, gettext("pools, or list of vdevs in")); 5989 fprintf(stderr, " \"%s\", ", argv[0]); 5990 fprintf(stderr, gettext("but got:\n")); 5991 error_list_unresolved_vdevs(argc - 1, argv + 1, 5992 argv[0], &cb.cb_vdevs); 5993 fprintf(stderr, "\n"); 5994 usage(B_FALSE); 5995 return (1); 5996 } 5997 } else { 5998 /* 5999 * The args don't make sense. The first arg isn't a pool name, 6000 * nor are all the args vdevs. 6001 */ 6002 fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n")); 6003 fprintf(stderr, "\n"); 6004 return (1); 6005 } 6006 6007 if (cb.cb_vdevs.cb_names_count != 0) { 6008 /* 6009 * If user specified vdevs, it implies verbose. 6010 */ 6011 cb.cb_verbose = B_TRUE; 6012 } 6013 6014 /* 6015 * Construct the list of all interesting pools. 6016 */ 6017 ret = 0; 6018 if ((list = pool_list_get(argc, argv, NULL, ZFS_TYPE_POOL, parsable, 6019 &ret)) == NULL) 6020 return (1); 6021 6022 if (pool_list_count(list) == 0 && argc != 0) { 6023 pool_list_free(list); 6024 return (1); 6025 } 6026 6027 if (pool_list_count(list) == 0 && interval == 0) { 6028 pool_list_free(list); 6029 (void) fprintf(stderr, gettext("no pools available\n")); 6030 return (1); 6031 } 6032 6033 if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) { 6034 pool_list_free(list); 6035 (void) fprintf(stderr, 6036 gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n")); 6037 usage(B_FALSE); 6038 return (1); 6039 } 6040 6041 if (l_histo && rq_histo) { 6042 pool_list_free(list); 6043 (void) fprintf(stderr, 6044 gettext("Only one of [-r|-w] can be passed at a time\n")); 6045 usage(B_FALSE); 6046 return (1); 6047 } 6048 6049 /* 6050 * Enter the main iostat loop. 6051 */ 6052 cb.cb_list = list; 6053 6054 if (l_histo) { 6055 /* 6056 * Histograms tables look out of place when you try to display 6057 * them with the other stats, so make a rule that you can only 6058 * print histograms by themselves. 6059 */ 6060 cb.cb_flags = IOS_L_HISTO_M; 6061 } else if (rq_histo) { 6062 cb.cb_flags = IOS_RQ_HISTO_M; 6063 } else { 6064 cb.cb_flags = IOS_DEFAULT_M; 6065 if (latency) 6066 cb.cb_flags |= IOS_LATENCY_M; 6067 if (queues) 6068 cb.cb_flags |= IOS_QUEUES_M; 6069 } 6070 6071 /* 6072 * See if the module supports all the stats we want to display. 
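 * For example, if -r was requested but the module exports no request-size
 * histograms, this reports "The loaded zfs module doesn't support: -r" and
 * suggests running a newer module.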
6073 */ 6074 unsupported_flags = cb.cb_flags & ~get_stat_flags(list); 6075 if (unsupported_flags) { 6076 uint64_t f; 6077 int idx; 6078 fprintf(stderr, 6079 gettext("The loaded zfs module doesn't support:")); 6080 6081 /* for each bit set in unsupported_flags */ 6082 for (f = unsupported_flags; f; f &= ~(1ULL << idx)) { 6083 idx = lowbit64(f) - 1; 6084 fprintf(stderr, " -%c", flag_to_arg[idx]); 6085 } 6086 6087 fprintf(stderr, ". Try running a newer module.\n"); 6088 pool_list_free(list); 6089 6090 return (1); 6091 } 6092 6093 for (;;) { 6094 if ((npools = pool_list_count(list)) == 0) 6095 (void) fprintf(stderr, gettext("no pools available\n")); 6096 else { 6097 /* 6098 * If this is the first iteration and -y was supplied 6099 * we skip any printing. 6100 */ 6101 boolean_t skip = (omit_since_boot && 6102 cb.cb_iteration == 0); 6103 6104 /* 6105 * Refresh all statistics. This is done as an 6106 * explicit step before calculating the maximum name 6107 * width, so that any * configuration changes are 6108 * properly accounted for. 6109 */ 6110 (void) pool_list_iter(list, B_FALSE, refresh_iostat, 6111 &cb); 6112 6113 /* 6114 * Iterate over all pools to determine the maximum width 6115 * for the pool / device name column across all pools. 6116 */ 6117 cb.cb_namewidth = 0; 6118 (void) pool_list_iter(list, B_FALSE, 6119 get_namewidth_iostat, &cb); 6120 6121 if (timestamp_fmt != NODATE) 6122 print_timestamp(timestamp_fmt); 6123 6124 if (cmd != NULL && cb.cb_verbose && 6125 !(cb.cb_flags & IOS_ANYHISTO_M)) { 6126 cb.vcdl = all_pools_for_each_vdev_run(argc, 6127 argv, cmd, g_zfs, cb.cb_vdevs.cb_names, 6128 cb.cb_vdevs.cb_names_count, 6129 cb.cb_vdevs.cb_name_flags); 6130 } else { 6131 cb.vcdl = NULL; 6132 } 6133 6134 6135 /* 6136 * Check terminal size so we can print headers 6137 * even when terminal window has its height 6138 * changed. 6139 */ 6140 winheight = terminal_height(); 6141 /* 6142 * Are we connected to TTY? If not, headers_once 6143 * should be true, to avoid breaking scripts. 6144 */ 6145 if (winheight < 0) 6146 headers_once = B_TRUE; 6147 6148 /* 6149 * If it's the first time and we're not skipping it, 6150 * or either skip or verbose mode, print the header. 6151 * 6152 * The histogram code explicitly prints its header on 6153 * every vdev, so skip this for histograms. 6154 */ 6155 if (((++cb.cb_iteration == 1 && !skip) || 6156 (skip != verbose) || 6157 (!headers_once && 6158 (cb.cb_iteration % winheight) == 0)) && 6159 (!(cb.cb_flags & IOS_ANYHISTO_M)) && 6160 !cb.cb_scripted) 6161 print_iostat_header(&cb); 6162 6163 if (skip) { 6164 (void) fflush(stdout); 6165 (void) fsleep(interval); 6166 continue; 6167 } 6168 6169 pool_list_iter(list, B_FALSE, print_iostat, &cb); 6170 6171 /* 6172 * If there's more than one pool, and we're not in 6173 * verbose mode (which prints a separator for us), 6174 * then print a separator. 6175 * 6176 * In addition, if we're printing specific vdevs then 6177 * we also want an ending separator. 
6178 */ 6179 if (((npools > 1 && !verbose && 6180 !(cb.cb_flags & IOS_ANYHISTO_M)) || 6181 (!(cb.cb_flags & IOS_ANYHISTO_M) && 6182 cb.cb_vdevs.cb_names_count)) && 6183 !cb.cb_scripted) { 6184 print_iostat_separator(&cb); 6185 if (cb.vcdl != NULL) 6186 print_cmd_columns(cb.vcdl, 1); 6187 printf("\n"); 6188 } 6189 6190 if (cb.vcdl != NULL) 6191 free_vdev_cmd_data_list(cb.vcdl); 6192 6193 } 6194 6195 if (interval == 0) 6196 break; 6197 6198 if (count != 0 && --count == 0) 6199 break; 6200 6201 (void) fflush(stdout); 6202 (void) fsleep(interval); 6203 } 6204 6205 pool_list_free(list); 6206 6207 return (ret); 6208 } 6209 6210 typedef struct list_cbdata { 6211 boolean_t cb_verbose; 6212 int cb_name_flags; 6213 int cb_namewidth; 6214 boolean_t cb_scripted; 6215 zprop_list_t *cb_proplist; 6216 boolean_t cb_literal; 6217 } list_cbdata_t; 6218 6219 6220 /* 6221 * Given a list of columns to display, output appropriate headers for each one. 6222 */ 6223 static void 6224 print_header(list_cbdata_t *cb) 6225 { 6226 zprop_list_t *pl = cb->cb_proplist; 6227 char headerbuf[ZPOOL_MAXPROPLEN]; 6228 const char *header; 6229 boolean_t first = B_TRUE; 6230 boolean_t right_justify; 6231 size_t width = 0; 6232 6233 for (; pl != NULL; pl = pl->pl_next) { 6234 width = pl->pl_width; 6235 if (first && cb->cb_verbose) { 6236 /* 6237 * Reset the width to accommodate the verbose listing 6238 * of devices. 6239 */ 6240 width = cb->cb_namewidth; 6241 } 6242 6243 if (!first) 6244 (void) fputs(" ", stdout); 6245 else 6246 first = B_FALSE; 6247 6248 right_justify = B_FALSE; 6249 if (pl->pl_prop != ZPROP_USERPROP) { 6250 header = zpool_prop_column_name(pl->pl_prop); 6251 right_justify = zpool_prop_align_right(pl->pl_prop); 6252 } else { 6253 int i; 6254 6255 for (i = 0; pl->pl_user_prop[i] != '\0'; i++) 6256 headerbuf[i] = toupper(pl->pl_user_prop[i]); 6257 headerbuf[i] = '\0'; 6258 header = headerbuf; 6259 } 6260 6261 if (pl->pl_next == NULL && !right_justify) 6262 (void) fputs(header, stdout); 6263 else if (right_justify) 6264 (void) printf("%*s", (int)width, header); 6265 else 6266 (void) printf("%-*s", (int)width, header); 6267 } 6268 6269 (void) fputc('\n', stdout); 6270 } 6271 6272 /* 6273 * Given a pool and a list of properties, print out all the properties according 6274 * to the described layout. Used by zpool_do_list(). 6275 */ 6276 static void 6277 print_pool(zpool_handle_t *zhp, list_cbdata_t *cb) 6278 { 6279 zprop_list_t *pl = cb->cb_proplist; 6280 boolean_t first = B_TRUE; 6281 char property[ZPOOL_MAXPROPLEN]; 6282 const char *propstr; 6283 boolean_t right_justify; 6284 size_t width; 6285 6286 for (; pl != NULL; pl = pl->pl_next) { 6287 6288 width = pl->pl_width; 6289 if (first && cb->cb_verbose) { 6290 /* 6291 * Reset the width to accommodate the verbose listing 6292 * of devices. 
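 *
 * (cb_namewidth itself is precomputed by get_namewidth_list() on each
 * pass of zpool_do_list(), so vdev names line up under the NAME column.)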
6293 */ 6294 width = cb->cb_namewidth; 6295 } 6296 6297 if (!first) { 6298 if (cb->cb_scripted) 6299 (void) fputc('\t', stdout); 6300 else 6301 (void) fputs(" ", stdout); 6302 } else { 6303 first = B_FALSE; 6304 } 6305 6306 right_justify = B_FALSE; 6307 if (pl->pl_prop != ZPROP_USERPROP) { 6308 if (zpool_get_prop(zhp, pl->pl_prop, property, 6309 sizeof (property), NULL, cb->cb_literal) != 0) 6310 propstr = "-"; 6311 else 6312 propstr = property; 6313 6314 right_justify = zpool_prop_align_right(pl->pl_prop); 6315 } else if ((zpool_prop_feature(pl->pl_user_prop) || 6316 zpool_prop_unsupported(pl->pl_user_prop)) && 6317 zpool_prop_get_feature(zhp, pl->pl_user_prop, property, 6318 sizeof (property)) == 0) { 6319 propstr = property; 6320 } else if (zfs_prop_user(pl->pl_user_prop) && 6321 zpool_get_userprop(zhp, pl->pl_user_prop, property, 6322 sizeof (property), NULL) == 0) { 6323 propstr = property; 6324 } else { 6325 propstr = "-"; 6326 } 6327 6328 /* 6329 * If this is being called in scripted mode, or if this is the 6330 * last column and it is left-justified, don't include a width 6331 * format specifier. 6332 */ 6333 if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify)) 6334 (void) fputs(propstr, stdout); 6335 else if (right_justify) 6336 (void) printf("%*s", (int)width, propstr); 6337 else 6338 (void) printf("%-*s", (int)width, propstr); 6339 } 6340 6341 (void) fputc('\n', stdout); 6342 } 6343 6344 static void 6345 print_one_column(zpool_prop_t prop, uint64_t value, const char *str, 6346 boolean_t scripted, boolean_t valid, enum zfs_nicenum_format format) 6347 { 6348 char propval[64]; 6349 boolean_t fixed; 6350 size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL); 6351 6352 switch (prop) { 6353 case ZPOOL_PROP_SIZE: 6354 case ZPOOL_PROP_EXPANDSZ: 6355 case ZPOOL_PROP_CHECKPOINT: 6356 case ZPOOL_PROP_DEDUPRATIO: 6357 if (value == 0) 6358 (void) strlcpy(propval, "-", sizeof (propval)); 6359 else 6360 zfs_nicenum_format(value, propval, sizeof (propval), 6361 format); 6362 break; 6363 case ZPOOL_PROP_FRAGMENTATION: 6364 if (value == ZFS_FRAG_INVALID) { 6365 (void) strlcpy(propval, "-", sizeof (propval)); 6366 } else if (format == ZFS_NICENUM_RAW) { 6367 (void) snprintf(propval, sizeof (propval), "%llu", 6368 (unsigned long long)value); 6369 } else { 6370 (void) snprintf(propval, sizeof (propval), "%llu%%", 6371 (unsigned long long)value); 6372 } 6373 break; 6374 case ZPOOL_PROP_CAPACITY: 6375 /* capacity value is in parts-per-10,000 (aka permyriad) */ 6376 if (format == ZFS_NICENUM_RAW) 6377 (void) snprintf(propval, sizeof (propval), "%llu", 6378 (unsigned long long)value / 100); 6379 else 6380 (void) snprintf(propval, sizeof (propval), 6381 value < 1000 ? "%1.2f%%" : value < 10000 ? 
6382 "%2.1f%%" : "%3.0f%%", value / 100.0); 6383 break; 6384 case ZPOOL_PROP_HEALTH: 6385 width = 8; 6386 (void) strlcpy(propval, str, sizeof (propval)); 6387 break; 6388 default: 6389 zfs_nicenum_format(value, propval, sizeof (propval), format); 6390 } 6391 6392 if (!valid) 6393 (void) strlcpy(propval, "-", sizeof (propval)); 6394 6395 if (scripted) 6396 (void) printf("\t%s", propval); 6397 else 6398 (void) printf(" %*s", (int)width, propval); 6399 } 6400 6401 /* 6402 * print static default line per vdev 6403 * not compatible with '-o' <proplist> option 6404 */ 6405 static void 6406 print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, 6407 list_cbdata_t *cb, int depth, boolean_t isspare) 6408 { 6409 nvlist_t **child; 6410 vdev_stat_t *vs; 6411 uint_t c, children; 6412 char *vname; 6413 boolean_t scripted = cb->cb_scripted; 6414 uint64_t islog = B_FALSE; 6415 const char *dashes = "%-*s - - - - " 6416 "- - - - -\n"; 6417 6418 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 6419 (uint64_t **)&vs, &c) == 0); 6420 6421 if (name != NULL) { 6422 boolean_t toplevel = (vs->vs_space != 0); 6423 uint64_t cap; 6424 enum zfs_nicenum_format format; 6425 const char *state; 6426 6427 if (cb->cb_literal) 6428 format = ZFS_NICENUM_RAW; 6429 else 6430 format = ZFS_NICENUM_1024; 6431 6432 if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) 6433 return; 6434 6435 if (scripted) 6436 (void) printf("\t%s", name); 6437 else if (strlen(name) + depth > cb->cb_namewidth) 6438 (void) printf("%*s%s", depth, "", name); 6439 else 6440 (void) printf("%*s%s%*s", depth, "", name, 6441 (int)(cb->cb_namewidth - strlen(name) - depth), ""); 6442 6443 /* 6444 * Print the properties for the individual vdevs. Some 6445 * properties are only applicable to toplevel vdevs. The 6446 * 'toplevel' boolean value is passed to the print_one_column() 6447 * to indicate that the value is valid. 6448 */ 6449 if (VDEV_STAT_VALID(vs_pspace, c) && vs->vs_pspace) 6450 print_one_column(ZPOOL_PROP_SIZE, vs->vs_pspace, NULL, 6451 scripted, B_TRUE, format); 6452 else 6453 print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, NULL, 6454 scripted, toplevel, format); 6455 print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, NULL, 6456 scripted, toplevel, format); 6457 print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc, 6458 NULL, scripted, toplevel, format); 6459 print_one_column(ZPOOL_PROP_CHECKPOINT, 6460 vs->vs_checkpoint_space, NULL, scripted, toplevel, format); 6461 print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, NULL, 6462 scripted, B_TRUE, format); 6463 print_one_column(ZPOOL_PROP_FRAGMENTATION, 6464 vs->vs_fragmentation, NULL, scripted, 6465 (vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel), 6466 format); 6467 cap = (vs->vs_space == 0) ? 
0 : 6468 (vs->vs_alloc * 10000 / vs->vs_space); 6469 print_one_column(ZPOOL_PROP_CAPACITY, cap, NULL, 6470 scripted, toplevel, format); 6471 print_one_column(ZPOOL_PROP_DEDUPRATIO, 0, NULL, 6472 scripted, toplevel, format); 6473 state = zpool_state_to_name(vs->vs_state, vs->vs_aux); 6474 if (isspare) { 6475 if (vs->vs_aux == VDEV_AUX_SPARED) 6476 state = "INUSE"; 6477 else if (vs->vs_state == VDEV_STATE_HEALTHY) 6478 state = "AVAIL"; 6479 } 6480 print_one_column(ZPOOL_PROP_HEALTH, 0, state, scripted, 6481 B_TRUE, format); 6482 (void) fputc('\n', stdout); 6483 } 6484 6485 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 6486 &child, &children) != 0) 6487 return; 6488 6489 /* list the normal vdevs first */ 6490 for (c = 0; c < children; c++) { 6491 uint64_t ishole = B_FALSE; 6492 6493 if (nvlist_lookup_uint64(child[c], 6494 ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole) 6495 continue; 6496 6497 if (nvlist_lookup_uint64(child[c], 6498 ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog) 6499 continue; 6500 6501 if (nvlist_exists(child[c], ZPOOL_CONFIG_ALLOCATION_BIAS)) 6502 continue; 6503 6504 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6505 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 6506 print_list_stats(zhp, vname, child[c], cb, depth + 2, B_FALSE); 6507 free(vname); 6508 } 6509 6510 /* list the classes: 'logs', 'dedup', and 'special' */ 6511 for (uint_t n = 0; n < ARRAY_SIZE(class_name); n++) { 6512 boolean_t printed = B_FALSE; 6513 6514 for (c = 0; c < children; c++) { 6515 const char *bias = NULL; 6516 const char *type = NULL; 6517 6518 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 6519 &islog) == 0 && islog) { 6520 bias = VDEV_ALLOC_CLASS_LOGS; 6521 } else { 6522 (void) nvlist_lookup_string(child[c], 6523 ZPOOL_CONFIG_ALLOCATION_BIAS, &bias); 6524 (void) nvlist_lookup_string(child[c], 6525 ZPOOL_CONFIG_TYPE, &type); 6526 } 6527 if (bias == NULL || strcmp(bias, class_name[n]) != 0) 6528 continue; 6529 if (!islog && strcmp(type, VDEV_TYPE_INDIRECT) == 0) 6530 continue; 6531 6532 if (!printed) { 6533 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6534 (void) printf(dashes, cb->cb_namewidth, 6535 class_name[n]); 6536 printed = B_TRUE; 6537 } 6538 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6539 cb->cb_name_flags | VDEV_NAME_TYPE_ID); 6540 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6541 B_FALSE); 6542 free(vname); 6543 } 6544 } 6545 6546 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 6547 &child, &children) == 0 && children > 0) { 6548 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6549 (void) printf(dashes, cb->cb_namewidth, "cache"); 6550 for (c = 0; c < children; c++) { 6551 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6552 cb->cb_name_flags); 6553 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6554 B_FALSE); 6555 free(vname); 6556 } 6557 } 6558 6559 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child, 6560 &children) == 0 && children > 0) { 6561 /* LINTED E_SEC_PRINTF_VAR_FMT */ 6562 (void) printf(dashes, cb->cb_namewidth, "spare"); 6563 for (c = 0; c < children; c++) { 6564 vname = zpool_vdev_name(g_zfs, zhp, child[c], 6565 cb->cb_name_flags); 6566 print_list_stats(zhp, vname, child[c], cb, depth + 2, 6567 B_TRUE); 6568 free(vname); 6569 } 6570 } 6571 } 6572 6573 /* 6574 * Generic callback function to list a pool. 
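 *
 * Invoked once per pool by pool_list_iter(): prints a single line of pool
 * properties via print_pool() and, when -v was given, walks the vdev tree
 * with print_list_stats() to add a per-vdev space breakdown.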
6575 */ 6576 static int 6577 list_callback(zpool_handle_t *zhp, void *data) 6578 { 6579 list_cbdata_t *cbp = data; 6580 6581 print_pool(zhp, cbp); 6582 6583 if (cbp->cb_verbose) { 6584 nvlist_t *config, *nvroot; 6585 6586 config = zpool_get_config(zhp, NULL); 6587 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 6588 &nvroot) == 0); 6589 print_list_stats(zhp, NULL, nvroot, cbp, 0, B_FALSE); 6590 } 6591 6592 return (0); 6593 } 6594 6595 /* 6596 * Set the minimum pool/vdev name column width. The width must be at least 9, 6597 * but may be as large as needed. 6598 */ 6599 static int 6600 get_namewidth_list(zpool_handle_t *zhp, void *data) 6601 { 6602 list_cbdata_t *cb = data; 6603 int width; 6604 6605 width = get_namewidth(zhp, cb->cb_namewidth, 6606 cb->cb_name_flags | VDEV_NAME_TYPE_ID, cb->cb_verbose); 6607 6608 if (width < 9) 6609 width = 9; 6610 6611 cb->cb_namewidth = width; 6612 6613 return (0); 6614 } 6615 6616 /* 6617 * zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]] 6618 * 6619 * -g Display guid for individual vdev name. 6620 * -H Scripted mode. Don't display headers, and separate properties 6621 * by a single tab. 6622 * -L Follow links when resolving vdev path name. 6623 * -o List of properties to display. Defaults to 6624 * "name,size,allocated,free,expandsize,fragmentation,capacity," 6625 * "dedupratio,health,altroot" 6626 * -p Display values in parsable (exact) format. 6627 * -P Display full path for vdev name. 6628 * -T Display a timestamp in date(1) or Unix format 6629 * 6630 * List all pools in the system, whether or not they're healthy. Output space 6631 * statistics for each one, as well as health status summary. 6632 */ 6633 int 6634 zpool_do_list(int argc, char **argv) 6635 { 6636 int c; 6637 int ret = 0; 6638 list_cbdata_t cb = { 0 }; 6639 static char default_props[] = 6640 "name,size,allocated,free,checkpoint,expandsize,fragmentation," 6641 "capacity,dedupratio,health,altroot"; 6642 char *props = default_props; 6643 float interval = 0; 6644 unsigned long count = 0; 6645 zpool_list_t *list; 6646 boolean_t first = B_TRUE; 6647 current_prop_type = ZFS_TYPE_POOL; 6648 6649 /* check options */ 6650 while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) { 6651 switch (c) { 6652 case 'g': 6653 cb.cb_name_flags |= VDEV_NAME_GUID; 6654 break; 6655 case 'H': 6656 cb.cb_scripted = B_TRUE; 6657 break; 6658 case 'L': 6659 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 6660 break; 6661 case 'o': 6662 props = optarg; 6663 break; 6664 case 'P': 6665 cb.cb_name_flags |= VDEV_NAME_PATH; 6666 break; 6667 case 'p': 6668 cb.cb_literal = B_TRUE; 6669 break; 6670 case 'T': 6671 get_timestamp_arg(*optarg); 6672 break; 6673 case 'v': 6674 cb.cb_verbose = B_TRUE; 6675 cb.cb_namewidth = 8; /* 8 until precalc is avail */ 6676 break; 6677 case ':': 6678 (void) fprintf(stderr, gettext("missing argument for " 6679 "'%c' option\n"), optopt); 6680 usage(B_FALSE); 6681 break; 6682 case '?': 6683 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6684 optopt); 6685 usage(B_FALSE); 6686 } 6687 } 6688 6689 argc -= optind; 6690 argv += optind; 6691 6692 get_interval_count(&argc, argv, &interval, &count); 6693 6694 if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0) 6695 usage(B_FALSE); 6696 6697 for (;;) { 6698 if ((list = pool_list_get(argc, argv, &cb.cb_proplist, 6699 ZFS_TYPE_POOL, cb.cb_literal, &ret)) == NULL) 6700 return (1); 6701 6702 if (pool_list_count(list) == 0) 6703 break; 6704 6705 cb.cb_namewidth = 0; 6706 (void) 
pool_list_iter(list, B_FALSE, get_namewidth_list, &cb); 6707 6708 if (timestamp_fmt != NODATE) 6709 print_timestamp(timestamp_fmt); 6710 6711 if (!cb.cb_scripted && (first || cb.cb_verbose)) { 6712 print_header(&cb); 6713 first = B_FALSE; 6714 } 6715 ret = pool_list_iter(list, B_TRUE, list_callback, &cb); 6716 6717 if (interval == 0) 6718 break; 6719 6720 if (count != 0 && --count == 0) 6721 break; 6722 6723 pool_list_free(list); 6724 6725 (void) fflush(stdout); 6726 (void) fsleep(interval); 6727 } 6728 6729 if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) { 6730 (void) printf(gettext("no pools available\n")); 6731 ret = 0; 6732 } 6733 6734 pool_list_free(list); 6735 zprop_free_list(cb.cb_proplist); 6736 return (ret); 6737 } 6738 6739 static int 6740 zpool_do_attach_or_replace(int argc, char **argv, int replacing) 6741 { 6742 boolean_t force = B_FALSE; 6743 boolean_t rebuild = B_FALSE; 6744 boolean_t wait = B_FALSE; 6745 int c; 6746 nvlist_t *nvroot; 6747 char *poolname, *old_disk, *new_disk; 6748 zpool_handle_t *zhp; 6749 nvlist_t *props = NULL; 6750 char *propval; 6751 int ret; 6752 6753 /* check options */ 6754 while ((c = getopt(argc, argv, "fo:sw")) != -1) { 6755 switch (c) { 6756 case 'f': 6757 force = B_TRUE; 6758 break; 6759 case 'o': 6760 if ((propval = strchr(optarg, '=')) == NULL) { 6761 (void) fprintf(stderr, gettext("missing " 6762 "'=' for -o option\n")); 6763 usage(B_FALSE); 6764 } 6765 *propval = '\0'; 6766 propval++; 6767 6768 if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) || 6769 (add_prop_list(optarg, propval, &props, B_TRUE))) 6770 usage(B_FALSE); 6771 break; 6772 case 's': 6773 rebuild = B_TRUE; 6774 break; 6775 case 'w': 6776 wait = B_TRUE; 6777 break; 6778 case '?': 6779 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6780 optopt); 6781 usage(B_FALSE); 6782 } 6783 } 6784 6785 argc -= optind; 6786 argv += optind; 6787 6788 /* get pool name and check number of arguments */ 6789 if (argc < 1) { 6790 (void) fprintf(stderr, gettext("missing pool name argument\n")); 6791 usage(B_FALSE); 6792 } 6793 6794 poolname = argv[0]; 6795 6796 if (argc < 2) { 6797 (void) fprintf(stderr, 6798 gettext("missing <device> specification\n")); 6799 usage(B_FALSE); 6800 } 6801 6802 old_disk = argv[1]; 6803 6804 if (argc < 3) { 6805 if (!replacing) { 6806 (void) fprintf(stderr, 6807 gettext("missing <new_device> specification\n")); 6808 usage(B_FALSE); 6809 } 6810 new_disk = old_disk; 6811 argc -= 1; 6812 argv += 1; 6813 } else { 6814 new_disk = argv[2]; 6815 argc -= 2; 6816 argv += 2; 6817 } 6818 6819 if (argc > 1) { 6820 (void) fprintf(stderr, gettext("too many arguments\n")); 6821 usage(B_FALSE); 6822 } 6823 6824 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) { 6825 nvlist_free(props); 6826 return (1); 6827 } 6828 6829 if (zpool_get_config(zhp, NULL) == NULL) { 6830 (void) fprintf(stderr, gettext("pool '%s' is unavailable\n"), 6831 poolname); 6832 zpool_close(zhp); 6833 nvlist_free(props); 6834 return (1); 6835 } 6836 6837 /* unless manually specified use "ashift" pool property (if set) */ 6838 if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) { 6839 int intval; 6840 zprop_source_t src; 6841 char strval[ZPOOL_MAXPROPLEN]; 6842 6843 intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src); 6844 if (src != ZPROP_SRC_DEFAULT) { 6845 (void) sprintf(strval, "%" PRId32, intval); 6846 verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval, 6847 &props, B_TRUE) == 0); 6848 } 6849 } 6850 6851 nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE, 6852 argc, 
argv); 6853 if (nvroot == NULL) { 6854 zpool_close(zhp); 6855 nvlist_free(props); 6856 return (1); 6857 } 6858 6859 ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing, 6860 rebuild); 6861 6862 if (ret == 0 && wait) { 6863 zpool_wait_activity_t activity = ZPOOL_WAIT_RESILVER; 6864 char raidz_prefix[] = "raidz"; 6865 if (replacing) { 6866 activity = ZPOOL_WAIT_REPLACE; 6867 } else if (strncmp(old_disk, 6868 raidz_prefix, strlen(raidz_prefix)) == 0) { 6869 activity = ZPOOL_WAIT_RAIDZ_EXPAND; 6870 } 6871 ret = zpool_wait(zhp, activity); 6872 } 6873 6874 nvlist_free(props); 6875 nvlist_free(nvroot); 6876 zpool_close(zhp); 6877 6878 return (ret); 6879 } 6880 6881 /* 6882 * zpool replace [-fsw] [-o property=value] <pool> <device> <new_device> 6883 * 6884 * -f Force attach, even if <new_device> appears to be in use. 6885 * -s Use sequential instead of healing reconstruction for resilver. 6886 * -o Set property=value. 6887 * -w Wait for replacing to complete before returning. 6888 * 6889 * Replace <device> with <new_device>. 6890 */ 6891 int 6892 zpool_do_replace(int argc, char **argv) 6893 { 6894 return (zpool_do_attach_or_replace(argc, argv, B_TRUE)); 6895 } 6896 6897 /* 6898 * zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device> 6899 * 6900 * -f Force attach, even if <new_device> appears to be in use. 6901 * -s Use sequential instead of healing reconstruction for resilver. 6902 * -o Set property=value. 6903 * -w Wait for resilvering (mirror) or expansion (raidz) to complete 6904 * before returning. 6905 * 6906 * Attach <new_device> to a <device> or <vdev>, where the vdev can be of type 6907 * mirror or raidz. If <device> is not part of a mirror, then <device> will 6908 * be transformed into a mirror of <device> and <new_device>. When a mirror 6909 * is involved, <new_device> will begin life with a DTL of [0, now], and will 6910 * immediately begin to resilver itself. For the raidz case, an expansion will 6911 * commence and reflow the raidz data across all the disks including the 6912 * <new_device>. 6913 */ 6914 int 6915 zpool_do_attach(int argc, char **argv) 6916 { 6917 return (zpool_do_attach_or_replace(argc, argv, B_FALSE)); 6918 } 6919 6920 /* 6921 * zpool detach [-f] <pool> <device> 6922 * 6923 * -f Force detach of <device>, even if DTLs argue against it 6924 * (not supported yet) 6925 * 6926 * Detach a device from a mirror. The operation will be refused if <device> 6927 * is the last device in the mirror, or if the DTLs indicate that this device 6928 * has the only valid copy of some data.
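 *
 * Example (illustrative names): 'zpool detach tank sdb' drops sdb from
 * its mirror in pool 'tank', leaving the surviving device(s) intact.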
6929 */ 6930 int 6931 zpool_do_detach(int argc, char **argv) 6932 { 6933 int c; 6934 char *poolname, *path; 6935 zpool_handle_t *zhp; 6936 int ret; 6937 6938 /* check options */ 6939 while ((c = getopt(argc, argv, "")) != -1) { 6940 switch (c) { 6941 case '?': 6942 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 6943 optopt); 6944 usage(B_FALSE); 6945 } 6946 } 6947 6948 argc -= optind; 6949 argv += optind; 6950 6951 /* get pool name and check number of arguments */ 6952 if (argc < 1) { 6953 (void) fprintf(stderr, gettext("missing pool name argument\n")); 6954 usage(B_FALSE); 6955 } 6956 6957 if (argc < 2) { 6958 (void) fprintf(stderr, 6959 gettext("missing <device> specification\n")); 6960 usage(B_FALSE); 6961 } 6962 6963 poolname = argv[0]; 6964 path = argv[1]; 6965 6966 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 6967 return (1); 6968 6969 ret = zpool_vdev_detach(zhp, path); 6970 6971 zpool_close(zhp); 6972 6973 return (ret); 6974 } 6975 6976 /* 6977 * zpool split [-gLnP] [-o prop=val] ... 6978 * [-o mntopt] ... 6979 * [-R altroot] <pool> <newpool> [<device> ...] 6980 * 6981 * -g Display guid for individual vdev name. 6982 * -L Follow links when resolving vdev path name. 6983 * -n Do not split the pool, but display the resulting layout if 6984 * it were to be split. 6985 * -o Set property=value, or set mount options. 6986 * -P Display full path for vdev name. 6987 * -R Mount the split-off pool under an alternate root. 6988 * -l Load encryption keys while importing. 6989 * 6990 * Splits the named pool and gives it the new pool name. Devices to be split 6991 * off may be listed, provided that no more than one device is specified 6992 * per top-level vdev mirror. The newly split pool is left in an exported 6993 * state unless -R is specified. 6994 * 6995 * Restrictions: the top-level of the pool must only be made up of 6996 * mirrors; all devices in the pool must be healthy; no device may be 6997 undergoing a resilvering operation.
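 *
 * Example (illustrative names): 'zpool split tank newtank' detaches one
 * device from each top-level mirror in 'tank' and composes them into an
 * exported pool 'newtank'; adding '-R /mnt' imports the new pool
 * immediately under that alternate root.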
6998 */ 6999 int 7000 zpool_do_split(int argc, char **argv) 7001 { 7002 char *srcpool, *newpool, *propval; 7003 char *mntopts = NULL; 7004 splitflags_t flags; 7005 int c, ret = 0; 7006 int ms_status = 0; 7007 boolean_t loadkeys = B_FALSE; 7008 zpool_handle_t *zhp; 7009 nvlist_t *config, *props = NULL; 7010 7011 flags.dryrun = B_FALSE; 7012 flags.import = B_FALSE; 7013 flags.name_flags = 0; 7014 7015 /* check options */ 7016 while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) { 7017 switch (c) { 7018 case 'g': 7019 flags.name_flags |= VDEV_NAME_GUID; 7020 break; 7021 case 'L': 7022 flags.name_flags |= VDEV_NAME_FOLLOW_LINKS; 7023 break; 7024 case 'R': 7025 flags.import = B_TRUE; 7026 if (add_prop_list( 7027 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg, 7028 &props, B_TRUE) != 0) { 7029 nvlist_free(props); 7030 usage(B_FALSE); 7031 } 7032 break; 7033 case 'l': 7034 loadkeys = B_TRUE; 7035 break; 7036 case 'n': 7037 flags.dryrun = B_TRUE; 7038 break; 7039 case 'o': 7040 if ((propval = strchr(optarg, '=')) != NULL) { 7041 *propval = '\0'; 7042 propval++; 7043 if (add_prop_list(optarg, propval, 7044 &props, B_TRUE) != 0) { 7045 nvlist_free(props); 7046 usage(B_FALSE); 7047 } 7048 } else { 7049 mntopts = optarg; 7050 } 7051 break; 7052 case 'P': 7053 flags.name_flags |= VDEV_NAME_PATH; 7054 break; 7055 case ':': 7056 (void) fprintf(stderr, gettext("missing argument for " 7057 "'%c' option\n"), optopt); 7058 usage(B_FALSE); 7059 break; 7060 case '?': 7061 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7062 optopt); 7063 usage(B_FALSE); 7064 break; 7065 } 7066 } 7067 7068 if (!flags.import && mntopts != NULL) { 7069 (void) fprintf(stderr, gettext("setting mntopts is only " 7070 "valid when importing the pool\n")); 7071 usage(B_FALSE); 7072 } 7073 7074 if (!flags.import && loadkeys) { 7075 (void) fprintf(stderr, gettext("loading keys is only " 7076 "valid when importing the pool\n")); 7077 usage(B_FALSE); 7078 } 7079 7080 argc -= optind; 7081 argv += optind; 7082 7083 if (argc < 1) { 7084 (void) fprintf(stderr, gettext("Missing pool name\n")); 7085 usage(B_FALSE); 7086 } 7087 if (argc < 2) { 7088 (void) fprintf(stderr, gettext("Missing new pool name\n")); 7089 usage(B_FALSE); 7090 } 7091 7092 srcpool = argv[0]; 7093 newpool = argv[1]; 7094 7095 argc -= 2; 7096 argv += 2; 7097 7098 if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) { 7099 nvlist_free(props); 7100 return (1); 7101 } 7102 7103 config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv); 7104 if (config == NULL) { 7105 ret = 1; 7106 } else { 7107 if (flags.dryrun) { 7108 (void) printf(gettext("would create '%s' with the " 7109 "following layout:\n\n"), newpool); 7110 print_vdev_tree(NULL, newpool, config, 0, "", 7111 flags.name_flags); 7112 print_vdev_tree(NULL, "dedup", config, 0, 7113 VDEV_ALLOC_BIAS_DEDUP, 0); 7114 print_vdev_tree(NULL, "special", config, 0, 7115 VDEV_ALLOC_BIAS_SPECIAL, 0); 7116 } 7117 } 7118 7119 zpool_close(zhp); 7120 7121 if (ret != 0 || flags.dryrun || !flags.import) { 7122 nvlist_free(config); 7123 nvlist_free(props); 7124 return (ret); 7125 } 7126 7127 /* 7128 * The split was successful. Now we need to open the new 7129 * pool and import it. 
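 * (Without -R, flags.import stays false and we already returned above,
 * leaving the new pool exported.)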
7130 */ 7131 if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) { 7132 nvlist_free(config); 7133 nvlist_free(props); 7134 return (1); 7135 } 7136 7137 if (loadkeys) { 7138 ret = zfs_crypto_attempt_load_keys(g_zfs, newpool); 7139 if (ret != 0) 7140 ret = 1; 7141 } 7142 7143 if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) { 7144 ms_status = zpool_enable_datasets(zhp, mntopts, 0); 7145 if (ms_status == EZFS_SHAREFAILED) { 7146 (void) fprintf(stderr, gettext("Split was successful, " 7147 "datasets are mounted but sharing of some datasets " 7148 "has failed\n")); 7149 } else if (ms_status == EZFS_MOUNTFAILED) { 7150 (void) fprintf(stderr, gettext("Split was successful" 7151 ", but some datasets could not be mounted\n")); 7152 (void) fprintf(stderr, gettext("Try doing '%s' with a " 7153 "different altroot\n"), "zpool import"); 7154 } 7155 } 7156 zpool_close(zhp); 7157 nvlist_free(config); 7158 nvlist_free(props); 7159 7160 return (ret); 7161 } 7162 7163 7164 /* 7165 * zpool online [--power] <pool> <device> ... 7166 * 7167 * --power: Power on the enclosure slot to the drive (if possible) 7168 */ 7169 int 7170 zpool_do_online(int argc, char **argv) 7171 { 7172 int c, i; 7173 char *poolname; 7174 zpool_handle_t *zhp; 7175 int ret = 0; 7176 vdev_state_t newstate; 7177 int flags = 0; 7178 boolean_t is_power_on = B_FALSE; 7179 struct option long_options[] = { 7180 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 7181 {0, 0, 0, 0} 7182 }; 7183 7184 /* check options */ 7185 while ((c = getopt_long(argc, argv, "e", long_options, NULL)) != -1) { 7186 switch (c) { 7187 case 'e': 7188 flags |= ZFS_ONLINE_EXPAND; 7189 break; 7190 case ZPOOL_OPTION_POWER: 7191 is_power_on = B_TRUE; 7192 break; 7193 case '?': 7194 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7195 optopt); 7196 usage(B_FALSE); 7197 } 7198 } 7199 7200 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT")) 7201 is_power_on = B_TRUE; 7202 7203 argc -= optind; 7204 argv += optind; 7205 7206 /* get pool name and check number of arguments */ 7207 if (argc < 1) { 7208 (void) fprintf(stderr, gettext("missing pool name\n")); 7209 usage(B_FALSE); 7210 } 7211 if (argc < 2) { 7212 (void) fprintf(stderr, gettext("missing device name\n")); 7213 usage(B_FALSE); 7214 } 7215 7216 poolname = argv[0]; 7217 7218 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7219 return (1); 7220 7221 for (i = 1; i < argc; i++) { 7222 vdev_state_t oldstate; 7223 boolean_t avail_spare, l2cache; 7224 int rc; 7225 7226 if (is_power_on) { 7227 rc = zpool_power_on_and_disk_wait(zhp, argv[i]); 7228 if (rc == ENOTSUP) { 7229 (void) fprintf(stderr, 7230 gettext("Power control not supported\n")); 7231 } 7232 if (rc != 0) 7233 return (rc); 7234 } 7235 7236 nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare, 7237 &l2cache, NULL); 7238 if (tgt == NULL) { 7239 ret = 1; 7240 continue; 7241 } 7242 uint_t vsc; 7243 oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt, 7244 ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state; 7245 if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) { 7246 if (newstate != VDEV_STATE_HEALTHY) { 7247 (void) printf(gettext("warning: device '%s' " 7248 "onlined, but remains in faulted state\n"), 7249 argv[i]); 7250 if (newstate == VDEV_STATE_FAULTED) 7251 (void) printf(gettext("use 'zpool " 7252 "clear' to restore a faulted " 7253 "device\n")); 7254 else 7255 (void) printf(gettext("use 'zpool " 7256 "replace' to replace devices " 7257 "that are no longer present\n")); 7258 if ((flags & ZFS_ONLINE_EXPAND)) { 7259 (void) printf(gettext("%s: 
failed " 7260 "to expand usable space on " 7261 "unhealthy device '%s'\n"), 7262 (oldstate >= VDEV_STATE_DEGRADED ? 7263 "error" : "warning"), argv[i]); 7264 if (oldstate >= VDEV_STATE_DEGRADED) { 7265 ret = 1; 7266 break; 7267 } 7268 } 7269 } 7270 } else { 7271 ret = 1; 7272 } 7273 } 7274 7275 zpool_close(zhp); 7276 7277 return (ret); 7278 } 7279 7280 /* 7281 * zpool offline [-ft]|[--power] <pool> <device> ... 7282 * 7283 * 7284 * -f Force the device into a faulted state. 7285 * 7286 * -t Only take the device off-line temporarily. The offline/faulted 7287 * state will not be persistent across reboots. 7288 * 7289 * --power Power off the enclosure slot to the drive (if possible) 7290 */ 7291 int 7292 zpool_do_offline(int argc, char **argv) 7293 { 7294 int c, i; 7295 char *poolname; 7296 zpool_handle_t *zhp; 7297 int ret = 0; 7298 boolean_t istmp = B_FALSE; 7299 boolean_t fault = B_FALSE; 7300 boolean_t is_power_off = B_FALSE; 7301 7302 struct option long_options[] = { 7303 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 7304 {0, 0, 0, 0} 7305 }; 7306 7307 /* check options */ 7308 while ((c = getopt_long(argc, argv, "ft", long_options, NULL)) != -1) { 7309 switch (c) { 7310 case 'f': 7311 fault = B_TRUE; 7312 break; 7313 case 't': 7314 istmp = B_TRUE; 7315 break; 7316 case ZPOOL_OPTION_POWER: 7317 is_power_off = B_TRUE; 7318 break; 7319 case '?': 7320 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7321 optopt); 7322 usage(B_FALSE); 7323 } 7324 } 7325 7326 if (is_power_off && fault) { 7327 (void) fprintf(stderr, 7328 gettext("-0 and -f cannot be used together\n")); 7329 usage(B_FALSE); 7330 return (1); 7331 } 7332 7333 if (is_power_off && istmp) { 7334 (void) fprintf(stderr, 7335 gettext("-0 and -t cannot be used together\n")); 7336 usage(B_FALSE); 7337 return (1); 7338 } 7339 7340 argc -= optind; 7341 argv += optind; 7342 7343 /* get pool name and check number of arguments */ 7344 if (argc < 1) { 7345 (void) fprintf(stderr, gettext("missing pool name\n")); 7346 usage(B_FALSE); 7347 } 7348 if (argc < 2) { 7349 (void) fprintf(stderr, gettext("missing device name\n")); 7350 usage(B_FALSE); 7351 } 7352 7353 poolname = argv[0]; 7354 7355 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7356 return (1); 7357 7358 for (i = 1; i < argc; i++) { 7359 uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]); 7360 if (is_power_off) { 7361 /* 7362 * Note: we have to power off first, then set REMOVED, 7363 * or else zpool_vdev_set_removed_state() returns 7364 * EAGAIN. 7365 */ 7366 ret = zpool_power_off(zhp, argv[i]); 7367 if (ret != 0) { 7368 (void) fprintf(stderr, "%s %s %d\n", 7369 gettext("unable to power off slot for"), 7370 argv[i], ret); 7371 } 7372 zpool_vdev_set_removed_state(zhp, guid, VDEV_AUX_NONE); 7373 7374 } else if (fault) { 7375 vdev_aux_t aux; 7376 if (istmp == B_FALSE) { 7377 /* Force the fault to persist across imports */ 7378 aux = VDEV_AUX_EXTERNAL_PERSIST; 7379 } else { 7380 aux = VDEV_AUX_EXTERNAL; 7381 } 7382 7383 if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0) 7384 ret = 1; 7385 } else { 7386 if (zpool_vdev_offline(zhp, argv[i], istmp) != 0) 7387 ret = 1; 7388 } 7389 } 7390 7391 zpool_close(zhp); 7392 7393 return (ret); 7394 } 7395 7396 /* 7397 * zpool clear [-nF]|[--power] <pool> [device] 7398 * 7399 * Clear all errors associated with a pool or a particular device. 
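 *
 * Example (illustrative names): 'zpool clear tank' resets the error
 * counts on every device in 'tank', while 'zpool clear tank sda' limits
 * the reset to the single device sda.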
7400 */ 7401 int 7402 zpool_do_clear(int argc, char **argv) 7403 { 7404 int c; 7405 int ret = 0; 7406 boolean_t dryrun = B_FALSE; 7407 boolean_t do_rewind = B_FALSE; 7408 boolean_t xtreme_rewind = B_FALSE; 7409 boolean_t is_power_on = B_FALSE; 7410 uint32_t rewind_policy = ZPOOL_NO_REWIND; 7411 nvlist_t *policy = NULL; 7412 zpool_handle_t *zhp; 7413 char *pool, *device; 7414 7415 struct option long_options[] = { 7416 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 7417 {0, 0, 0, 0} 7418 }; 7419 7420 /* check options */ 7421 while ((c = getopt_long(argc, argv, "FnX", long_options, 7422 NULL)) != -1) { 7423 switch (c) { 7424 case 'F': 7425 do_rewind = B_TRUE; 7426 break; 7427 case 'n': 7428 dryrun = B_TRUE; 7429 break; 7430 case 'X': 7431 xtreme_rewind = B_TRUE; 7432 break; 7433 case ZPOOL_OPTION_POWER: 7434 is_power_on = B_TRUE; 7435 break; 7436 case '?': 7437 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7438 optopt); 7439 usage(B_FALSE); 7440 } 7441 } 7442 7443 if (libzfs_envvar_is_set("ZPOOL_AUTO_POWER_ON_SLOT")) 7444 is_power_on = B_TRUE; 7445 7446 argc -= optind; 7447 argv += optind; 7448 7449 if (argc < 1) { 7450 (void) fprintf(stderr, gettext("missing pool name\n")); 7451 usage(B_FALSE); 7452 } 7453 7454 if (argc > 2) { 7455 (void) fprintf(stderr, gettext("too many arguments\n")); 7456 usage(B_FALSE); 7457 } 7458 7459 if ((dryrun || xtreme_rewind) && !do_rewind) { 7460 (void) fprintf(stderr, 7461 gettext("-n or -X only meaningful with -F\n")); 7462 usage(B_FALSE); 7463 } 7464 if (dryrun) 7465 rewind_policy = ZPOOL_TRY_REWIND; 7466 else if (do_rewind) 7467 rewind_policy = ZPOOL_DO_REWIND; 7468 if (xtreme_rewind) 7469 rewind_policy |= ZPOOL_EXTREME_REWIND; 7470 7471 /* In future, further rewind policy choices can be passed along here */ 7472 if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 7473 nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, 7474 rewind_policy) != 0) { 7475 return (1); 7476 } 7477 7478 pool = argv[0]; 7479 device = argc == 2 ? argv[1] : NULL; 7480 7481 if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) { 7482 nvlist_free(policy); 7483 return (1); 7484 } 7485 7486 if (is_power_on) { 7487 if (device == NULL) { 7488 zpool_power_on_pool_and_wait_for_devices(zhp); 7489 } else { 7490 zpool_power_on_and_disk_wait(zhp, device); 7491 } 7492 } 7493 7494 if (zpool_clear(zhp, device, policy) != 0) 7495 ret = 1; 7496 7497 zpool_close(zhp); 7498 7499 nvlist_free(policy); 7500 7501 return (ret); 7502 } 7503 7504 /* 7505 * zpool reguid <pool> 7506 */ 7507 int 7508 zpool_do_reguid(int argc, char **argv) 7509 { 7510 int c; 7511 char *poolname; 7512 zpool_handle_t *zhp; 7513 int ret = 0; 7514 7515 /* check options */ 7516 while ((c = getopt(argc, argv, "")) != -1) { 7517 switch (c) { 7518 case '?': 7519 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7520 optopt); 7521 usage(B_FALSE); 7522 } 7523 } 7524 7525 argc -= optind; 7526 argv += optind; 7527 7528 /* get pool name and check number of arguments */ 7529 if (argc < 1) { 7530 (void) fprintf(stderr, gettext("missing pool name\n")); 7531 usage(B_FALSE); 7532 } 7533 7534 if (argc > 1) { 7535 (void) fprintf(stderr, gettext("too many arguments\n")); 7536 usage(B_FALSE); 7537 } 7538 7539 poolname = argv[0]; 7540 if ((zhp = zpool_open(g_zfs, poolname)) == NULL) 7541 return (1); 7542 7543 ret = zpool_reguid(zhp); 7544 7545 zpool_close(zhp); 7546 return (ret); 7547 } 7548 7549 7550 /* 7551 * zpool reopen <pool> 7552 * 7553 * Reopen the pool so that the kernel can update the sizes of all vdevs. 
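 *
 * -n Do not restart an in-progress scrub when reopening the devices.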
7554 */ 7555 int 7556 zpool_do_reopen(int argc, char **argv) 7557 { 7558 int c; 7559 int ret = 0; 7560 boolean_t scrub_restart = B_TRUE; 7561 7562 /* check options */ 7563 while ((c = getopt(argc, argv, "n")) != -1) { 7564 switch (c) { 7565 case 'n': 7566 scrub_restart = B_FALSE; 7567 break; 7568 case '?': 7569 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7570 optopt); 7571 usage(B_FALSE); 7572 } 7573 } 7574 7575 argc -= optind; 7576 argv += optind; 7577 7578 /* if argc == 0 we will execute zpool_reopen_one on all pools */ 7579 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7580 B_FALSE, zpool_reopen_one, &scrub_restart); 7581 7582 return (ret); 7583 } 7584 7585 typedef struct scrub_cbdata { 7586 int cb_type; 7587 pool_scrub_cmd_t cb_scrub_cmd; 7588 } scrub_cbdata_t; 7589 7590 static boolean_t 7591 zpool_has_checkpoint(zpool_handle_t *zhp) 7592 { 7593 nvlist_t *config, *nvroot; 7594 7595 config = zpool_get_config(zhp, NULL); 7596 7597 if (config != NULL) { 7598 pool_checkpoint_stat_t *pcs = NULL; 7599 uint_t c; 7600 7601 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 7602 (void) nvlist_lookup_uint64_array(nvroot, 7603 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 7604 7605 if (pcs == NULL || pcs->pcs_state == CS_NONE) 7606 return (B_FALSE); 7607 7608 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS || 7609 pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 7610 return (B_TRUE); 7611 } 7612 7613 return (B_FALSE); 7614 } 7615 7616 static int 7617 scrub_callback(zpool_handle_t *zhp, void *data) 7618 { 7619 scrub_cbdata_t *cb = data; 7620 int err; 7621 7622 /* 7623 * Ignore faulted pools. 7624 */ 7625 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 7626 (void) fprintf(stderr, gettext("cannot scan '%s': pool is " 7627 "currently unavailable\n"), zpool_get_name(zhp)); 7628 return (1); 7629 } 7630 7631 err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd); 7632 7633 if (err == 0 && zpool_has_checkpoint(zhp) && 7634 cb->cb_type == POOL_SCAN_SCRUB) { 7635 (void) printf(gettext("warning: will not scrub state that " 7636 "belongs to the checkpoint of pool '%s'\n"), 7637 zpool_get_name(zhp)); 7638 } 7639 7640 return (err != 0); 7641 } 7642 7643 static int 7644 wait_callback(zpool_handle_t *zhp, void *data) 7645 { 7646 zpool_wait_activity_t *act = data; 7647 return (zpool_wait(zhp, *act)); 7648 } 7649 7650 /* 7651 * zpool scrub [-s | -p] [-w] [-e] <pool> ... 7652 * 7653 * -e Only scrub blocks in the error log. 7654 * -s Stop. Stops any in-progress scrub. 7655 * -p Pause. Pause in-progress scrub. 7656 * -w Wait. Blocks until scrub has completed. 
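 *
 * Example (illustrative pool name): 'zpool scrub -w tank' starts a scrub
 * and blocks until it finishes; 'zpool scrub -p tank' pauses it, and a
 * later plain 'zpool scrub tank' resumes from where it stopped.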
7657 */ 7658 int 7659 zpool_do_scrub(int argc, char **argv) 7660 { 7661 int c; 7662 scrub_cbdata_t cb; 7663 boolean_t wait = B_FALSE; 7664 int error; 7665 7666 cb.cb_type = POOL_SCAN_SCRUB; 7667 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7668 7669 boolean_t is_error_scrub = B_FALSE; 7670 boolean_t is_pause = B_FALSE; 7671 boolean_t is_stop = B_FALSE; 7672 7673 /* check options */ 7674 while ((c = getopt(argc, argv, "spwe")) != -1) { 7675 switch (c) { 7676 case 'e': 7677 is_error_scrub = B_TRUE; 7678 break; 7679 case 's': 7680 is_stop = B_TRUE; 7681 break; 7682 case 'p': 7683 is_pause = B_TRUE; 7684 break; 7685 case 'w': 7686 wait = B_TRUE; 7687 break; 7688 case '?': 7689 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7690 optopt); 7691 usage(B_FALSE); 7692 } 7693 } 7694 7695 if (is_pause && is_stop) { 7696 (void) fprintf(stderr, gettext("invalid option " 7697 "combination :-s and -p are mutually exclusive\n")); 7698 usage(B_FALSE); 7699 } else { 7700 if (is_error_scrub) 7701 cb.cb_type = POOL_SCAN_ERRORSCRUB; 7702 7703 if (is_pause) { 7704 cb.cb_scrub_cmd = POOL_SCRUB_PAUSE; 7705 } else if (is_stop) { 7706 cb.cb_type = POOL_SCAN_NONE; 7707 } else { 7708 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7709 } 7710 } 7711 7712 if (wait && (cb.cb_type == POOL_SCAN_NONE || 7713 cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) { 7714 (void) fprintf(stderr, gettext("invalid option combination: " 7715 "-w cannot be used with -p or -s\n")); 7716 usage(B_FALSE); 7717 } 7718 7719 argc -= optind; 7720 argv += optind; 7721 7722 if (argc < 1) { 7723 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7724 usage(B_FALSE); 7725 } 7726 7727 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7728 B_FALSE, scrub_callback, &cb); 7729 7730 if (wait && !error) { 7731 zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB; 7732 error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7733 B_FALSE, wait_callback, &act); 7734 } 7735 7736 return (error); 7737 } 7738 7739 /* 7740 * zpool resilver <pool> ... 7741 * 7742 * Restarts any in-progress resilver 7743 */ 7744 int 7745 zpool_do_resilver(int argc, char **argv) 7746 { 7747 int c; 7748 scrub_cbdata_t cb; 7749 7750 cb.cb_type = POOL_SCAN_RESILVER; 7751 cb.cb_scrub_cmd = POOL_SCRUB_NORMAL; 7752 7753 /* check options */ 7754 while ((c = getopt(argc, argv, "")) != -1) { 7755 switch (c) { 7756 case '?': 7757 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 7758 optopt); 7759 usage(B_FALSE); 7760 } 7761 } 7762 7763 argc -= optind; 7764 argv += optind; 7765 7766 if (argc < 1) { 7767 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7768 usage(B_FALSE); 7769 } 7770 7771 return (for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 7772 B_FALSE, scrub_callback, &cb)); 7773 } 7774 7775 /* 7776 * zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...] 7777 * 7778 * -c Cancel. Ends any in-progress trim. 7779 * -d Secure trim. Requires kernel and device support. 7780 * -r <rate> Sets the TRIM rate in bytes (per second). Supports 7781 * adding a multiplier suffix such as 'k' or 'm'. 7782 * -s Suspend. TRIM can then be restarted with no flags. 7783 * -w Wait. Blocks until trimming has completed. 
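 *
 * Example (illustrative names): 'zpool trim -r 500M tank' trims every
 * leaf vdev in 'tank' at roughly 500MB/s; 'zpool trim -s tank' suspends
 * the operation and a later flag-less 'zpool trim tank' restarts it.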
7784 */ 7785 int 7786 zpool_do_trim(int argc, char **argv) 7787 { 7788 struct option long_options[] = { 7789 {"cancel", no_argument, NULL, 'c'}, 7790 {"secure", no_argument, NULL, 'd'}, 7791 {"rate", required_argument, NULL, 'r'}, 7792 {"suspend", no_argument, NULL, 's'}, 7793 {"wait", no_argument, NULL, 'w'}, 7794 {0, 0, 0, 0} 7795 }; 7796 7797 pool_trim_func_t cmd_type = POOL_TRIM_START; 7798 uint64_t rate = 0; 7799 boolean_t secure = B_FALSE; 7800 boolean_t wait = B_FALSE; 7801 7802 int c; 7803 while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL)) 7804 != -1) { 7805 switch (c) { 7806 case 'c': 7807 if (cmd_type != POOL_TRIM_START && 7808 cmd_type != POOL_TRIM_CANCEL) { 7809 (void) fprintf(stderr, gettext("-c cannot be " 7810 "combined with other options\n")); 7811 usage(B_FALSE); 7812 } 7813 cmd_type = POOL_TRIM_CANCEL; 7814 break; 7815 case 'd': 7816 if (cmd_type != POOL_TRIM_START) { 7817 (void) fprintf(stderr, gettext("-d cannot be " 7818 "combined with the -c or -s options\n")); 7819 usage(B_FALSE); 7820 } 7821 secure = B_TRUE; 7822 break; 7823 case 'r': 7824 if (cmd_type != POOL_TRIM_START) { 7825 (void) fprintf(stderr, gettext("-r cannot be " 7826 "combined with the -c or -s options\n")); 7827 usage(B_FALSE); 7828 } 7829 if (zfs_nicestrtonum(g_zfs, optarg, &rate) == -1) { 7830 (void) fprintf(stderr, "%s: %s\n", 7831 gettext("invalid value for rate"), 7832 libzfs_error_description(g_zfs)); 7833 usage(B_FALSE); 7834 } 7835 break; 7836 case 's': 7837 if (cmd_type != POOL_TRIM_START && 7838 cmd_type != POOL_TRIM_SUSPEND) { 7839 (void) fprintf(stderr, gettext("-s cannot be " 7840 "combined with other options\n")); 7841 usage(B_FALSE); 7842 } 7843 cmd_type = POOL_TRIM_SUSPEND; 7844 break; 7845 case 'w': 7846 wait = B_TRUE; 7847 break; 7848 case '?': 7849 if (optopt != 0) { 7850 (void) fprintf(stderr, 7851 gettext("invalid option '%c'\n"), optopt); 7852 } else { 7853 (void) fprintf(stderr, 7854 gettext("invalid option '%s'\n"), 7855 argv[optind - 1]); 7856 } 7857 usage(B_FALSE); 7858 } 7859 } 7860 7861 argc -= optind; 7862 argv += optind; 7863 7864 if (argc < 1) { 7865 (void) fprintf(stderr, gettext("missing pool name argument\n")); 7866 usage(B_FALSE); 7867 return (-1); 7868 } 7869 7870 if (wait && (cmd_type != POOL_TRIM_START)) { 7871 (void) fprintf(stderr, gettext("-w cannot be used with -c or " 7872 "-s\n")); 7873 usage(B_FALSE); 7874 } 7875 7876 char *poolname = argv[0]; 7877 zpool_handle_t *zhp = zpool_open(g_zfs, poolname); 7878 if (zhp == NULL) 7879 return (-1); 7880 7881 trimflags_t trim_flags = { 7882 .secure = secure, 7883 .rate = rate, 7884 .wait = wait, 7885 }; 7886 7887 nvlist_t *vdevs = fnvlist_alloc(); 7888 if (argc == 1) { 7889 /* no individual leaf vdevs specified, so add them all */ 7890 nvlist_t *config = zpool_get_config(zhp, NULL); 7891 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 7892 ZPOOL_CONFIG_VDEV_TREE); 7893 zpool_collect_leaves(zhp, nvroot, vdevs); 7894 trim_flags.fullpool = B_TRUE; 7895 } else { 7896 trim_flags.fullpool = B_FALSE; 7897 for (int i = 1; i < argc; i++) { 7898 fnvlist_add_boolean(vdevs, argv[i]); 7899 } 7900 } 7901 7902 int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags); 7903 7904 fnvlist_free(vdevs); 7905 zpool_close(zhp); 7906 7907 return (error); 7908 } 7909 7910 /* 7911 * Converts a total number of seconds to a human readable string broken 7912 * down in to days/hours/minutes/seconds. 
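 *
 * For example, 93784 seconds formats as "1 days 02:03:04", while 7384
 * seconds (less than a day) formats as "02:03:04".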
7913 */ 7914 static void 7915 secs_to_dhms(uint64_t total, char *buf) 7916 { 7917 uint64_t days = total / 60 / 60 / 24; 7918 uint64_t hours = (total / 60 / 60) % 24; 7919 uint64_t mins = (total / 60) % 60; 7920 uint64_t secs = (total % 60); 7921 7922 if (days > 0) { 7923 (void) sprintf(buf, "%llu days %02llu:%02llu:%02llu", 7924 (u_longlong_t)days, (u_longlong_t)hours, 7925 (u_longlong_t)mins, (u_longlong_t)secs); 7926 } else { 7927 (void) sprintf(buf, "%02llu:%02llu:%02llu", 7928 (u_longlong_t)hours, (u_longlong_t)mins, 7929 (u_longlong_t)secs); 7930 } 7931 } 7932 7933 /* 7934 * Print out detailed error scrub status. 7935 */ 7936 static void 7937 print_err_scrub_status(pool_scan_stat_t *ps) 7938 { 7939 time_t start, end, pause; 7940 uint64_t total_secs_left; 7941 uint64_t secs_left, mins_left, hours_left, days_left; 7942 uint64_t examined, to_be_examined; 7943 7944 if (ps == NULL || ps->pss_error_scrub_func != POOL_SCAN_ERRORSCRUB) { 7945 return; 7946 } 7947 7948 (void) printf(gettext(" scrub: ")); 7949 7950 start = ps->pss_error_scrub_start; 7951 end = ps->pss_error_scrub_end; 7952 pause = ps->pss_pass_error_scrub_pause; 7953 examined = ps->pss_error_scrub_examined; 7954 to_be_examined = ps->pss_error_scrub_to_be_examined; 7955 7956 assert(ps->pss_error_scrub_func == POOL_SCAN_ERRORSCRUB); 7957 7958 if (ps->pss_error_scrub_state == DSS_FINISHED) { 7959 total_secs_left = end - start; 7960 days_left = total_secs_left / 60 / 60 / 24; 7961 hours_left = (total_secs_left / 60 / 60) % 24; 7962 mins_left = (total_secs_left / 60) % 60; 7963 secs_left = (total_secs_left % 60); 7964 7965 (void) printf(gettext("scrubbed %llu error blocks in %llu days " 7966 "%02llu:%02llu:%02llu on %s"), (u_longlong_t)examined, 7967 (u_longlong_t)days_left, (u_longlong_t)hours_left, 7968 (u_longlong_t)mins_left, (u_longlong_t)secs_left, 7969 ctime(&end)); 7970 7971 return; 7972 } else if (ps->pss_error_scrub_state == DSS_CANCELED) { 7973 (void) printf(gettext("error scrub canceled on %s"), 7974 ctime(&end)); 7975 return; 7976 } 7977 assert(ps->pss_error_scrub_state == DSS_ERRORSCRUBBING); 7978 7979 /* Error scrub is in progress. */ 7980 if (pause == 0) { 7981 (void) printf(gettext("error scrub in progress since %s"), 7982 ctime(&start)); 7983 } else { 7984 (void) printf(gettext("error scrub paused since %s"), 7985 ctime(&pause)); 7986 (void) printf(gettext("\terror scrub started on %s"), 7987 ctime(&start)); 7988 } 7989 7990 double fraction_done = (double)examined / (to_be_examined + examined); 7991 (void) printf(gettext("\t%.2f%% done, issued I/O for %llu error" 7992 " blocks"), 100 * fraction_done, (u_longlong_t)examined); 7993 7994 (void) printf("\n"); 7995 } 7996 7997 /* 7998 * Print out detailed scrub status. 7999 */ 8000 static void 8001 print_scan_scrub_resilver_status(pool_scan_stat_t *ps) 8002 { 8003 time_t start, end, pause; 8004 uint64_t pass_scanned, scanned, pass_issued, issued, total_s, total_i; 8005 uint64_t elapsed, scan_rate, issue_rate; 8006 double fraction_done; 8007 char processed_buf[7], scanned_buf[7], issued_buf[7], total_s_buf[7]; 8008 char total_i_buf[7], srate_buf[7], irate_buf[7], time_buf[32]; 8009 8010 printf(" "); 8011 printf_color(ANSI_BOLD, gettext("scan:")); 8012 printf(" "); 8013 8014 /* If there's never been a scan, there's not much to say. 
*/ 8015 if (ps == NULL || ps->pss_func == POOL_SCAN_NONE || 8016 ps->pss_func >= POOL_SCAN_FUNCS) { 8017 (void) printf(gettext("none requested\n")); 8018 return; 8019 } 8020 8021 start = ps->pss_start_time; 8022 end = ps->pss_end_time; 8023 pause = ps->pss_pass_scrub_pause; 8024 8025 zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf)); 8026 8027 int is_resilver = ps->pss_func == POOL_SCAN_RESILVER; 8028 int is_scrub = ps->pss_func == POOL_SCAN_SCRUB; 8029 assert(is_resilver || is_scrub); 8030 8031 /* Scan is finished or canceled. */ 8032 if (ps->pss_state == DSS_FINISHED) { 8033 secs_to_dhms(end - start, time_buf); 8034 8035 if (is_scrub) { 8036 (void) printf(gettext("scrub repaired %s " 8037 "in %s with %llu errors on %s"), processed_buf, 8038 time_buf, (u_longlong_t)ps->pss_errors, 8039 ctime(&end)); 8040 } else if (is_resilver) { 8041 (void) printf(gettext("resilvered %s " 8042 "in %s with %llu errors on %s"), processed_buf, 8043 time_buf, (u_longlong_t)ps->pss_errors, 8044 ctime(&end)); 8045 } 8046 return; 8047 } else if (ps->pss_state == DSS_CANCELED) { 8048 if (is_scrub) { 8049 (void) printf(gettext("scrub canceled on %s"), 8050 ctime(&end)); 8051 } else if (is_resilver) { 8052 (void) printf(gettext("resilver canceled on %s"), 8053 ctime(&end)); 8054 } 8055 return; 8056 } 8057 8058 assert(ps->pss_state == DSS_SCANNING); 8059 8060 /* Scan is in progress. Resilvers can't be paused. */ 8061 if (is_scrub) { 8062 if (pause == 0) { 8063 (void) printf(gettext("scrub in progress since %s"), 8064 ctime(&start)); 8065 } else { 8066 (void) printf(gettext("scrub paused since %s"), 8067 ctime(&pause)); 8068 (void) printf(gettext("\tscrub started on %s"), 8069 ctime(&start)); 8070 } 8071 } else if (is_resilver) { 8072 (void) printf(gettext("resilver in progress since %s"), 8073 ctime(&start)); 8074 } 8075 8076 scanned = ps->pss_examined; 8077 pass_scanned = ps->pss_pass_exam; 8078 issued = ps->pss_issued; 8079 pass_issued = ps->pss_pass_issued; 8080 total_s = ps->pss_to_examine; 8081 total_i = ps->pss_to_examine - ps->pss_skipped; 8082 8083 /* we are only done with a block once we have issued the IO for it */ 8084 fraction_done = (double)issued / total_i; 8085 8086 /* elapsed time for this pass, rounding up to 1 if it's 0 */ 8087 elapsed = time(NULL) - ps->pss_pass_start; 8088 elapsed -= ps->pss_pass_scrub_spent_paused; 8089 elapsed = (elapsed != 0) ? 
elapsed : 1; 8090 8091 scan_rate = pass_scanned / elapsed; 8092 issue_rate = pass_issued / elapsed; 8093 8094 /* format all of the numbers we will be reporting */ 8095 zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf)); 8096 zfs_nicebytes(issued, issued_buf, sizeof (issued_buf)); 8097 zfs_nicebytes(total_s, total_s_buf, sizeof (total_s_buf)); 8098 zfs_nicebytes(total_i, total_i_buf, sizeof (total_i_buf)); 8099 8100 /* do not print estimated time if we have a paused scrub */ 8101 (void) printf(gettext("\t%s / %s scanned"), scanned_buf, total_s_buf); 8102 if (pause == 0 && scan_rate > 0) { 8103 zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf)); 8104 (void) printf(gettext(" at %s/s"), srate_buf); 8105 } 8106 (void) printf(gettext(", %s / %s issued"), issued_buf, total_i_buf); 8107 if (pause == 0 && issue_rate > 0) { 8108 zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf)); 8109 (void) printf(gettext(" at %s/s"), irate_buf); 8110 } 8111 (void) printf(gettext("\n")); 8112 8113 if (is_resilver) { 8114 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 8115 processed_buf, 100 * fraction_done); 8116 } else if (is_scrub) { 8117 (void) printf(gettext("\t%s repaired, %.2f%% done"), 8118 processed_buf, 100 * fraction_done); 8119 } 8120 8121 if (pause == 0) { 8122 /* 8123 * Only provide an estimate iff: 8124 * 1) we haven't yet issued all we expected, and 8125 * 2) the issue rate exceeds 10 MB/s, and 8126 * 3) it's either: 8127 * a) a resilver which has started repairs, or 8128 * b) a scrub which has entered the issue phase. 8129 */ 8130 if (total_i >= issued && issue_rate >= 10 * 1024 * 1024 && 8131 ((is_resilver && ps->pss_processed > 0) || 8132 (is_scrub && issued > 0))) { 8133 secs_to_dhms((total_i - issued) / issue_rate, time_buf); 8134 (void) printf(gettext(", %s to go\n"), time_buf); 8135 } else { 8136 (void) printf(gettext(", no estimated " 8137 "completion time\n")); 8138 } 8139 } else { 8140 (void) printf(gettext("\n")); 8141 } 8142 } 8143 8144 static void 8145 print_rebuild_status_impl(vdev_rebuild_stat_t *vrs, uint_t c, char *vdev_name) 8146 { 8147 if (vrs == NULL || vrs->vrs_state == VDEV_REBUILD_NONE) 8148 return; 8149 8150 printf(" "); 8151 printf_color(ANSI_BOLD, gettext("scan:")); 8152 printf(" "); 8153 8154 uint64_t bytes_scanned = vrs->vrs_bytes_scanned; 8155 uint64_t bytes_issued = vrs->vrs_bytes_issued; 8156 uint64_t bytes_rebuilt = vrs->vrs_bytes_rebuilt; 8157 uint64_t bytes_est_s = vrs->vrs_bytes_est; 8158 uint64_t bytes_est_i = vrs->vrs_bytes_est; 8159 if (c > offsetof(vdev_rebuild_stat_t, vrs_pass_bytes_skipped) / 8) 8160 bytes_est_i -= vrs->vrs_pass_bytes_skipped; 8161 uint64_t scan_rate = (vrs->vrs_pass_bytes_scanned / 8162 (vrs->vrs_pass_time_ms + 1)) * 1000; 8163 uint64_t issue_rate = (vrs->vrs_pass_bytes_issued / 8164 (vrs->vrs_pass_time_ms + 1)) * 1000; 8165 double scan_pct = MIN((double)bytes_scanned * 100 / 8166 (bytes_est_s + 1), 100); 8167 8168 /* Format all of the numbers we will be reporting */ 8169 char bytes_scanned_buf[7], bytes_issued_buf[7]; 8170 char bytes_rebuilt_buf[7], bytes_est_s_buf[7], bytes_est_i_buf[7]; 8171 char scan_rate_buf[7], issue_rate_buf[7], time_buf[32]; 8172 zfs_nicebytes(bytes_scanned, bytes_scanned_buf, 8173 sizeof (bytes_scanned_buf)); 8174 zfs_nicebytes(bytes_issued, bytes_issued_buf, 8175 sizeof (bytes_issued_buf)); 8176 zfs_nicebytes(bytes_rebuilt, bytes_rebuilt_buf, 8177 sizeof (bytes_rebuilt_buf)); 8178 zfs_nicebytes(bytes_est_s, bytes_est_s_buf, sizeof (bytes_est_s_buf)); 8179 zfs_nicebytes(bytes_est_i, 
bytes_est_i_buf, sizeof (bytes_est_i_buf)); 8180 8181 time_t start = vrs->vrs_start_time; 8182 time_t end = vrs->vrs_end_time; 8183 8184 /* Rebuild is finished or canceled. */ 8185 if (vrs->vrs_state == VDEV_REBUILD_COMPLETE) { 8186 secs_to_dhms(vrs->vrs_scan_time_ms / 1000, time_buf); 8187 (void) printf(gettext("resilvered (%s) %s in %s " 8188 "with %llu errors on %s"), vdev_name, bytes_rebuilt_buf, 8189 time_buf, (u_longlong_t)vrs->vrs_errors, ctime(&end)); 8190 return; 8191 } else if (vrs->vrs_state == VDEV_REBUILD_CANCELED) { 8192 (void) printf(gettext("resilver (%s) canceled on %s"), 8193 vdev_name, ctime(&end)); 8194 return; 8195 } else if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 8196 (void) printf(gettext("resilver (%s) in progress since %s"), 8197 vdev_name, ctime(&start)); 8198 } 8199 8200 assert(vrs->vrs_state == VDEV_REBUILD_ACTIVE); 8201 8202 (void) printf(gettext("\t%s / %s scanned"), bytes_scanned_buf, 8203 bytes_est_s_buf); 8204 if (scan_rate > 0) { 8205 zfs_nicebytes(scan_rate, scan_rate_buf, sizeof (scan_rate_buf)); 8206 (void) printf(gettext(" at %s/s"), scan_rate_buf); 8207 } 8208 (void) printf(gettext(", %s / %s issued"), bytes_issued_buf, 8209 bytes_est_i_buf); 8210 if (issue_rate > 0) { 8211 zfs_nicebytes(issue_rate, issue_rate_buf, 8212 sizeof (issue_rate_buf)); 8213 (void) printf(gettext(" at %s/s"), issue_rate_buf); 8214 } 8215 (void) printf(gettext("\n")); 8216 8217 (void) printf(gettext("\t%s resilvered, %.2f%% done"), 8218 bytes_rebuilt_buf, scan_pct); 8219 8220 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 8221 if (bytes_est_s >= bytes_scanned && 8222 scan_rate >= 10 * 1024 * 1024) { 8223 secs_to_dhms((bytes_est_s - bytes_scanned) / scan_rate, 8224 time_buf); 8225 (void) printf(gettext(", %s to go\n"), time_buf); 8226 } else { 8227 (void) printf(gettext(", no estimated " 8228 "completion time\n")); 8229 } 8230 } else { 8231 (void) printf(gettext("\n")); 8232 } 8233 } 8234 8235 /* 8236 * Print rebuild status for top-level vdevs. 8237 */ 8238 static void 8239 print_rebuild_status(zpool_handle_t *zhp, nvlist_t *nvroot) 8240 { 8241 nvlist_t **child; 8242 uint_t children; 8243 8244 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 8245 &child, &children) != 0) 8246 children = 0; 8247 8248 for (uint_t c = 0; c < children; c++) { 8249 vdev_rebuild_stat_t *vrs; 8250 uint_t i; 8251 8252 if (nvlist_lookup_uint64_array(child[c], 8253 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 8254 char *name = zpool_vdev_name(g_zfs, zhp, 8255 child[c], VDEV_NAME_TYPE_ID); 8256 print_rebuild_status_impl(vrs, i, name); 8257 free(name); 8258 } 8259 } 8260 } 8261 8262 /* 8263 * As we don't scrub checkpointed blocks, we want to warn the user that we 8264 * skipped scanning some blocks if a checkpoint exists or existed at any 8265 * time during the scan. If a sequential instead of healing reconstruction 8266 * was performed then the blocks were reconstructed. However, their checksums 8267 * have not been verified so we still print the warning. 
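 *
 * As a rough sketch (this simply restates the test performed in the
 * function below, with no new logic): the warning is suppressed only when
 * a completed scan ended strictly before the checkpoint was created, e.g.
 *
 *	if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) &&
 *	    ps->pss_end_time < pcs->pcs_start_time)
 *		return;	(scan and checkpoint never overlapped)
 *
 * In every other case the scan may have skipped checkpointed blocks, so
 * the warning is printed.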
8268 */ 8269 static void 8270 print_checkpoint_scan_warning(pool_scan_stat_t *ps, pool_checkpoint_stat_t *pcs) 8271 { 8272 if (ps == NULL || pcs == NULL) 8273 return; 8274 8275 if (pcs->pcs_state == CS_NONE || 8276 pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 8277 return; 8278 8279 assert(pcs->pcs_state == CS_CHECKPOINT_EXISTS); 8280 8281 if (ps->pss_state == DSS_NONE) 8282 return; 8283 8284 if ((ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) && 8285 ps->pss_end_time < pcs->pcs_start_time) 8286 return; 8287 8288 if (ps->pss_state == DSS_FINISHED || ps->pss_state == DSS_CANCELED) { 8289 (void) printf(gettext(" scan warning: skipped blocks " 8290 "that are only referenced by the checkpoint.\n")); 8291 } else { 8292 assert(ps->pss_state == DSS_SCANNING); 8293 (void) printf(gettext(" scan warning: skipping blocks " 8294 "that are only referenced by the checkpoint.\n")); 8295 } 8296 } 8297 8298 /* 8299 * Returns B_TRUE if there is an active rebuild in progress. Otherwise, 8300 * B_FALSE is returned and 'rebuild_end_time' is set to the end time for 8301 * the last completed (or cancelled) rebuild. 8302 */ 8303 static boolean_t 8304 check_rebuilding(nvlist_t *nvroot, uint64_t *rebuild_end_time) 8305 { 8306 nvlist_t **child; 8307 uint_t children; 8308 boolean_t rebuilding = B_FALSE; 8309 uint64_t end_time = 0; 8310 8311 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 8312 &child, &children) != 0) 8313 children = 0; 8314 8315 for (uint_t c = 0; c < children; c++) { 8316 vdev_rebuild_stat_t *vrs; 8317 uint_t i; 8318 8319 if (nvlist_lookup_uint64_array(child[c], 8320 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i) == 0) { 8321 8322 if (vrs->vrs_end_time > end_time) 8323 end_time = vrs->vrs_end_time; 8324 8325 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 8326 rebuilding = B_TRUE; 8327 end_time = 0; 8328 break; 8329 } 8330 } 8331 } 8332 8333 if (rebuild_end_time != NULL) 8334 *rebuild_end_time = end_time; 8335 8336 return (rebuilding); 8337 } 8338 8339 /* 8340 * Print the scan status. 8341 */ 8342 static void 8343 print_scan_status(zpool_handle_t *zhp, nvlist_t *nvroot) 8344 { 8345 uint64_t rebuild_end_time = 0, resilver_end_time = 0; 8346 boolean_t have_resilver = B_FALSE, have_scrub = B_FALSE; 8347 boolean_t have_errorscrub = B_FALSE; 8348 boolean_t active_resilver = B_FALSE; 8349 pool_checkpoint_stat_t *pcs = NULL; 8350 pool_scan_stat_t *ps = NULL; 8351 uint_t c; 8352 time_t scrub_start = 0, errorscrub_start = 0; 8353 8354 if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, 8355 (uint64_t **)&ps, &c) == 0) { 8356 if (ps->pss_func == POOL_SCAN_RESILVER) { 8357 resilver_end_time = ps->pss_end_time; 8358 active_resilver = (ps->pss_state == DSS_SCANNING); 8359 } 8360 8361 have_resilver = (ps->pss_func == POOL_SCAN_RESILVER); 8362 have_scrub = (ps->pss_func == POOL_SCAN_SCRUB); 8363 scrub_start = ps->pss_start_time; 8364 if (c > offsetof(pool_scan_stat_t, 8365 pss_pass_error_scrub_pause) / 8) { 8366 have_errorscrub = (ps->pss_error_scrub_func == 8367 POOL_SCAN_ERRORSCRUB); 8368 errorscrub_start = ps->pss_error_scrub_start; 8369 } 8370 } 8371 8372 boolean_t active_rebuild = check_rebuilding(nvroot, &rebuild_end_time); 8373 boolean_t have_rebuild = (active_rebuild || (rebuild_end_time > 0)); 8374 8375 /* Always print the scrub status when available. 
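 *
 * In outline (a summary of the checks below, not additional logic):
 *
 *	scrub vs. error scrub:  print the one that started more recently
 *	resilver vs. rebuild:   print an active one if there is one,
 *	                        otherwise the one that finished last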
*/ 8376 if (have_scrub && scrub_start > errorscrub_start) 8377 print_scan_scrub_resilver_status(ps); 8378 else if (have_errorscrub && errorscrub_start >= scrub_start) 8379 print_err_scrub_status(ps); 8380 8381 /* 8382 * When there is an active resilver or rebuild print its status. 8383 * Otherwise print the status of the last resilver or rebuild. 8384 */ 8385 if (active_resilver || (!active_rebuild && have_resilver && 8386 resilver_end_time && resilver_end_time > rebuild_end_time)) { 8387 print_scan_scrub_resilver_status(ps); 8388 } else if (active_rebuild || (!active_resilver && have_rebuild && 8389 rebuild_end_time && rebuild_end_time > resilver_end_time)) { 8390 print_rebuild_status(zhp, nvroot); 8391 } 8392 8393 (void) nvlist_lookup_uint64_array(nvroot, 8394 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 8395 print_checkpoint_scan_warning(ps, pcs); 8396 } 8397 8398 /* 8399 * Print out detailed removal status. 8400 */ 8401 static void 8402 print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs) 8403 { 8404 char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7]; 8405 time_t start, end; 8406 nvlist_t *config, *nvroot; 8407 nvlist_t **child; 8408 uint_t children; 8409 char *vdev_name; 8410 8411 if (prs == NULL || prs->prs_state == DSS_NONE) 8412 return; 8413 8414 /* 8415 * Determine name of vdev. 8416 */ 8417 config = zpool_get_config(zhp, NULL); 8418 nvroot = fnvlist_lookup_nvlist(config, 8419 ZPOOL_CONFIG_VDEV_TREE); 8420 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 8421 &child, &children) == 0); 8422 assert(prs->prs_removing_vdev < children); 8423 vdev_name = zpool_vdev_name(g_zfs, zhp, 8424 child[prs->prs_removing_vdev], B_TRUE); 8425 8426 printf_color(ANSI_BOLD, gettext("remove: ")); 8427 8428 start = prs->prs_start_time; 8429 end = prs->prs_end_time; 8430 zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf)); 8431 8432 /* 8433 * Removal is finished or canceled. 8434 */ 8435 if (prs->prs_state == DSS_FINISHED) { 8436 uint64_t minutes_taken = (end - start) / 60; 8437 8438 (void) printf(gettext("Removal of vdev %llu copied %s " 8439 "in %lluh%um, completed on %s"), 8440 (longlong_t)prs->prs_removing_vdev, 8441 copied_buf, 8442 (u_longlong_t)(minutes_taken / 60), 8443 (uint_t)(minutes_taken % 60), 8444 ctime((time_t *)&end)); 8445 } else if (prs->prs_state == DSS_CANCELED) { 8446 (void) printf(gettext("Removal of %s canceled on %s"), 8447 vdev_name, ctime(&end)); 8448 } else { 8449 uint64_t copied, total, elapsed, mins_left, hours_left; 8450 double fraction_done; 8451 uint_t rate; 8452 8453 assert(prs->prs_state == DSS_SCANNING); 8454 8455 /* 8456 * Removal is in progress. 8457 */ 8458 (void) printf(gettext( 8459 "Evacuation of %s in progress since %s"), 8460 vdev_name, ctime(&start)); 8461 8462 copied = prs->prs_copied > 0 ? prs->prs_copied : 1; 8463 total = prs->prs_to_copy; 8464 fraction_done = (double)copied / total; 8465 8466 /* elapsed time for this pass */ 8467 elapsed = time(NULL) - prs->prs_start_time; 8468 elapsed = elapsed > 0 ? elapsed : 1; 8469 rate = copied / elapsed; 8470 rate = rate > 0 ? 
rate : 1; 8471 mins_left = ((total - copied) / rate) / 60; 8472 hours_left = mins_left / 60; 8473 8474 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 8475 zfs_nicenum(total, total_buf, sizeof (total_buf)); 8476 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 8477 8478 /* 8479 * do not print estimated time if hours_left is more than 8480 * 30 days 8481 */ 8482 (void) printf(gettext( 8483 "\t%s copied out of %s at %s/s, %.2f%% done"), 8484 examined_buf, total_buf, rate_buf, 100 * fraction_done); 8485 if (hours_left < (30 * 24)) { 8486 (void) printf(gettext(", %lluh%um to go\n"), 8487 (u_longlong_t)hours_left, (uint_t)(mins_left % 60)); 8488 } else { 8489 (void) printf(gettext( 8490 ", (copy is slow, no estimated time)\n")); 8491 } 8492 } 8493 free(vdev_name); 8494 8495 if (prs->prs_mapping_memory > 0) { 8496 char mem_buf[7]; 8497 zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf)); 8498 (void) printf(gettext( 8499 "\t%s memory used for removed device mappings\n"), 8500 mem_buf); 8501 } 8502 } 8503 8504 /* 8505 * Print out detailed raidz expansion status. 8506 */ 8507 static void 8508 print_raidz_expand_status(zpool_handle_t *zhp, pool_raidz_expand_stat_t *pres) 8509 { 8510 char copied_buf[7]; 8511 8512 if (pres == NULL || pres->pres_state == DSS_NONE) 8513 return; 8514 8515 /* 8516 * Determine name of vdev. 8517 */ 8518 nvlist_t *config = zpool_get_config(zhp, NULL); 8519 nvlist_t *nvroot = fnvlist_lookup_nvlist(config, 8520 ZPOOL_CONFIG_VDEV_TREE); 8521 nvlist_t **child; 8522 uint_t children; 8523 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 8524 &child, &children) == 0); 8525 assert(pres->pres_expanding_vdev < children); 8526 8527 printf_color(ANSI_BOLD, gettext("expand: ")); 8528 8529 time_t start = pres->pres_start_time; 8530 time_t end = pres->pres_end_time; 8531 char *vname = 8532 zpool_vdev_name(g_zfs, zhp, child[pres->pres_expanding_vdev], 0); 8533 zfs_nicenum(pres->pres_reflowed, copied_buf, sizeof (copied_buf)); 8534 8535 /* 8536 * Expansion is finished or canceled. 8537 */ 8538 if (pres->pres_state == DSS_FINISHED) { 8539 char time_buf[32]; 8540 secs_to_dhms(end - start, time_buf); 8541 8542 (void) printf(gettext("expanded %s-%u copied %s in %s, " 8543 "on %s"), vname, (int)pres->pres_expanding_vdev, 8544 copied_buf, time_buf, ctime((time_t *)&end)); 8545 } else { 8546 char examined_buf[7], total_buf[7], rate_buf[7]; 8547 uint64_t copied, total, elapsed, secs_left; 8548 double fraction_done; 8549 uint_t rate; 8550 8551 assert(pres->pres_state == DSS_SCANNING); 8552 8553 /* 8554 * Expansion is in progress. 8555 */ 8556 (void) printf(gettext( 8557 "expansion of %s-%u in progress since %s"), 8558 vname, (int)pres->pres_expanding_vdev, ctime(&start)); 8559 8560 copied = pres->pres_reflowed > 0 ? pres->pres_reflowed : 1; 8561 total = pres->pres_to_reflow; 8562 fraction_done = (double)copied / total; 8563 8564 /* elapsed time for this pass */ 8565 elapsed = time(NULL) - pres->pres_start_time; 8566 elapsed = elapsed > 0 ? elapsed : 1; 8567 rate = copied / elapsed; 8568 rate = rate > 0 ? 
rate : 1; 8569 secs_left = (total - copied) / rate; 8570 8571 zfs_nicenum(copied, examined_buf, sizeof (examined_buf)); 8572 zfs_nicenum(total, total_buf, sizeof (total_buf)); 8573 zfs_nicenum(rate, rate_buf, sizeof (rate_buf)); 8574 8575 /* 8576 * do not print estimated time if hours_left is more than 8577 * 30 days 8578 */ 8579 (void) printf(gettext("\t%s / %s copied at %s/s, %.2f%% done"), 8580 examined_buf, total_buf, rate_buf, 100 * fraction_done); 8581 if (pres->pres_waiting_for_resilver) { 8582 (void) printf(gettext(", paused for resilver or " 8583 "clear\n")); 8584 } else if (secs_left < (30 * 24 * 3600)) { 8585 char time_buf[32]; 8586 secs_to_dhms(secs_left, time_buf); 8587 (void) printf(gettext(", %s to go\n"), time_buf); 8588 } else { 8589 (void) printf(gettext( 8590 ", (copy is slow, no estimated time)\n")); 8591 } 8592 } 8593 free(vname); 8594 } 8595 static void 8596 print_checkpoint_status(pool_checkpoint_stat_t *pcs) 8597 { 8598 time_t start; 8599 char space_buf[7]; 8600 8601 if (pcs == NULL || pcs->pcs_state == CS_NONE) 8602 return; 8603 8604 (void) printf(gettext("checkpoint: ")); 8605 8606 start = pcs->pcs_start_time; 8607 zfs_nicenum(pcs->pcs_space, space_buf, sizeof (space_buf)); 8608 8609 if (pcs->pcs_state == CS_CHECKPOINT_EXISTS) { 8610 char *date = ctime(&start); 8611 8612 /* 8613 * ctime() adds a newline at the end of the generated 8614 * string, thus the weird format specifier and the 8615 * strlen() call used to chop it off from the output. 8616 */ 8617 (void) printf(gettext("created %.*s, consumes %s\n"), 8618 (int)(strlen(date) - 1), date, space_buf); 8619 return; 8620 } 8621 8622 assert(pcs->pcs_state == CS_CHECKPOINT_DISCARDING); 8623 8624 (void) printf(gettext("discarding, %s remaining.\n"), 8625 space_buf); 8626 } 8627 8628 static void 8629 print_error_log(zpool_handle_t *zhp) 8630 { 8631 nvlist_t *nverrlist = NULL; 8632 nvpair_t *elem; 8633 char *pathname; 8634 size_t len = MAXPATHLEN * 2; 8635 8636 if (zpool_get_errlog(zhp, &nverrlist) != 0) 8637 return; 8638 8639 (void) printf("errors: Permanent errors have been " 8640 "detected in the following files:\n\n"); 8641 8642 pathname = safe_malloc(len); 8643 elem = NULL; 8644 while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) { 8645 nvlist_t *nv; 8646 uint64_t dsobj, obj; 8647 8648 verify(nvpair_value_nvlist(elem, &nv) == 0); 8649 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET, 8650 &dsobj) == 0); 8651 verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT, 8652 &obj) == 0); 8653 zpool_obj_to_path(zhp, dsobj, obj, pathname, len); 8654 (void) printf("%7s %s\n", "", pathname); 8655 } 8656 free(pathname); 8657 nvlist_free(nverrlist); 8658 } 8659 8660 static void 8661 print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares, 8662 uint_t nspares) 8663 { 8664 uint_t i; 8665 char *name; 8666 8667 if (nspares == 0) 8668 return; 8669 8670 (void) printf(gettext("\tspares\n")); 8671 8672 for (i = 0; i < nspares; i++) { 8673 name = zpool_vdev_name(g_zfs, zhp, spares[i], 8674 cb->cb_name_flags); 8675 print_status_config(zhp, cb, name, spares[i], 2, B_TRUE, NULL); 8676 free(name); 8677 } 8678 } 8679 8680 static void 8681 print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache, 8682 uint_t nl2cache) 8683 { 8684 uint_t i; 8685 char *name; 8686 8687 if (nl2cache == 0) 8688 return; 8689 8690 (void) printf(gettext("\tcache\n")); 8691 8692 for (i = 0; i < nl2cache; i++) { 8693 name = zpool_vdev_name(g_zfs, zhp, l2cache[i], 8694 cb->cb_name_flags); 8695 print_status_config(zhp, cb, name, 
l2cache[i], 2, 8696 B_FALSE, NULL); 8697 free(name); 8698 } 8699 } 8700 8701 static void 8702 print_dedup_stats(nvlist_t *config) 8703 { 8704 ddt_histogram_t *ddh; 8705 ddt_stat_t *dds; 8706 ddt_object_t *ddo; 8707 uint_t c; 8708 char dspace[6], mspace[6]; 8709 8710 /* 8711 * If the pool was faulted then we may not have been able to 8712 * obtain the config. Otherwise, if we have anything in the dedup 8713 * table continue processing the stats. 8714 */ 8715 if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS, 8716 (uint64_t **)&ddo, &c) != 0) 8717 return; 8718 8719 (void) printf("\n"); 8720 (void) printf(gettext(" dedup: ")); 8721 if (ddo->ddo_count == 0) { 8722 (void) printf(gettext("no DDT entries\n")); 8723 return; 8724 } 8725 8726 zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace)); 8727 zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace)); 8728 (void) printf("DDT entries %llu, size %s on disk, %s in core\n", 8729 (u_longlong_t)ddo->ddo_count, 8730 dspace, 8731 mspace); 8732 8733 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS, 8734 (uint64_t **)&dds, &c) == 0); 8735 verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM, 8736 (uint64_t **)&ddh, &c) == 0); 8737 zpool_dump_ddt(dds, ddh); 8738 } 8739 8740 /* 8741 * Display a summary of pool status. Displays a summary such as: 8742 * 8743 * pool: tank 8744 * status: DEGRADED 8745 * reason: One or more devices ... 8746 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01 8747 * config: 8748 * mirror DEGRADED 8749 * c1t0d0 OK 8750 * c2t0d0 UNAVAIL 8751 * 8752 * When given the '-v' option, we print out the complete config. If the '-e' 8753 * option is specified, then we print out error rate information as well. 8754 */ 8755 static int 8756 status_callback(zpool_handle_t *zhp, void *data) 8757 { 8758 status_cbdata_t *cbp = data; 8759 nvlist_t *config, *nvroot; 8760 const char *msgid; 8761 zpool_status_t reason; 8762 zpool_errata_t errata; 8763 const char *health; 8764 uint_t c; 8765 vdev_stat_t *vs; 8766 8767 config = zpool_get_config(zhp, NULL); 8768 reason = zpool_get_status(zhp, &msgid, &errata); 8769 8770 cbp->cb_count++; 8771 8772 /* 8773 * If we were given 'zpool status -x', only report those pools with 8774 * problems. 8775 */ 8776 if (cbp->cb_explain && 8777 (reason == ZPOOL_STATUS_OK || 8778 reason == ZPOOL_STATUS_VERSION_OLDER || 8779 reason == ZPOOL_STATUS_FEAT_DISABLED || 8780 reason == ZPOOL_STATUS_COMPATIBILITY_ERR || 8781 reason == ZPOOL_STATUS_INCOMPATIBLE_FEAT)) { 8782 if (!cbp->cb_allpools) { 8783 (void) printf(gettext("pool '%s' is healthy\n"), 8784 zpool_get_name(zhp)); 8785 if (cbp->cb_first) 8786 cbp->cb_first = B_FALSE; 8787 } 8788 return (0); 8789 } 8790 8791 if (cbp->cb_first) 8792 cbp->cb_first = B_FALSE; 8793 else 8794 (void) printf("\n"); 8795 8796 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 8797 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, 8798 (uint64_t **)&vs, &c) == 0); 8799 8800 health = zpool_get_state_str(zhp); 8801 8802 printf(" "); 8803 printf_color(ANSI_BOLD, gettext("pool:")); 8804 printf(" %s\n", zpool_get_name(zhp)); 8805 fputc(' ', stdout); 8806 printf_color(ANSI_BOLD, gettext("state: ")); 8807 8808 printf_color(health_str_to_color(health), "%s", health); 8809 8810 fputc('\n', stdout); 8811 8812 switch (reason) { 8813 case ZPOOL_STATUS_MISSING_DEV_R: 8814 printf_color(ANSI_BOLD, gettext("status: ")); 8815 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8816 "not be opened. 
Sufficient replicas exist for\n\tthe pool " 8817 "to continue functioning in a degraded state.\n")); 8818 printf_color(ANSI_BOLD, gettext("action: ")); 8819 printf_color(ANSI_YELLOW, gettext("Attach the missing device " 8820 "and online it using 'zpool online'.\n")); 8821 break; 8822 8823 case ZPOOL_STATUS_MISSING_DEV_NR: 8824 printf_color(ANSI_BOLD, gettext("status: ")); 8825 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8826 "not be opened. There are insufficient\n\treplicas for the" 8827 " pool to continue functioning.\n")); 8828 printf_color(ANSI_BOLD, gettext("action: ")); 8829 printf_color(ANSI_YELLOW, gettext("Attach the missing device " 8830 "and online it using 'zpool online'.\n")); 8831 break; 8832 8833 case ZPOOL_STATUS_CORRUPT_LABEL_R: 8834 printf_color(ANSI_BOLD, gettext("status: ")); 8835 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8836 "not be used because the label is missing or\n\tinvalid. " 8837 "Sufficient replicas exist for the pool to continue\n\t" 8838 "functioning in a degraded state.\n")); 8839 printf_color(ANSI_BOLD, gettext("action: ")); 8840 printf_color(ANSI_YELLOW, gettext("Replace the device using " 8841 "'zpool replace'.\n")); 8842 break; 8843 8844 case ZPOOL_STATUS_CORRUPT_LABEL_NR: 8845 printf_color(ANSI_BOLD, gettext("status: ")); 8846 printf_color(ANSI_YELLOW, gettext("One or more devices could " 8847 "not be used because the label is missing\n\tor invalid. " 8848 "There are insufficient replicas for the pool to " 8849 "continue\n\tfunctioning.\n")); 8850 zpool_explain_recover(zpool_get_handle(zhp), 8851 zpool_get_name(zhp), reason, config); 8852 break; 8853 8854 case ZPOOL_STATUS_FAILING_DEV: 8855 printf_color(ANSI_BOLD, gettext("status: ")); 8856 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8857 "experienced an unrecoverable error. An\n\tattempt was " 8858 "made to correct the error. Applications are " 8859 "unaffected.\n")); 8860 printf_color(ANSI_BOLD, gettext("action: ")); 8861 printf_color(ANSI_YELLOW, gettext("Determine if the " 8862 "device needs to be replaced, and clear the errors\n\tusing" 8863 " 'zpool clear' or replace the device with 'zpool " 8864 "replace'.\n")); 8865 break; 8866 8867 case ZPOOL_STATUS_OFFLINE_DEV: 8868 printf_color(ANSI_BOLD, gettext("status: ")); 8869 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8870 "been taken offline by the administrator.\n\tSufficient " 8871 "replicas exist for the pool to continue functioning in " 8872 "a\n\tdegraded state.\n")); 8873 printf_color(ANSI_BOLD, gettext("action: ")); 8874 printf_color(ANSI_YELLOW, gettext("Online the device " 8875 "using 'zpool online' or replace the device with\n\t'zpool " 8876 "replace'.\n")); 8877 break; 8878 8879 case ZPOOL_STATUS_REMOVED_DEV: 8880 printf_color(ANSI_BOLD, gettext("status: ")); 8881 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8882 "been removed by the administrator.\n\tSufficient " 8883 "replicas exist for the pool to continue functioning in " 8884 "a\n\tdegraded state.\n")); 8885 printf_color(ANSI_BOLD, gettext("action: ")); 8886 printf_color(ANSI_YELLOW, gettext("Online the device " 8887 "using 'zpool online' or replace the device with\n\t'zpool " 8888 "replace'.\n")); 8889 break; 8890 8891 case ZPOOL_STATUS_RESILVERING: 8892 case ZPOOL_STATUS_REBUILDING: 8893 printf_color(ANSI_BOLD, gettext("status: ")); 8894 printf_color(ANSI_YELLOW, gettext("One or more devices is " 8895 "currently being resilvered. 
The pool will\n\tcontinue " 8896 "to function, possibly in a degraded state.\n")); 8897 printf_color(ANSI_BOLD, gettext("action: ")); 8898 printf_color(ANSI_YELLOW, gettext("Wait for the resilver to " 8899 "complete.\n")); 8900 break; 8901 8902 case ZPOOL_STATUS_REBUILD_SCRUB: 8903 printf_color(ANSI_BOLD, gettext("status: ")); 8904 printf_color(ANSI_YELLOW, gettext("One or more devices have " 8905 "been sequentially resilvered, scrubbing\n\tthe pool " 8906 "is recommended.\n")); 8907 printf_color(ANSI_BOLD, gettext("action: ")); 8908 printf_color(ANSI_YELLOW, gettext("Use 'zpool scrub' to " 8909 "verify all data checksums.\n")); 8910 break; 8911 8912 case ZPOOL_STATUS_CORRUPT_DATA: 8913 printf_color(ANSI_BOLD, gettext("status: ")); 8914 printf_color(ANSI_YELLOW, gettext("One or more devices has " 8915 "experienced an error resulting in data\n\tcorruption. " 8916 "Applications may be affected.\n")); 8917 printf_color(ANSI_BOLD, gettext("action: ")); 8918 printf_color(ANSI_YELLOW, gettext("Restore the file in question" 8919 " if possible. Otherwise restore the\n\tentire pool from " 8920 "backup.\n")); 8921 break; 8922 8923 case ZPOOL_STATUS_CORRUPT_POOL: 8924 printf_color(ANSI_BOLD, gettext("status: ")); 8925 printf_color(ANSI_YELLOW, gettext("The pool metadata is " 8926 "corrupted and the pool cannot be opened.\n")); 8927 zpool_explain_recover(zpool_get_handle(zhp), 8928 zpool_get_name(zhp), reason, config); 8929 break; 8930 8931 case ZPOOL_STATUS_VERSION_OLDER: 8932 printf_color(ANSI_BOLD, gettext("status: ")); 8933 printf_color(ANSI_YELLOW, gettext("The pool is formatted using " 8934 "a legacy on-disk format. The pool can\n\tstill be used, " 8935 "but some features are unavailable.\n")); 8936 printf_color(ANSI_BOLD, gettext("action: ")); 8937 printf_color(ANSI_YELLOW, gettext("Upgrade the pool using " 8938 "'zpool upgrade'. Once this is done, the\n\tpool will no " 8939 "longer be accessible on software that does not support\n\t" 8940 "feature flags.\n")); 8941 break; 8942 8943 case ZPOOL_STATUS_VERSION_NEWER: 8944 printf_color(ANSI_BOLD, gettext("status: ")); 8945 printf_color(ANSI_YELLOW, gettext("The pool has been upgraded " 8946 "to a newer, incompatible on-disk version.\n\tThe pool " 8947 "cannot be accessed on this system.\n")); 8948 printf_color(ANSI_BOLD, gettext("action: ")); 8949 printf_color(ANSI_YELLOW, gettext("Access the pool from a " 8950 "system running more recent software, or\n\trestore the " 8951 "pool from backup.\n")); 8952 break; 8953 8954 case ZPOOL_STATUS_FEAT_DISABLED: 8955 printf_color(ANSI_BOLD, gettext("status: ")); 8956 printf_color(ANSI_YELLOW, gettext("Some supported and " 8957 "requested features are not enabled on the pool.\n\t" 8958 "The pool can still be used, but some features are " 8959 "unavailable.\n")); 8960 printf_color(ANSI_BOLD, gettext("action: ")); 8961 printf_color(ANSI_YELLOW, gettext("Enable all features using " 8962 "'zpool upgrade'. Once this is done,\n\tthe pool may no " 8963 "longer be accessible by software that does not support\n\t" 8964 "the features. See zpool-features(7) for details.\n")); 8965 break; 8966 8967 case ZPOOL_STATUS_COMPATIBILITY_ERR: 8968 printf_color(ANSI_BOLD, gettext("status: ")); 8969 printf_color(ANSI_YELLOW, gettext("This pool has a " 8970 "compatibility list specified, but it could not be\n\t" 8971 "read/parsed at this time. 
The pool can still be used, " 8972 "but this\n\tshould be investigated.\n")); 8973 printf_color(ANSI_BOLD, gettext("action: ")); 8974 printf_color(ANSI_YELLOW, gettext("Check the value of the " 8975 "'compatibility' property against the\n\t" 8976 "appropriate file in " ZPOOL_SYSCONF_COMPAT_D " or " 8977 ZPOOL_DATA_COMPAT_D ".\n")); 8978 break; 8979 8980 case ZPOOL_STATUS_INCOMPATIBLE_FEAT: 8981 printf_color(ANSI_BOLD, gettext("status: ")); 8982 printf_color(ANSI_YELLOW, gettext("One or more features " 8983 "are enabled on the pool despite not being\n\t" 8984 "requested by the 'compatibility' property.\n")); 8985 printf_color(ANSI_BOLD, gettext("action: ")); 8986 printf_color(ANSI_YELLOW, gettext("Consider setting " 8987 "'compatibility' to an appropriate value, or\n\t" 8988 "adding needed features to the relevant file in\n\t" 8989 ZPOOL_SYSCONF_COMPAT_D " or " ZPOOL_DATA_COMPAT_D ".\n")); 8990 break; 8991 8992 case ZPOOL_STATUS_UNSUP_FEAT_READ: 8993 printf_color(ANSI_BOLD, gettext("status: ")); 8994 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " 8995 "on this system because it uses the\n\tfollowing feature(s)" 8996 " not supported on this system:\n")); 8997 zpool_print_unsup_feat(config); 8998 (void) printf("\n"); 8999 printf_color(ANSI_BOLD, gettext("action: ")); 9000 printf_color(ANSI_YELLOW, gettext("Access the pool from a " 9001 "system that supports the required feature(s),\n\tor " 9002 "restore the pool from backup.\n")); 9003 break; 9004 9005 case ZPOOL_STATUS_UNSUP_FEAT_WRITE: 9006 printf_color(ANSI_BOLD, gettext("status: ")); 9007 printf_color(ANSI_YELLOW, gettext("The pool can only be " 9008 "accessed in read-only mode on this system. It\n\tcannot be" 9009 " accessed in read-write mode because it uses the " 9010 "following\n\tfeature(s) not supported on this system:\n")); 9011 zpool_print_unsup_feat(config); 9012 (void) printf("\n"); 9013 printf_color(ANSI_BOLD, gettext("action: ")); 9014 printf_color(ANSI_YELLOW, gettext("The pool cannot be accessed " 9015 "in read-write mode. Import the pool with\n" 9016 "\t\"-o readonly=on\", access the pool from a system that " 9017 "supports the\n\trequired feature(s), or restore the " 9018 "pool from backup.\n")); 9019 break; 9020 9021 case ZPOOL_STATUS_FAULTED_DEV_R: 9022 printf_color(ANSI_BOLD, gettext("status: ")); 9023 printf_color(ANSI_YELLOW, gettext("One or more devices are " 9024 "faulted in response to persistent errors.\n\tSufficient " 9025 "replicas exist for the pool to continue functioning " 9026 "in a\n\tdegraded state.\n")); 9027 printf_color(ANSI_BOLD, gettext("action: ")); 9028 printf_color(ANSI_YELLOW, gettext("Replace the faulted device, " 9029 "or use 'zpool clear' to mark the device\n\trepaired.\n")); 9030 break; 9031 9032 case ZPOOL_STATUS_FAULTED_DEV_NR: 9033 printf_color(ANSI_BOLD, gettext("status: ")); 9034 printf_color(ANSI_YELLOW, gettext("One or more devices are " 9035 "faulted in response to persistent errors. There are " 9036 "insufficient replicas for the pool to\n\tcontinue " 9037 "functioning.\n")); 9038 printf_color(ANSI_BOLD, gettext("action: ")); 9039 printf_color(ANSI_YELLOW, gettext("Destroy and re-create the " 9040 "pool from a backup source. 
Manually marking the device\n" 9041 "\trepaired using 'zpool clear' may allow some data " 9042 "to be recovered.\n")); 9043 break; 9044 9045 case ZPOOL_STATUS_IO_FAILURE_MMP: 9046 printf_color(ANSI_BOLD, gettext("status: ")); 9047 printf_color(ANSI_YELLOW, gettext("The pool is suspended " 9048 "because multihost writes failed or were delayed;\n\t" 9049 "another system could import the pool undetected.\n")); 9050 printf_color(ANSI_BOLD, gettext("action: ")); 9051 printf_color(ANSI_YELLOW, gettext("Make sure the pool's devices" 9052 " are connected, then reboot your system and\n\timport the " 9053 "pool or run 'zpool clear' to resume the pool.\n")); 9054 break; 9055 9056 case ZPOOL_STATUS_IO_FAILURE_WAIT: 9057 case ZPOOL_STATUS_IO_FAILURE_CONTINUE: 9058 printf_color(ANSI_BOLD, gettext("status: ")); 9059 printf_color(ANSI_YELLOW, gettext("One or more devices are " 9060 "faulted in response to IO failures.\n")); 9061 printf_color(ANSI_BOLD, gettext("action: ")); 9062 printf_color(ANSI_YELLOW, gettext("Make sure the affected " 9063 "devices are connected, then run 'zpool clear'.\n")); 9064 break; 9065 9066 case ZPOOL_STATUS_BAD_LOG: 9067 printf_color(ANSI_BOLD, gettext("status: ")); 9068 printf_color(ANSI_YELLOW, gettext("An intent log record " 9069 "could not be read.\n" 9070 "\tWaiting for administrator intervention to fix the " 9071 "faulted pool.\n")); 9072 printf_color(ANSI_BOLD, gettext("action: ")); 9073 printf_color(ANSI_YELLOW, gettext("Either restore the affected " 9074 "device(s) and run 'zpool online',\n" 9075 "\tor ignore the intent log records by running " 9076 "'zpool clear'.\n")); 9077 break; 9078 9079 case ZPOOL_STATUS_NON_NATIVE_ASHIFT: 9080 (void) printf(gettext("status: One or more devices are " 9081 "configured to use a non-native block size.\n" 9082 "\tExpect reduced performance.\n")); 9083 (void) printf(gettext("action: Replace affected devices with " 9084 "devices that support the\n\tconfigured block size, or " 9085 "migrate data to a properly configured\n\tpool.\n")); 9086 break; 9087 9088 case ZPOOL_STATUS_HOSTID_MISMATCH: 9089 printf_color(ANSI_BOLD, gettext("status: ")); 9090 printf_color(ANSI_YELLOW, gettext("Mismatch between pool hostid" 9091 " and system hostid on imported pool.\n\tThis pool was " 9092 "previously imported into a system with a different " 9093 "hostid,\n\tand then was verbatim imported into this " 9094 "system.\n")); 9095 printf_color(ANSI_BOLD, gettext("action: ")); 9096 printf_color(ANSI_YELLOW, gettext("Export this pool on all " 9097 "systems on which it is imported.\n" 9098 "\tThen import it to correct the mismatch.\n")); 9099 break; 9100 9101 case ZPOOL_STATUS_ERRATA: 9102 printf_color(ANSI_BOLD, gettext("status: ")); 9103 printf_color(ANSI_YELLOW, gettext("Errata #%d detected.\n"), 9104 errata); 9105 9106 switch (errata) { 9107 case ZPOOL_ERRATA_NONE: 9108 break; 9109 9110 case ZPOOL_ERRATA_ZOL_2094_SCRUB: 9111 printf_color(ANSI_BOLD, gettext("action: ")); 9112 printf_color(ANSI_YELLOW, gettext("To correct the issue" 9113 " run 'zpool scrub'.\n")); 9114 break; 9115 9116 case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION: 9117 (void) printf(gettext("\tExisting encrypted datasets " 9118 "contain an on-disk incompatibility\n\twhich " 9119 "needs to be corrected.\n")); 9120 printf_color(ANSI_BOLD, gettext("action: ")); 9121 printf_color(ANSI_YELLOW, gettext("To correct the issue" 9122 " backup existing encrypted datasets to new\n\t" 9123 "encrypted datasets and destroy the old ones. 
" 9124 "'zfs mount -o ro' can\n\tbe used to temporarily " 9125 "mount existing encrypted datasets readonly.\n")); 9126 break; 9127 9128 case ZPOOL_ERRATA_ZOL_8308_ENCRYPTION: 9129 (void) printf(gettext("\tExisting encrypted snapshots " 9130 "and bookmarks contain an on-disk\n\tincompat" 9131 "ibility. This may cause on-disk corruption if " 9132 "they are used\n\twith 'zfs recv'.\n")); 9133 printf_color(ANSI_BOLD, gettext("action: ")); 9134 printf_color(ANSI_YELLOW, gettext("To correct the " 9135 "issue, enable the bookmark_v2 feature. No " 9136 "additional\n\taction is needed if there are no " 9137 "encrypted snapshots or bookmarks.\n\tIf preserving " 9138 "the encrypted snapshots and bookmarks is required," 9139 " use\n\ta non-raw send to backup and restore them." 9140 " Alternately, they may be\n\tremoved to resolve " 9141 "the incompatibility.\n")); 9142 break; 9143 9144 default: 9145 /* 9146 * All errata which allow the pool to be imported 9147 * must contain an action message. 9148 */ 9149 assert(0); 9150 } 9151 break; 9152 9153 default: 9154 /* 9155 * The remaining errors can't actually be generated, yet. 9156 */ 9157 assert(reason == ZPOOL_STATUS_OK); 9158 } 9159 9160 if (msgid != NULL) { 9161 printf(" "); 9162 printf_color(ANSI_BOLD, gettext("see:")); 9163 printf(gettext( 9164 " https://openzfs.github.io/openzfs-docs/msg/%s\n"), 9165 msgid); 9166 } 9167 9168 if (config != NULL) { 9169 uint64_t nerr; 9170 nvlist_t **spares, **l2cache; 9171 uint_t nspares, nl2cache; 9172 9173 print_scan_status(zhp, nvroot); 9174 9175 pool_removal_stat_t *prs = NULL; 9176 (void) nvlist_lookup_uint64_array(nvroot, 9177 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 9178 print_removal_status(zhp, prs); 9179 9180 pool_checkpoint_stat_t *pcs = NULL; 9181 (void) nvlist_lookup_uint64_array(nvroot, 9182 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 9183 print_checkpoint_status(pcs); 9184 9185 pool_raidz_expand_stat_t *pres = NULL; 9186 (void) nvlist_lookup_uint64_array(nvroot, 9187 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c); 9188 print_raidz_expand_status(zhp, pres); 9189 9190 cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0, 9191 cbp->cb_name_flags | VDEV_NAME_TYPE_ID); 9192 if (cbp->cb_namewidth < 10) 9193 cbp->cb_namewidth = 10; 9194 9195 color_start(ANSI_BOLD); 9196 (void) printf(gettext("config:\n\n")); 9197 (void) printf(gettext("\t%-*s %-8s %5s %5s %5s"), 9198 cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE", 9199 "CKSUM"); 9200 color_end(); 9201 9202 if (cbp->cb_print_slow_ios) { 9203 printf_color(ANSI_BOLD, " %5s", gettext("SLOW")); 9204 } 9205 9206 if (cbp->cb_print_power) { 9207 printf_color(ANSI_BOLD, " %5s", gettext("POWER")); 9208 } 9209 9210 if (cbp->vcdl != NULL) 9211 print_cmd_columns(cbp->vcdl, 0); 9212 9213 printf("\n"); 9214 9215 print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0, 9216 B_FALSE, NULL); 9217 9218 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_DEDUP); 9219 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_BIAS_SPECIAL); 9220 print_class_vdevs(zhp, cbp, nvroot, VDEV_ALLOC_CLASS_LOGS); 9221 9222 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 9223 &l2cache, &nl2cache) == 0) 9224 print_l2cache(zhp, cbp, l2cache, nl2cache); 9225 9226 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 9227 &spares, &nspares) == 0) 9228 print_spares(zhp, cbp, spares, nspares); 9229 9230 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, 9231 &nerr) == 0) { 9232 (void) printf("\n"); 9233 if (nerr == 0) { 9234 (void) printf(gettext( 
9235 "errors: No known data errors\n")); 9236 } else if (!cbp->cb_verbose) { 9237 color_start(ANSI_RED); 9238 (void) printf(gettext("errors: %llu data " 9239 "errors, use '-v' for a list\n"), 9240 (u_longlong_t)nerr); 9241 color_end(); 9242 } else { 9243 print_error_log(zhp); 9244 } 9245 } 9246 9247 if (cbp->cb_dedup_stats) 9248 print_dedup_stats(config); 9249 } else { 9250 (void) printf(gettext("config: The configuration cannot be " 9251 "determined.\n")); 9252 } 9253 9254 return (0); 9255 } 9256 9257 /* 9258 * zpool status [-c [script1,script2,...]] [-DegiLpPstvx] [--power] [-T d|u] ... 9259 * [pool] [interval [count]] 9260 * 9261 * -c CMD For each vdev, run command CMD 9262 * -D Display dedup status (undocumented) 9263 * -e Display only unhealthy vdevs 9264 * -g Display guid for individual vdev name. 9265 * -i Display vdev initialization status. 9266 * -L Follow links when resolving vdev path name. 9267 * -p Display values in parsable (exact) format. 9268 * -P Display full path for vdev name. 9269 * -s Display slow IOs column. 9270 * -t Display vdev TRIM status. 9271 * -T Display a timestamp in date(1) or Unix format 9272 * -v Display complete error logs 9273 * -x Display only pools with potential problems 9274 * --power Display vdev enclosure slot power status 9275 * 9276 * Describes the health status of all pools or some subset. 9277 */ 9278 int 9279 zpool_do_status(int argc, char **argv) 9280 { 9281 int c; 9282 int ret; 9283 float interval = 0; 9284 unsigned long count = 0; 9285 status_cbdata_t cb = { 0 }; 9286 char *cmd = NULL; 9287 9288 struct option long_options[] = { 9289 {"power", no_argument, NULL, ZPOOL_OPTION_POWER}, 9290 {0, 0, 0, 0} 9291 }; 9292 9293 /* check options */ 9294 while ((c = getopt_long(argc, argv, "c:DegiLpPstT:vx", long_options, 9295 NULL)) != -1) { 9296 switch (c) { 9297 case 'c': 9298 if (cmd != NULL) { 9299 fprintf(stderr, 9300 gettext("Can't set -c flag twice\n")); 9301 exit(1); 9302 } 9303 9304 if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL && 9305 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) { 9306 fprintf(stderr, gettext( 9307 "Can't run -c, disabled by " 9308 "ZPOOL_SCRIPTS_ENABLED.\n")); 9309 exit(1); 9310 } 9311 9312 if ((getuid() <= 0 || geteuid() <= 0) && 9313 !libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) { 9314 fprintf(stderr, gettext( 9315 "Can't run -c with root privileges " 9316 "unless ZPOOL_SCRIPTS_AS_ROOT is set.\n")); 9317 exit(1); 9318 } 9319 cmd = optarg; 9320 break; 9321 case 'D': 9322 cb.cb_dedup_stats = B_TRUE; 9323 break; 9324 case 'e': 9325 cb.cb_print_unhealthy = B_TRUE; 9326 break; 9327 case 'g': 9328 cb.cb_name_flags |= VDEV_NAME_GUID; 9329 break; 9330 case 'i': 9331 cb.cb_print_vdev_init = B_TRUE; 9332 break; 9333 case 'L': 9334 cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS; 9335 break; 9336 case 'p': 9337 cb.cb_literal = B_TRUE; 9338 break; 9339 case 'P': 9340 cb.cb_name_flags |= VDEV_NAME_PATH; 9341 break; 9342 case 's': 9343 cb.cb_print_slow_ios = B_TRUE; 9344 break; 9345 case 't': 9346 cb.cb_print_vdev_trim = B_TRUE; 9347 break; 9348 case 'T': 9349 get_timestamp_arg(*optarg); 9350 break; 9351 case 'v': 9352 cb.cb_verbose = B_TRUE; 9353 break; 9354 case 'x': 9355 cb.cb_explain = B_TRUE; 9356 break; 9357 case ZPOOL_OPTION_POWER: 9358 cb.cb_print_power = B_TRUE; 9359 break; 9360 case '?': 9361 if (optopt == 'c') { 9362 print_zpool_script_list("status"); 9363 exit(0); 9364 } else { 9365 fprintf(stderr, 9366 gettext("invalid option '%c'\n"), optopt); 9367 } 9368 usage(B_FALSE); 9369 } 9370 } 9371 9372 argc -= optind; 9373 argv += 
optind; 9374 9375 get_interval_count(&argc, argv, &interval, &count); 9376 9377 if (argc == 0) 9378 cb.cb_allpools = B_TRUE; 9379 9380 cb.cb_first = B_TRUE; 9381 cb.cb_print_status = B_TRUE; 9382 9383 for (;;) { 9384 if (timestamp_fmt != NODATE) 9385 print_timestamp(timestamp_fmt); 9386 9387 if (cmd != NULL) 9388 cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd, 9389 NULL, NULL, 0, 0); 9390 9391 ret = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 9392 cb.cb_literal, status_callback, &cb); 9393 9394 if (cb.vcdl != NULL) 9395 free_vdev_cmd_data_list(cb.vcdl); 9396 if (argc == 0 && cb.cb_count == 0) 9397 (void) fprintf(stderr, gettext("no pools available\n")); 9398 else if (cb.cb_explain && cb.cb_first && cb.cb_allpools) 9399 (void) printf(gettext("all pools are healthy\n")); 9400 9401 if (ret != 0) 9402 return (ret); 9403 9404 if (interval == 0) 9405 break; 9406 9407 if (count != 0 && --count == 0) 9408 break; 9409 9410 (void) fflush(stdout); 9411 (void) fsleep(interval); 9412 } 9413 9414 return (0); 9415 } 9416 9417 typedef struct upgrade_cbdata { 9418 int cb_first; 9419 int cb_argc; 9420 uint64_t cb_version; 9421 char **cb_argv; 9422 } upgrade_cbdata_t; 9423 9424 static int 9425 check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs) 9426 { 9427 int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION); 9428 int *count = (int *)unsupp_fs; 9429 9430 if (zfs_version > ZPL_VERSION) { 9431 (void) printf(gettext("%s (v%d) is not supported by this " 9432 "implementation of ZFS.\n"), 9433 zfs_get_name(zhp), zfs_version); 9434 (*count)++; 9435 } 9436 9437 zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs); 9438 9439 zfs_close(zhp); 9440 9441 return (0); 9442 } 9443 9444 static int 9445 upgrade_version(zpool_handle_t *zhp, uint64_t version) 9446 { 9447 int ret; 9448 nvlist_t *config; 9449 uint64_t oldversion; 9450 int unsupp_fs = 0; 9451 9452 config = zpool_get_config(zhp, NULL); 9453 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 9454 &oldversion) == 0); 9455 9456 char compat[ZFS_MAXPROPLEN]; 9457 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 9458 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 9459 compat[0] = '\0'; 9460 9461 assert(SPA_VERSION_IS_SUPPORTED(oldversion)); 9462 assert(oldversion < version); 9463 9464 ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs); 9465 if (ret != 0) 9466 return (ret); 9467 9468 if (unsupp_fs) { 9469 (void) fprintf(stderr, gettext("Upgrade not performed due " 9470 "to %d unsupported filesystems (max v%d).\n"), 9471 unsupp_fs, (int)ZPL_VERSION); 9472 return (1); 9473 } 9474 9475 if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) { 9476 (void) fprintf(stderr, gettext("Upgrade not performed because " 9477 "'compatibility' property set to '" 9478 ZPOOL_COMPAT_LEGACY "'.\n")); 9479 return (1); 9480 } 9481 9482 ret = zpool_upgrade(zhp, version); 9483 if (ret != 0) 9484 return (ret); 9485 9486 if (version >= SPA_VERSION_FEATURES) { 9487 (void) printf(gettext("Successfully upgraded " 9488 "'%s' from version %llu to feature flags.\n"), 9489 zpool_get_name(zhp), (u_longlong_t)oldversion); 9490 } else { 9491 (void) printf(gettext("Successfully upgraded " 9492 "'%s' from version %llu to version %llu.\n"), 9493 zpool_get_name(zhp), (u_longlong_t)oldversion, 9494 (u_longlong_t)version); 9495 } 9496 9497 return (0); 9498 } 9499 9500 static int 9501 upgrade_enable_all(zpool_handle_t *zhp, int *countp) 9502 { 9503 int i, ret, count; 9504 boolean_t firstff = B_TRUE; 9505 nvlist_t *enabled = zpool_get_features(zhp); 9506 9507 
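/*
 * For illustration only (this mirrors the loop below rather than adding a
 * separate code path): a feature that is requested by the compatibility
 * setting but not yet enabled on the pool is turned on by setting its
 * "feature@<name>" pool property, e.g.
 *
 *	(void) zpool_set_prop(zhp, "feature@bookmark_v2", ZFS_FEATURE_ENABLED);
 *
 * which corresponds to "zpool set feature@bookmark_v2=enabled <pool>" on
 * the command line. The feature name shown here is only an example.
 */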
char compat[ZFS_MAXPROPLEN]; 9508 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compat, 9509 ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 9510 compat[0] = '\0'; 9511 9512 boolean_t requested_features[SPA_FEATURES]; 9513 if (zpool_do_load_compat(compat, requested_features) != 9514 ZPOOL_COMPATIBILITY_OK) 9515 return (-1); 9516 9517 count = 0; 9518 for (i = 0; i < SPA_FEATURES; i++) { 9519 const char *fname = spa_feature_table[i].fi_uname; 9520 const char *fguid = spa_feature_table[i].fi_guid; 9521 9522 if (!spa_feature_table[i].fi_zfs_mod_supported) 9523 continue; 9524 9525 if (!nvlist_exists(enabled, fguid) && requested_features[i]) { 9526 char *propname; 9527 verify(-1 != asprintf(&propname, "feature@%s", fname)); 9528 ret = zpool_set_prop(zhp, propname, 9529 ZFS_FEATURE_ENABLED); 9530 if (ret != 0) { 9531 free(propname); 9532 return (ret); 9533 } 9534 count++; 9535 9536 if (firstff) { 9537 (void) printf(gettext("Enabled the " 9538 "following features on '%s':\n"), 9539 zpool_get_name(zhp)); 9540 firstff = B_FALSE; 9541 } 9542 (void) printf(gettext(" %s\n"), fname); 9543 free(propname); 9544 } 9545 } 9546 9547 if (countp != NULL) 9548 *countp = count; 9549 return (0); 9550 } 9551 9552 static int 9553 upgrade_cb(zpool_handle_t *zhp, void *arg) 9554 { 9555 upgrade_cbdata_t *cbp = arg; 9556 nvlist_t *config; 9557 uint64_t version; 9558 boolean_t modified_pool = B_FALSE; 9559 int ret; 9560 9561 config = zpool_get_config(zhp, NULL); 9562 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 9563 &version) == 0); 9564 9565 assert(SPA_VERSION_IS_SUPPORTED(version)); 9566 9567 if (version < cbp->cb_version) { 9568 cbp->cb_first = B_FALSE; 9569 ret = upgrade_version(zhp, cbp->cb_version); 9570 if (ret != 0) 9571 return (ret); 9572 modified_pool = B_TRUE; 9573 9574 /* 9575 * If they did "zpool upgrade -a", then we could 9576 * be doing ioctls to different pools. We need 9577 * to log this history once to each pool, and bypass 9578 * the normal history logging that happens in main(). 9579 */ 9580 (void) zpool_log_history(g_zfs, history_str); 9581 log_history = B_FALSE; 9582 } 9583 9584 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 9585 int count; 9586 ret = upgrade_enable_all(zhp, &count); 9587 if (ret != 0) 9588 return (ret); 9589 9590 if (count > 0) { 9591 cbp->cb_first = B_FALSE; 9592 modified_pool = B_TRUE; 9593 } 9594 } 9595 9596 if (modified_pool) { 9597 (void) printf("\n"); 9598 (void) after_zpool_upgrade(zhp); 9599 } 9600 9601 return (0); 9602 } 9603 9604 static int 9605 upgrade_list_older_cb(zpool_handle_t *zhp, void *arg) 9606 { 9607 upgrade_cbdata_t *cbp = arg; 9608 nvlist_t *config; 9609 uint64_t version; 9610 9611 config = zpool_get_config(zhp, NULL); 9612 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 9613 &version) == 0); 9614 9615 assert(SPA_VERSION_IS_SUPPORTED(version)); 9616 9617 if (version < SPA_VERSION_FEATURES) { 9618 if (cbp->cb_first) { 9619 (void) printf(gettext("The following pools are " 9620 "formatted with legacy version numbers and can\n" 9621 "be upgraded to use feature flags. 
After " 9622 "being upgraded, these pools\nwill no " 9623 "longer be accessible by software that does not " 9624 "support feature\nflags.\n\n" 9625 "Note that setting a pool's 'compatibility' " 9626 "feature to '" ZPOOL_COMPAT_LEGACY "' will\n" 9627 "inhibit upgrades.\n\n")); 9628 (void) printf(gettext("VER POOL\n")); 9629 (void) printf(gettext("--- ------------\n")); 9630 cbp->cb_first = B_FALSE; 9631 } 9632 9633 (void) printf("%2llu %s\n", (u_longlong_t)version, 9634 zpool_get_name(zhp)); 9635 } 9636 9637 return (0); 9638 } 9639 9640 static int 9641 upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg) 9642 { 9643 upgrade_cbdata_t *cbp = arg; 9644 nvlist_t *config; 9645 uint64_t version; 9646 9647 config = zpool_get_config(zhp, NULL); 9648 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 9649 &version) == 0); 9650 9651 if (version >= SPA_VERSION_FEATURES) { 9652 int i; 9653 boolean_t poolfirst = B_TRUE; 9654 nvlist_t *enabled = zpool_get_features(zhp); 9655 9656 for (i = 0; i < SPA_FEATURES; i++) { 9657 const char *fguid = spa_feature_table[i].fi_guid; 9658 const char *fname = spa_feature_table[i].fi_uname; 9659 9660 if (!spa_feature_table[i].fi_zfs_mod_supported) 9661 continue; 9662 9663 if (!nvlist_exists(enabled, fguid)) { 9664 if (cbp->cb_first) { 9665 (void) printf(gettext("\nSome " 9666 "supported features are not " 9667 "enabled on the following pools. " 9668 "Once a\nfeature is enabled the " 9669 "pool may become incompatible with " 9670 "software\nthat does not support " 9671 "the feature. See " 9672 "zpool-features(7) for " 9673 "details.\n\n" 9674 "Note that the pool " 9675 "'compatibility' feature can be " 9676 "used to inhibit\nfeature " 9677 "upgrades.\n\n")); 9678 (void) printf(gettext("POOL " 9679 "FEATURE\n")); 9680 (void) printf(gettext("------" 9681 "---------\n")); 9682 cbp->cb_first = B_FALSE; 9683 } 9684 9685 if (poolfirst) { 9686 (void) printf(gettext("%s\n"), 9687 zpool_get_name(zhp)); 9688 poolfirst = B_FALSE; 9689 } 9690 9691 (void) printf(gettext(" %s\n"), fname); 9692 } 9693 /* 9694 * If they did "zpool upgrade -a", then we could 9695 * be doing ioctls to different pools. We need 9696 * to log this history once to each pool, and bypass 9697 * the normal history logging that happens in main(). 
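 * (Concretely, zpool_log_history() is invoked here for each pool that is
 * processed, and clearing log_history below is what prevents main() from
 * logging the same command a second time.)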
9698 */ 9699 (void) zpool_log_history(g_zfs, history_str); 9700 log_history = B_FALSE; 9701 } 9702 } 9703 9704 return (0); 9705 } 9706 9707 static int 9708 upgrade_one(zpool_handle_t *zhp, void *data) 9709 { 9710 boolean_t modified_pool = B_FALSE; 9711 upgrade_cbdata_t *cbp = data; 9712 uint64_t cur_version; 9713 int ret; 9714 9715 if (strcmp("log", zpool_get_name(zhp)) == 0) { 9716 (void) fprintf(stderr, gettext("'log' is now a reserved word\n" 9717 "Pool 'log' must be renamed using export and import" 9718 " to upgrade.\n")); 9719 return (1); 9720 } 9721 9722 cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 9723 if (cur_version > cbp->cb_version) { 9724 (void) printf(gettext("Pool '%s' is already formatted " 9725 "using more current version '%llu'.\n\n"), 9726 zpool_get_name(zhp), (u_longlong_t)cur_version); 9727 return (0); 9728 } 9729 9730 if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) { 9731 (void) printf(gettext("Pool '%s' is already formatted " 9732 "using version %llu.\n\n"), zpool_get_name(zhp), 9733 (u_longlong_t)cbp->cb_version); 9734 return (0); 9735 } 9736 9737 if (cur_version != cbp->cb_version) { 9738 modified_pool = B_TRUE; 9739 ret = upgrade_version(zhp, cbp->cb_version); 9740 if (ret != 0) 9741 return (ret); 9742 } 9743 9744 if (cbp->cb_version >= SPA_VERSION_FEATURES) { 9745 int count = 0; 9746 ret = upgrade_enable_all(zhp, &count); 9747 if (ret != 0) 9748 return (ret); 9749 9750 if (count != 0) { 9751 modified_pool = B_TRUE; 9752 } else if (cur_version == SPA_VERSION) { 9753 (void) printf(gettext("Pool '%s' already has all " 9754 "supported and requested features enabled.\n"), 9755 zpool_get_name(zhp)); 9756 } 9757 } 9758 9759 if (modified_pool) { 9760 (void) printf("\n"); 9761 (void) after_zpool_upgrade(zhp); 9762 } 9763 9764 return (0); 9765 } 9766 9767 /* 9768 * zpool upgrade 9769 * zpool upgrade -v 9770 * zpool upgrade [-V version] <-a | pool ...> 9771 * 9772 * With no arguments, display downrev'd ZFS pool available for upgrade. 9773 * Individual pools can be upgraded by specifying the pool, and '-a' will 9774 * upgrade all pools. 
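 *
 * For example (illustrative invocations of the modes listed above; 'tank'
 * is just a placeholder pool name):
 *
 *	# zpool upgrade		report pools still on legacy version numbers
 *	# zpool upgrade -v	list supported features and legacy versions
 *	# zpool upgrade -a	upgrade every imported pool
 *	# zpool upgrade tank	upgrade only the pool 'tank'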
9775 */ 9776 int 9777 zpool_do_upgrade(int argc, char **argv) 9778 { 9779 int c; 9780 upgrade_cbdata_t cb = { 0 }; 9781 int ret = 0; 9782 boolean_t showversions = B_FALSE; 9783 boolean_t upgradeall = B_FALSE; 9784 char *end; 9785 9786 9787 /* check options */ 9788 while ((c = getopt(argc, argv, ":avV:")) != -1) { 9789 switch (c) { 9790 case 'a': 9791 upgradeall = B_TRUE; 9792 break; 9793 case 'v': 9794 showversions = B_TRUE; 9795 break; 9796 case 'V': 9797 cb.cb_version = strtoll(optarg, &end, 10); 9798 if (*end != '\0' || 9799 !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) { 9800 (void) fprintf(stderr, 9801 gettext("invalid version '%s'\n"), optarg); 9802 usage(B_FALSE); 9803 } 9804 break; 9805 case ':': 9806 (void) fprintf(stderr, gettext("missing argument for " 9807 "'%c' option\n"), optopt); 9808 usage(B_FALSE); 9809 break; 9810 case '?': 9811 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 9812 optopt); 9813 usage(B_FALSE); 9814 } 9815 } 9816 9817 cb.cb_argc = argc; 9818 cb.cb_argv = argv; 9819 argc -= optind; 9820 argv += optind; 9821 9822 if (cb.cb_version == 0) { 9823 cb.cb_version = SPA_VERSION; 9824 } else if (!upgradeall && argc == 0) { 9825 (void) fprintf(stderr, gettext("-V option is " 9826 "incompatible with other arguments\n")); 9827 usage(B_FALSE); 9828 } 9829 9830 if (showversions) { 9831 if (upgradeall || argc != 0) { 9832 (void) fprintf(stderr, gettext("-v option is " 9833 "incompatible with other arguments\n")); 9834 usage(B_FALSE); 9835 } 9836 } else if (upgradeall) { 9837 if (argc != 0) { 9838 (void) fprintf(stderr, gettext("-a option should not " 9839 "be used along with a pool name\n")); 9840 usage(B_FALSE); 9841 } 9842 } 9843 9844 (void) printf("%s", gettext("This system supports ZFS pool feature " 9845 "flags.\n\n")); 9846 if (showversions) { 9847 int i; 9848 9849 (void) printf(gettext("The following features are " 9850 "supported:\n\n")); 9851 (void) printf(gettext("FEAT DESCRIPTION\n")); 9852 (void) printf("----------------------------------------------" 9853 "---------------\n"); 9854 for (i = 0; i < SPA_FEATURES; i++) { 9855 zfeature_info_t *fi = &spa_feature_table[i]; 9856 if (!fi->fi_zfs_mod_supported) 9857 continue; 9858 const char *ro = 9859 (fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ? 
9860 " (read-only compatible)" : ""; 9861 9862 (void) printf("%-37s%s\n", fi->fi_uname, ro); 9863 (void) printf(" %s\n", fi->fi_desc); 9864 } 9865 (void) printf("\n"); 9866 9867 (void) printf(gettext("The following legacy versions are also " 9868 "supported:\n\n")); 9869 (void) printf(gettext("VER DESCRIPTION\n")); 9870 (void) printf("--- -----------------------------------------" 9871 "---------------\n"); 9872 (void) printf(gettext(" 1 Initial ZFS version\n")); 9873 (void) printf(gettext(" 2 Ditto blocks " 9874 "(replicated metadata)\n")); 9875 (void) printf(gettext(" 3 Hot spares and double parity " 9876 "RAID-Z\n")); 9877 (void) printf(gettext(" 4 zpool history\n")); 9878 (void) printf(gettext(" 5 Compression using the gzip " 9879 "algorithm\n")); 9880 (void) printf(gettext(" 6 bootfs pool property\n")); 9881 (void) printf(gettext(" 7 Separate intent log devices\n")); 9882 (void) printf(gettext(" 8 Delegated administration\n")); 9883 (void) printf(gettext(" 9 refquota and refreservation " 9884 "properties\n")); 9885 (void) printf(gettext(" 10 Cache devices\n")); 9886 (void) printf(gettext(" 11 Improved scrub performance\n")); 9887 (void) printf(gettext(" 12 Snapshot properties\n")); 9888 (void) printf(gettext(" 13 snapused property\n")); 9889 (void) printf(gettext(" 14 passthrough-x aclinherit\n")); 9890 (void) printf(gettext(" 15 user/group space accounting\n")); 9891 (void) printf(gettext(" 16 stmf property support\n")); 9892 (void) printf(gettext(" 17 Triple-parity RAID-Z\n")); 9893 (void) printf(gettext(" 18 Snapshot user holds\n")); 9894 (void) printf(gettext(" 19 Log device removal\n")); 9895 (void) printf(gettext(" 20 Compression using zle " 9896 "(zero-length encoding)\n")); 9897 (void) printf(gettext(" 21 Deduplication\n")); 9898 (void) printf(gettext(" 22 Received properties\n")); 9899 (void) printf(gettext(" 23 Slim ZIL\n")); 9900 (void) printf(gettext(" 24 System attributes\n")); 9901 (void) printf(gettext(" 25 Improved scrub stats\n")); 9902 (void) printf(gettext(" 26 Improved snapshot deletion " 9903 "performance\n")); 9904 (void) printf(gettext(" 27 Improved snapshot creation " 9905 "performance\n")); 9906 (void) printf(gettext(" 28 Multiple vdev replacements\n")); 9907 (void) printf(gettext("\nFor more information on a particular " 9908 "version, including supported releases,\n")); 9909 (void) printf(gettext("see the ZFS Administration Guide.\n\n")); 9910 } else if (argc == 0 && upgradeall) { 9911 cb.cb_first = B_TRUE; 9912 ret = zpool_iter(g_zfs, upgrade_cb, &cb); 9913 if (ret == 0 && cb.cb_first) { 9914 if (cb.cb_version == SPA_VERSION) { 9915 (void) printf(gettext("All pools are already " 9916 "formatted using feature flags.\n\n")); 9917 (void) printf(gettext("Every feature flags " 9918 "pool already has all supported and " 9919 "requested features enabled.\n")); 9920 } else { 9921 (void) printf(gettext("All pools are already " 9922 "formatted with version %llu or higher.\n"), 9923 (u_longlong_t)cb.cb_version); 9924 } 9925 } 9926 } else if (argc == 0) { 9927 cb.cb_first = B_TRUE; 9928 ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb); 9929 assert(ret == 0); 9930 9931 if (cb.cb_first) { 9932 (void) printf(gettext("All pools are formatted " 9933 "using feature flags.\n\n")); 9934 } else { 9935 (void) printf(gettext("\nUse 'zpool upgrade -v' " 9936 "for a list of available legacy versions.\n")); 9937 } 9938 9939 cb.cb_first = B_TRUE; 9940 ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb); 9941 assert(ret == 0); 9942 9943 if (cb.cb_first) { 9944 (void) 
printf(gettext("Every feature flags pool has " 9945 "all supported and requested features enabled.\n")); 9946 } else { 9947 (void) printf(gettext("\n")); 9948 } 9949 } else { 9950 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 9951 B_FALSE, upgrade_one, &cb); 9952 } 9953 9954 return (ret); 9955 } 9956 9957 typedef struct hist_cbdata { 9958 boolean_t first; 9959 boolean_t longfmt; 9960 boolean_t internal; 9961 } hist_cbdata_t; 9962 9963 static void 9964 print_history_records(nvlist_t *nvhis, hist_cbdata_t *cb) 9965 { 9966 nvlist_t **records; 9967 uint_t numrecords; 9968 int i; 9969 9970 verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD, 9971 &records, &numrecords) == 0); 9972 for (i = 0; i < numrecords; i++) { 9973 nvlist_t *rec = records[i]; 9974 char tbuf[64] = ""; 9975 9976 if (nvlist_exists(rec, ZPOOL_HIST_TIME)) { 9977 time_t tsec; 9978 struct tm t; 9979 9980 tsec = fnvlist_lookup_uint64(records[i], 9981 ZPOOL_HIST_TIME); 9982 (void) localtime_r(&tsec, &t); 9983 (void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t); 9984 } 9985 9986 if (nvlist_exists(rec, ZPOOL_HIST_ELAPSED_NS)) { 9987 uint64_t elapsed_ns = fnvlist_lookup_int64(records[i], 9988 ZPOOL_HIST_ELAPSED_NS); 9989 (void) snprintf(tbuf + strlen(tbuf), 9990 sizeof (tbuf) - strlen(tbuf), 9991 " (%lldms)", (long long)elapsed_ns / 1000 / 1000); 9992 } 9993 9994 if (nvlist_exists(rec, ZPOOL_HIST_CMD)) { 9995 (void) printf("%s %s", tbuf, 9996 fnvlist_lookup_string(rec, ZPOOL_HIST_CMD)); 9997 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) { 9998 int ievent = 9999 fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT); 10000 if (!cb->internal) 10001 continue; 10002 if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) { 10003 (void) printf("%s unrecognized record:\n", 10004 tbuf); 10005 dump_nvlist(rec, 4); 10006 continue; 10007 } 10008 (void) printf("%s [internal %s txg:%lld] %s", tbuf, 10009 zfs_history_event_names[ievent], 10010 (longlong_t)fnvlist_lookup_uint64( 10011 rec, ZPOOL_HIST_TXG), 10012 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR)); 10013 } else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) { 10014 if (!cb->internal) 10015 continue; 10016 (void) printf("%s [txg:%lld] %s", tbuf, 10017 (longlong_t)fnvlist_lookup_uint64( 10018 rec, ZPOOL_HIST_TXG), 10019 fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME)); 10020 if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) { 10021 (void) printf(" %s (%llu)", 10022 fnvlist_lookup_string(rec, 10023 ZPOOL_HIST_DSNAME), 10024 (u_longlong_t)fnvlist_lookup_uint64(rec, 10025 ZPOOL_HIST_DSID)); 10026 } 10027 (void) printf(" %s", fnvlist_lookup_string(rec, 10028 ZPOOL_HIST_INT_STR)); 10029 } else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) { 10030 if (!cb->internal) 10031 continue; 10032 (void) printf("%s ioctl %s\n", tbuf, 10033 fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL)); 10034 if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) { 10035 (void) printf(" input:\n"); 10036 dump_nvlist(fnvlist_lookup_nvlist(rec, 10037 ZPOOL_HIST_INPUT_NVL), 8); 10038 } 10039 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) { 10040 (void) printf(" output:\n"); 10041 dump_nvlist(fnvlist_lookup_nvlist(rec, 10042 ZPOOL_HIST_OUTPUT_NVL), 8); 10043 } 10044 if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_SIZE)) { 10045 (void) printf(" output nvlist omitted; " 10046 "original size: %lldKB\n", 10047 (longlong_t)fnvlist_lookup_int64(rec, 10048 ZPOOL_HIST_OUTPUT_SIZE) / 1024); 10049 } 10050 if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) { 10051 (void) printf(" errno: %lld\n", 10052 (longlong_t)fnvlist_lookup_int64(rec, 10053 
ZPOOL_HIST_ERRNO)); 10054 } 10055 } else { 10056 if (!cb->internal) 10057 continue; 10058 (void) printf("%s unrecognized record:\n", tbuf); 10059 dump_nvlist(rec, 4); 10060 } 10061 10062 if (!cb->longfmt) { 10063 (void) printf("\n"); 10064 continue; 10065 } 10066 (void) printf(" ["); 10067 if (nvlist_exists(rec, ZPOOL_HIST_WHO)) { 10068 uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO); 10069 struct passwd *pwd = getpwuid(who); 10070 (void) printf("user %d ", (int)who); 10071 if (pwd != NULL) 10072 (void) printf("(%s) ", pwd->pw_name); 10073 } 10074 if (nvlist_exists(rec, ZPOOL_HIST_HOST)) { 10075 (void) printf("on %s", 10076 fnvlist_lookup_string(rec, ZPOOL_HIST_HOST)); 10077 } 10078 if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) { 10079 (void) printf(":%s", 10080 fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE)); 10081 } 10082 10083 (void) printf("]"); 10084 (void) printf("\n"); 10085 } 10086 } 10087 10088 /* 10089 * Print out the command history for a specific pool. 10090 */ 10091 static int 10092 get_history_one(zpool_handle_t *zhp, void *data) 10093 { 10094 nvlist_t *nvhis; 10095 int ret; 10096 hist_cbdata_t *cb = (hist_cbdata_t *)data; 10097 uint64_t off = 0; 10098 boolean_t eof = B_FALSE; 10099 10100 cb->first = B_FALSE; 10101 10102 (void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp)); 10103 10104 while (!eof) { 10105 if ((ret = zpool_get_history(zhp, &nvhis, &off, &eof)) != 0) 10106 return (ret); 10107 10108 print_history_records(nvhis, cb); 10109 nvlist_free(nvhis); 10110 } 10111 (void) printf("\n"); 10112 10113 return (ret); 10114 } 10115 10116 /* 10117 * zpool history <pool> 10118 * 10119 * Displays the history of commands that modified pools. 10120 */ 10121 int 10122 zpool_do_history(int argc, char **argv) 10123 { 10124 hist_cbdata_t cbdata = { 0 }; 10125 int ret; 10126 int c; 10127 10128 cbdata.first = B_TRUE; 10129 /* check options */ 10130 while ((c = getopt(argc, argv, "li")) != -1) { 10131 switch (c) { 10132 case 'l': 10133 cbdata.longfmt = B_TRUE; 10134 break; 10135 case 'i': 10136 cbdata.internal = B_TRUE; 10137 break; 10138 case '?': 10139 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10140 optopt); 10141 usage(B_FALSE); 10142 } 10143 } 10144 argc -= optind; 10145 argv += optind; 10146 10147 ret = for_each_pool(argc, argv, B_FALSE, NULL, ZFS_TYPE_POOL, 10148 B_FALSE, get_history_one, &cbdata); 10149 10150 if (argc == 0 && cbdata.first == B_TRUE) { 10151 (void) fprintf(stderr, gettext("no pools available\n")); 10152 return (0); 10153 } 10154 10155 return (ret); 10156 } 10157 10158 typedef struct ev_opts { 10159 int verbose; 10160 int scripted; 10161 int follow; 10162 int clear; 10163 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 10164 } ev_opts_t; 10165 10166 static void 10167 zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts) 10168 { 10169 char ctime_str[26], str[32]; 10170 const char *ptr; 10171 int64_t *tv; 10172 uint_t n; 10173 10174 verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0); 10175 memset(str, ' ', 32); 10176 (void) ctime_r((const time_t *)&tv[0], ctime_str); 10177 (void) memcpy(str, ctime_str+4, 6); /* 'Jun 30' */ 10178 (void) memcpy(str+7, ctime_str+20, 4); /* '1993' */ 10179 (void) memcpy(str+12, ctime_str+11, 8); /* '21:49:08' */ 10180 (void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */ 10181 if (opts->scripted) 10182 (void) printf(gettext("%s\t"), str); 10183 else 10184 (void) printf(gettext("%s "), str); 10185 10186 verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0); 10187 (void) 
printf(gettext("%s\n"), ptr); 10188 } 10189 10190 static void 10191 zpool_do_events_nvprint(nvlist_t *nvl, int depth) 10192 { 10193 nvpair_t *nvp; 10194 10195 for (nvp = nvlist_next_nvpair(nvl, NULL); 10196 nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) { 10197 10198 data_type_t type = nvpair_type(nvp); 10199 const char *name = nvpair_name(nvp); 10200 10201 boolean_t b; 10202 uint8_t i8; 10203 uint16_t i16; 10204 uint32_t i32; 10205 uint64_t i64; 10206 const char *str; 10207 nvlist_t *cnv; 10208 10209 printf(gettext("%*s%s = "), depth, "", name); 10210 10211 switch (type) { 10212 case DATA_TYPE_BOOLEAN: 10213 printf(gettext("%s"), "1"); 10214 break; 10215 10216 case DATA_TYPE_BOOLEAN_VALUE: 10217 (void) nvpair_value_boolean_value(nvp, &b); 10218 printf(gettext("%s"), b ? "1" : "0"); 10219 break; 10220 10221 case DATA_TYPE_BYTE: 10222 (void) nvpair_value_byte(nvp, &i8); 10223 printf(gettext("0x%x"), i8); 10224 break; 10225 10226 case DATA_TYPE_INT8: 10227 (void) nvpair_value_int8(nvp, (void *)&i8); 10228 printf(gettext("0x%x"), i8); 10229 break; 10230 10231 case DATA_TYPE_UINT8: 10232 (void) nvpair_value_uint8(nvp, &i8); 10233 printf(gettext("0x%x"), i8); 10234 break; 10235 10236 case DATA_TYPE_INT16: 10237 (void) nvpair_value_int16(nvp, (void *)&i16); 10238 printf(gettext("0x%x"), i16); 10239 break; 10240 10241 case DATA_TYPE_UINT16: 10242 (void) nvpair_value_uint16(nvp, &i16); 10243 printf(gettext("0x%x"), i16); 10244 break; 10245 10246 case DATA_TYPE_INT32: 10247 (void) nvpair_value_int32(nvp, (void *)&i32); 10248 printf(gettext("0x%x"), i32); 10249 break; 10250 10251 case DATA_TYPE_UINT32: 10252 (void) nvpair_value_uint32(nvp, &i32); 10253 printf(gettext("0x%x"), i32); 10254 break; 10255 10256 case DATA_TYPE_INT64: 10257 (void) nvpair_value_int64(nvp, (void *)&i64); 10258 printf(gettext("0x%llx"), (u_longlong_t)i64); 10259 break; 10260 10261 case DATA_TYPE_UINT64: 10262 (void) nvpair_value_uint64(nvp, &i64); 10263 /* 10264 * translate vdev state values to readable 10265 * strings to aide zpool events consumers 10266 */ 10267 if (strcmp(name, 10268 FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 || 10269 strcmp(name, 10270 FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) { 10271 printf(gettext("\"%s\" (0x%llx)"), 10272 zpool_state_to_name(i64, VDEV_AUX_NONE), 10273 (u_longlong_t)i64); 10274 } else { 10275 printf(gettext("0x%llx"), (u_longlong_t)i64); 10276 } 10277 break; 10278 10279 case DATA_TYPE_HRTIME: 10280 (void) nvpair_value_hrtime(nvp, (void *)&i64); 10281 printf(gettext("0x%llx"), (u_longlong_t)i64); 10282 break; 10283 10284 case DATA_TYPE_STRING: 10285 (void) nvpair_value_string(nvp, &str); 10286 printf(gettext("\"%s\""), str ? 
str : "<NULL>"); 10287 break; 10288 10289 case DATA_TYPE_NVLIST: 10290 printf(gettext("(embedded nvlist)\n")); 10291 (void) nvpair_value_nvlist(nvp, &cnv); 10292 zpool_do_events_nvprint(cnv, depth + 8); 10293 printf(gettext("%*s(end %s)"), depth, "", name); 10294 break; 10295 10296 case DATA_TYPE_NVLIST_ARRAY: { 10297 nvlist_t **val; 10298 uint_t i, nelem; 10299 10300 (void) nvpair_value_nvlist_array(nvp, &val, &nelem); 10301 printf(gettext("(%d embedded nvlists)\n"), nelem); 10302 for (i = 0; i < nelem; i++) { 10303 printf(gettext("%*s%s[%d] = %s\n"), 10304 depth, "", name, i, "(embedded nvlist)"); 10305 zpool_do_events_nvprint(val[i], depth + 8); 10306 printf(gettext("%*s(end %s[%i])\n"), 10307 depth, "", name, i); 10308 } 10309 printf(gettext("%*s(end %s)\n"), depth, "", name); 10310 } 10311 break; 10312 10313 case DATA_TYPE_INT8_ARRAY: { 10314 int8_t *val; 10315 uint_t i, nelem; 10316 10317 (void) nvpair_value_int8_array(nvp, &val, &nelem); 10318 for (i = 0; i < nelem; i++) 10319 printf(gettext("0x%x "), val[i]); 10320 10321 break; 10322 } 10323 10324 case DATA_TYPE_UINT8_ARRAY: { 10325 uint8_t *val; 10326 uint_t i, nelem; 10327 10328 (void) nvpair_value_uint8_array(nvp, &val, &nelem); 10329 for (i = 0; i < nelem; i++) 10330 printf(gettext("0x%x "), val[i]); 10331 10332 break; 10333 } 10334 10335 case DATA_TYPE_INT16_ARRAY: { 10336 int16_t *val; 10337 uint_t i, nelem; 10338 10339 (void) nvpair_value_int16_array(nvp, &val, &nelem); 10340 for (i = 0; i < nelem; i++) 10341 printf(gettext("0x%x "), val[i]); 10342 10343 break; 10344 } 10345 10346 case DATA_TYPE_UINT16_ARRAY: { 10347 uint16_t *val; 10348 uint_t i, nelem; 10349 10350 (void) nvpair_value_uint16_array(nvp, &val, &nelem); 10351 for (i = 0; i < nelem; i++) 10352 printf(gettext("0x%x "), val[i]); 10353 10354 break; 10355 } 10356 10357 case DATA_TYPE_INT32_ARRAY: { 10358 int32_t *val; 10359 uint_t i, nelem; 10360 10361 (void) nvpair_value_int32_array(nvp, &val, &nelem); 10362 for (i = 0; i < nelem; i++) 10363 printf(gettext("0x%x "), val[i]); 10364 10365 break; 10366 } 10367 10368 case DATA_TYPE_UINT32_ARRAY: { 10369 uint32_t *val; 10370 uint_t i, nelem; 10371 10372 (void) nvpair_value_uint32_array(nvp, &val, &nelem); 10373 for (i = 0; i < nelem; i++) 10374 printf(gettext("0x%x "), val[i]); 10375 10376 break; 10377 } 10378 10379 case DATA_TYPE_INT64_ARRAY: { 10380 int64_t *val; 10381 uint_t i, nelem; 10382 10383 (void) nvpair_value_int64_array(nvp, &val, &nelem); 10384 for (i = 0; i < nelem; i++) 10385 printf(gettext("0x%llx "), 10386 (u_longlong_t)val[i]); 10387 10388 break; 10389 } 10390 10391 case DATA_TYPE_UINT64_ARRAY: { 10392 uint64_t *val; 10393 uint_t i, nelem; 10394 10395 (void) nvpair_value_uint64_array(nvp, &val, &nelem); 10396 for (i = 0; i < nelem; i++) 10397 printf(gettext("0x%llx "), 10398 (u_longlong_t)val[i]); 10399 10400 break; 10401 } 10402 10403 case DATA_TYPE_STRING_ARRAY: { 10404 const char **str; 10405 uint_t i, nelem; 10406 10407 (void) nvpair_value_string_array(nvp, &str, &nelem); 10408 for (i = 0; i < nelem; i++) 10409 printf(gettext("\"%s\" "), 10410 str[i] ? 
str[i] : "<NULL>"); 10411 10412 break; 10413 } 10414 10415 case DATA_TYPE_BOOLEAN_ARRAY: 10416 case DATA_TYPE_BYTE_ARRAY: 10417 case DATA_TYPE_DOUBLE: 10418 case DATA_TYPE_DONTCARE: 10419 case DATA_TYPE_UNKNOWN: 10420 printf(gettext("<unknown>")); 10421 break; 10422 } 10423 10424 printf(gettext("\n")); 10425 } 10426 } 10427 10428 static int 10429 zpool_do_events_next(ev_opts_t *opts) 10430 { 10431 nvlist_t *nvl; 10432 int zevent_fd, ret, dropped; 10433 const char *pool; 10434 10435 zevent_fd = open(ZFS_DEV, O_RDWR); 10436 VERIFY(zevent_fd >= 0); 10437 10438 if (!opts->scripted) 10439 (void) printf(gettext("%-30s %s\n"), "TIME", "CLASS"); 10440 10441 while (1) { 10442 ret = zpool_events_next(g_zfs, &nvl, &dropped, 10443 (opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd); 10444 if (ret || nvl == NULL) 10445 break; 10446 10447 if (dropped > 0) 10448 (void) printf(gettext("dropped %d events\n"), dropped); 10449 10450 if (strlen(opts->poolname) > 0 && 10451 nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 && 10452 strcmp(opts->poolname, pool) != 0) 10453 continue; 10454 10455 zpool_do_events_short(nvl, opts); 10456 10457 if (opts->verbose) { 10458 zpool_do_events_nvprint(nvl, 8); 10459 printf(gettext("\n")); 10460 } 10461 (void) fflush(stdout); 10462 10463 nvlist_free(nvl); 10464 } 10465 10466 VERIFY(0 == close(zevent_fd)); 10467 10468 return (ret); 10469 } 10470 10471 static int 10472 zpool_do_events_clear(void) 10473 { 10474 int count, ret; 10475 10476 ret = zpool_events_clear(g_zfs, &count); 10477 if (!ret) 10478 (void) printf(gettext("cleared %d events\n"), count); 10479 10480 return (ret); 10481 } 10482 10483 /* 10484 * zpool events [-vHf [pool] | -c] 10485 * 10486 * Displays events logs by ZFS. 10487 */ 10488 int 10489 zpool_do_events(int argc, char **argv) 10490 { 10491 ev_opts_t opts = { 0 }; 10492 int ret; 10493 int c; 10494 10495 /* check options */ 10496 while ((c = getopt(argc, argv, "vHfc")) != -1) { 10497 switch (c) { 10498 case 'v': 10499 opts.verbose = 1; 10500 break; 10501 case 'H': 10502 opts.scripted = 1; 10503 break; 10504 case 'f': 10505 opts.follow = 1; 10506 break; 10507 case 'c': 10508 opts.clear = 1; 10509 break; 10510 case '?': 10511 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10512 optopt); 10513 usage(B_FALSE); 10514 } 10515 } 10516 argc -= optind; 10517 argv += optind; 10518 10519 if (argc > 1) { 10520 (void) fprintf(stderr, gettext("too many arguments\n")); 10521 usage(B_FALSE); 10522 } else if (argc == 1) { 10523 (void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname)); 10524 if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) { 10525 (void) fprintf(stderr, 10526 gettext("invalid pool name '%s'\n"), opts.poolname); 10527 usage(B_FALSE); 10528 } 10529 } 10530 10531 if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) && 10532 opts.clear) { 10533 (void) fprintf(stderr, 10534 gettext("invalid options combined with -c\n")); 10535 usage(B_FALSE); 10536 } 10537 10538 if (opts.clear) 10539 ret = zpool_do_events_clear(); 10540 else 10541 ret = zpool_do_events_next(&opts); 10542 10543 return (ret); 10544 } 10545 10546 static int 10547 get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data) 10548 { 10549 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 10550 char value[ZFS_MAXPROPLEN]; 10551 zprop_source_t srctype; 10552 10553 for (zprop_list_t *pl = cbp->cb_proplist; pl != NULL; 10554 pl = pl->pl_next) { 10555 char *prop_name; 10556 /* 10557 * If the first property is pool name, it is a special 10558 * placeholder 
that we can skip. This will also skip 10559 * over the name property when 'all' is specified. 10560 */ 10561 if (pl->pl_prop == ZPOOL_PROP_NAME && 10562 pl == cbp->cb_proplist) 10563 continue; 10564 10565 if (pl->pl_prop == ZPROP_INVAL) { 10566 prop_name = pl->pl_user_prop; 10567 } else { 10568 prop_name = (char *)vdev_prop_to_name(pl->pl_prop); 10569 } 10570 if (zpool_get_vdev_prop(zhp, vdevname, pl->pl_prop, 10571 prop_name, value, sizeof (value), &srctype, 10572 cbp->cb_literal) == 0) { 10573 zprop_print_one_property(vdevname, cbp, prop_name, 10574 value, srctype, NULL, NULL); 10575 } 10576 } 10577 10578 return (0); 10579 } 10580 10581 static int 10582 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data) 10583 { 10584 zpool_handle_t *zhp = zhp_data; 10585 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 10586 char *vdevname; 10587 const char *type; 10588 int ret; 10589 10590 /* 10591 * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the 10592 * pool name for display purposes, which is not desired. Fallback to 10593 * zpool_vdev_name() when not dealing with the root vdev. 10594 */ 10595 type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE); 10596 if (zhp != NULL && strcmp(type, "root") == 0) 10597 vdevname = strdup("root-0"); 10598 else 10599 vdevname = zpool_vdev_name(g_zfs, zhp, nv, 10600 cbp->cb_vdevs.cb_name_flags); 10601 10602 (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist); 10603 10604 ret = get_callback_vdev(zhp, vdevname, data); 10605 10606 free(vdevname); 10607 10608 return (ret); 10609 } 10610 10611 static int 10612 get_callback(zpool_handle_t *zhp, void *data) 10613 { 10614 zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data; 10615 char value[ZFS_MAXPROPLEN]; 10616 zprop_source_t srctype; 10617 zprop_list_t *pl; 10618 int vid; 10619 10620 if (cbp->cb_type == ZFS_TYPE_VDEV) { 10621 if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) { 10622 for_each_vdev(zhp, get_callback_vdev_cb, data); 10623 } else { 10624 /* Adjust column widths for vdev properties */ 10625 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 10626 vid++) { 10627 vdev_expand_proplist(zhp, 10628 cbp->cb_vdevs.cb_names[vid], 10629 &cbp->cb_proplist); 10630 } 10631 /* Display the properties */ 10632 for (vid = 0; vid < cbp->cb_vdevs.cb_names_count; 10633 vid++) { 10634 get_callback_vdev(zhp, 10635 cbp->cb_vdevs.cb_names[vid], data); 10636 } 10637 } 10638 } else { 10639 assert(cbp->cb_type == ZFS_TYPE_POOL); 10640 for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) { 10641 /* 10642 * Skip the special fake placeholder. This will also 10643 * skip over the name property when 'all' is specified. 
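			 * The placeholder is the fake_name entry that
			 * zpool_do_get() prepends to cb_proplist (its pl_prop
			 * is ZPOOL_PROP_NAME and its pl_width is sized for the
			 * "NAME" header); it only reserves the name column and
			 * has no value of its own to print, so skip it.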
10644 */ 10645 if (pl->pl_prop == ZPOOL_PROP_NAME && 10646 pl == cbp->cb_proplist) 10647 continue; 10648 10649 if (pl->pl_prop == ZPROP_INVAL && 10650 zfs_prop_user(pl->pl_user_prop)) { 10651 srctype = ZPROP_SRC_LOCAL; 10652 10653 if (zpool_get_userprop(zhp, pl->pl_user_prop, 10654 value, sizeof (value), &srctype) != 0) 10655 continue; 10656 10657 zprop_print_one_property(zpool_get_name(zhp), 10658 cbp, pl->pl_user_prop, value, srctype, 10659 NULL, NULL); 10660 } else if (pl->pl_prop == ZPROP_INVAL && 10661 (zpool_prop_feature(pl->pl_user_prop) || 10662 zpool_prop_unsupported(pl->pl_user_prop))) { 10663 srctype = ZPROP_SRC_LOCAL; 10664 10665 if (zpool_prop_get_feature(zhp, 10666 pl->pl_user_prop, value, 10667 sizeof (value)) == 0) { 10668 zprop_print_one_property( 10669 zpool_get_name(zhp), cbp, 10670 pl->pl_user_prop, value, srctype, 10671 NULL, NULL); 10672 } 10673 } else { 10674 if (zpool_get_prop(zhp, pl->pl_prop, value, 10675 sizeof (value), &srctype, 10676 cbp->cb_literal) != 0) 10677 continue; 10678 10679 zprop_print_one_property(zpool_get_name(zhp), 10680 cbp, zpool_prop_to_name(pl->pl_prop), 10681 value, srctype, NULL, NULL); 10682 } 10683 } 10684 } 10685 10686 return (0); 10687 } 10688 10689 /* 10690 * zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ... 10691 * 10692 * -H Scripted mode. Don't display headers, and separate properties 10693 * by a single tab. 10694 * -o List of columns to display. Defaults to 10695 * "name,property,value,source". 10696 * -p Display values in parsable (exact) format. 10697 * 10698 * Get properties of pools in the system. Output space statistics 10699 * for each one as well as other attributes. 10700 */ 10701 int 10702 zpool_do_get(int argc, char **argv) 10703 { 10704 zprop_get_cbdata_t cb = { 0 }; 10705 zprop_list_t fake_name = { 0 }; 10706 int ret; 10707 int c, i; 10708 char *propstr = NULL; 10709 char *vdev = NULL; 10710 10711 cb.cb_first = B_TRUE; 10712 10713 /* 10714 * Set up default columns and sources. 
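	 * These match the documented -o default of
	 * "name,property,value,source"; the -o option below overrides them.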
10715 */ 10716 cb.cb_sources = ZPROP_SRC_ALL; 10717 cb.cb_columns[0] = GET_COL_NAME; 10718 cb.cb_columns[1] = GET_COL_PROPERTY; 10719 cb.cb_columns[2] = GET_COL_VALUE; 10720 cb.cb_columns[3] = GET_COL_SOURCE; 10721 cb.cb_type = ZFS_TYPE_POOL; 10722 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 10723 current_prop_type = cb.cb_type; 10724 10725 /* check options */ 10726 while ((c = getopt(argc, argv, ":Hpo:")) != -1) { 10727 switch (c) { 10728 case 'p': 10729 cb.cb_literal = B_TRUE; 10730 break; 10731 case 'H': 10732 cb.cb_scripted = B_TRUE; 10733 break; 10734 case 'o': 10735 memset(&cb.cb_columns, 0, sizeof (cb.cb_columns)); 10736 i = 0; 10737 10738 for (char *tok; (tok = strsep(&optarg, ",")); ) { 10739 static const char *const col_opts[] = 10740 { "name", "property", "value", "source", 10741 "all" }; 10742 static const zfs_get_column_t col_cols[] = 10743 { GET_COL_NAME, GET_COL_PROPERTY, GET_COL_VALUE, 10744 GET_COL_SOURCE }; 10745 10746 if (i == ZFS_GET_NCOLS - 1) { 10747 (void) fprintf(stderr, gettext("too " 10748 "many fields given to -o " 10749 "option\n")); 10750 usage(B_FALSE); 10751 } 10752 10753 for (c = 0; c < ARRAY_SIZE(col_opts); ++c) 10754 if (strcmp(tok, col_opts[c]) == 0) 10755 goto found; 10756 10757 (void) fprintf(stderr, 10758 gettext("invalid column name '%s'\n"), tok); 10759 usage(B_FALSE); 10760 10761 found: 10762 if (c >= 4) { 10763 if (i > 0) { 10764 (void) fprintf(stderr, 10765 gettext("\"all\" conflicts " 10766 "with specific fields " 10767 "given to -o option\n")); 10768 usage(B_FALSE); 10769 } 10770 10771 memcpy(cb.cb_columns, col_cols, 10772 sizeof (col_cols)); 10773 i = ZFS_GET_NCOLS - 1; 10774 } else 10775 cb.cb_columns[i++] = col_cols[c]; 10776 } 10777 break; 10778 case '?': 10779 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10780 optopt); 10781 usage(B_FALSE); 10782 } 10783 } 10784 10785 argc -= optind; 10786 argv += optind; 10787 10788 if (argc < 1) { 10789 (void) fprintf(stderr, gettext("missing property " 10790 "argument\n")); 10791 usage(B_FALSE); 10792 } 10793 10794 /* Properties list is needed later by zprop_get_list() */ 10795 propstr = argv[0]; 10796 10797 argc--; 10798 argv++; 10799 10800 if (argc == 0) { 10801 /* No args, so just print the defaults. */ 10802 } else if (are_all_pools(argc, argv)) { 10803 /* All the args are pool names */ 10804 } else if (are_all_pools(1, argv)) { 10805 /* The first arg is a pool name */ 10806 if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) || 10807 (argc == 2 && strcmp(argv[1], "root") == 0) || 10808 are_vdevs_in_pool(argc - 1, argv + 1, argv[0], 10809 &cb.cb_vdevs)) { 10810 10811 if (strcmp(argv[1], "root") == 0) 10812 vdev = strdup("root-0"); 10813 else 10814 vdev = strdup(argv[1]); 10815 10816 /* ... and the rest are vdev names */ 10817 cb.cb_vdevs.cb_names = &vdev; 10818 cb.cb_vdevs.cb_names_count = argc - 1; 10819 cb.cb_type = ZFS_TYPE_VDEV; 10820 argc = 1; /* One pool to process */ 10821 } else { 10822 fprintf(stderr, gettext("Expected a list of vdevs in" 10823 " \"%s\", but got:\n"), argv[0]); 10824 error_list_unresolved_vdevs(argc - 1, argv + 1, 10825 argv[0], &cb.cb_vdevs); 10826 fprintf(stderr, "\n"); 10827 usage(B_FALSE); 10828 return (1); 10829 } 10830 } else { 10831 /* 10832 * The first arg isn't the name of a valid pool. 
10833 */ 10834 fprintf(stderr, gettext("Cannot get properties of %s: " 10835 "no such pool available.\n"), argv[0]); 10836 return (1); 10837 } 10838 10839 if (zprop_get_list(g_zfs, propstr, &cb.cb_proplist, 10840 cb.cb_type) != 0) { 10841 /* Use correct list of valid properties (pool or vdev) */ 10842 current_prop_type = cb.cb_type; 10843 usage(B_FALSE); 10844 } 10845 10846 if (cb.cb_proplist != NULL) { 10847 fake_name.pl_prop = ZPOOL_PROP_NAME; 10848 fake_name.pl_width = strlen(gettext("NAME")); 10849 fake_name.pl_next = cb.cb_proplist; 10850 cb.cb_proplist = &fake_name; 10851 } 10852 10853 ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist, cb.cb_type, 10854 cb.cb_literal, get_callback, &cb); 10855 10856 if (cb.cb_proplist == &fake_name) 10857 zprop_free_list(fake_name.pl_next); 10858 else 10859 zprop_free_list(cb.cb_proplist); 10860 10861 if (vdev != NULL) 10862 free(vdev); 10863 10864 return (ret); 10865 } 10866 10867 typedef struct set_cbdata { 10868 char *cb_propname; 10869 char *cb_value; 10870 zfs_type_t cb_type; 10871 vdev_cbdata_t cb_vdevs; 10872 boolean_t cb_any_successful; 10873 } set_cbdata_t; 10874 10875 static int 10876 set_pool_callback(zpool_handle_t *zhp, set_cbdata_t *cb) 10877 { 10878 int error; 10879 10880 /* Check if we have out-of-bounds features */ 10881 if (strcmp(cb->cb_propname, ZPOOL_CONFIG_COMPATIBILITY) == 0) { 10882 boolean_t features[SPA_FEATURES]; 10883 if (zpool_do_load_compat(cb->cb_value, features) != 10884 ZPOOL_COMPATIBILITY_OK) 10885 return (-1); 10886 10887 nvlist_t *enabled = zpool_get_features(zhp); 10888 spa_feature_t i; 10889 for (i = 0; i < SPA_FEATURES; i++) { 10890 const char *fguid = spa_feature_table[i].fi_guid; 10891 if (nvlist_exists(enabled, fguid) && !features[i]) 10892 break; 10893 } 10894 if (i < SPA_FEATURES) 10895 (void) fprintf(stderr, gettext("Warning: one or " 10896 "more features already enabled on pool '%s'\n" 10897 "are not present in this compatibility set.\n"), 10898 zpool_get_name(zhp)); 10899 } 10900 10901 /* if we're setting a feature, check it's in compatibility set */ 10902 if (zpool_prop_feature(cb->cb_propname) && 10903 strcmp(cb->cb_value, ZFS_FEATURE_ENABLED) == 0) { 10904 char *fname = strchr(cb->cb_propname, '@') + 1; 10905 spa_feature_t f; 10906 10907 if (zfeature_lookup_name(fname, &f) == 0) { 10908 char compat[ZFS_MAXPROPLEN]; 10909 if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, 10910 compat, ZFS_MAXPROPLEN, NULL, B_FALSE) != 0) 10911 compat[0] = '\0'; 10912 10913 boolean_t features[SPA_FEATURES]; 10914 if (zpool_do_load_compat(compat, features) != 10915 ZPOOL_COMPATIBILITY_OK) { 10916 (void) fprintf(stderr, gettext("Error: " 10917 "cannot enable feature '%s' on pool '%s'\n" 10918 "because the pool's 'compatibility' " 10919 "property cannot be parsed.\n"), 10920 fname, zpool_get_name(zhp)); 10921 return (-1); 10922 } 10923 10924 if (!features[f]) { 10925 (void) fprintf(stderr, gettext("Error: " 10926 "cannot enable feature '%s' on pool '%s'\n" 10927 "as it is not specified in this pool's " 10928 "current compatibility set.\n" 10929 "Consider setting 'compatibility' to a " 10930 "less restrictive set, or to 'off'.\n"), 10931 fname, zpool_get_name(zhp)); 10932 return (-1); 10933 } 10934 } 10935 } 10936 10937 error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value); 10938 10939 return (error); 10940 } 10941 10942 static int 10943 set_callback(zpool_handle_t *zhp, void *data) 10944 { 10945 int error; 10946 set_cbdata_t *cb = (set_cbdata_t *)data; 10947 10948 if (cb->cb_type == ZFS_TYPE_VDEV) { 10949 error 
= zpool_set_vdev_prop(zhp, *cb->cb_vdevs.cb_names, 10950 cb->cb_propname, cb->cb_value); 10951 } else { 10952 assert(cb->cb_type == ZFS_TYPE_POOL); 10953 error = set_pool_callback(zhp, cb); 10954 } 10955 10956 cb->cb_any_successful = !error; 10957 return (error); 10958 } 10959 10960 int 10961 zpool_do_set(int argc, char **argv) 10962 { 10963 set_cbdata_t cb = { 0 }; 10964 int error; 10965 char *vdev = NULL; 10966 10967 current_prop_type = ZFS_TYPE_POOL; 10968 if (argc > 1 && argv[1][0] == '-') { 10969 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 10970 argv[1][1]); 10971 usage(B_FALSE); 10972 } 10973 10974 if (argc < 2) { 10975 (void) fprintf(stderr, gettext("missing property=value " 10976 "argument\n")); 10977 usage(B_FALSE); 10978 } 10979 10980 if (argc < 3) { 10981 (void) fprintf(stderr, gettext("missing pool name\n")); 10982 usage(B_FALSE); 10983 } 10984 10985 if (argc > 4) { 10986 (void) fprintf(stderr, gettext("too many pool names\n")); 10987 usage(B_FALSE); 10988 } 10989 10990 cb.cb_propname = argv[1]; 10991 cb.cb_type = ZFS_TYPE_POOL; 10992 cb.cb_vdevs.cb_name_flags |= VDEV_NAME_TYPE_ID; 10993 cb.cb_value = strchr(cb.cb_propname, '='); 10994 if (cb.cb_value == NULL) { 10995 (void) fprintf(stderr, gettext("missing value in " 10996 "property=value argument\n")); 10997 usage(B_FALSE); 10998 } 10999 11000 *(cb.cb_value) = '\0'; 11001 cb.cb_value++; 11002 argc -= 2; 11003 argv += 2; 11004 11005 /* argv[0] is pool name */ 11006 if (!is_pool(argv[0])) { 11007 (void) fprintf(stderr, 11008 gettext("cannot open '%s': is not a pool\n"), argv[0]); 11009 return (EINVAL); 11010 } 11011 11012 /* argv[1], when supplied, is vdev name */ 11013 if (argc == 2) { 11014 11015 if (strcmp(argv[1], "root") == 0) 11016 vdev = strdup("root-0"); 11017 else 11018 vdev = strdup(argv[1]); 11019 11020 if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) { 11021 (void) fprintf(stderr, gettext( 11022 "cannot find '%s' in '%s': device not in pool\n"), 11023 vdev, argv[0]); 11024 free(vdev); 11025 return (EINVAL); 11026 } 11027 cb.cb_vdevs.cb_names = &vdev; 11028 cb.cb_vdevs.cb_names_count = 1; 11029 cb.cb_type = ZFS_TYPE_VDEV; 11030 } 11031 11032 error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL, 11033 B_FALSE, set_callback, &cb); 11034 11035 if (vdev != NULL) 11036 free(vdev); 11037 11038 return (error); 11039 } 11040 11041 /* Add up the total number of bytes left to initialize/trim across all vdevs */ 11042 static uint64_t 11043 vdev_activity_remaining(nvlist_t *nv, zpool_wait_activity_t activity) 11044 { 11045 uint64_t bytes_remaining; 11046 nvlist_t **child; 11047 uint_t c, children; 11048 vdev_stat_t *vs; 11049 11050 assert(activity == ZPOOL_WAIT_INITIALIZE || 11051 activity == ZPOOL_WAIT_TRIM); 11052 11053 verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 11054 (uint64_t **)&vs, &c) == 0); 11055 11056 if (activity == ZPOOL_WAIT_INITIALIZE && 11057 vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) 11058 bytes_remaining = vs->vs_initialize_bytes_est - 11059 vs->vs_initialize_bytes_done; 11060 else if (activity == ZPOOL_WAIT_TRIM && 11061 vs->vs_trim_state == VDEV_TRIM_ACTIVE) 11062 bytes_remaining = vs->vs_trim_bytes_est - 11063 vs->vs_trim_bytes_done; 11064 else 11065 bytes_remaining = 0; 11066 11067 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 11068 &child, &children) != 0) 11069 children = 0; 11070 11071 for (c = 0; c < children; c++) 11072 bytes_remaining += vdev_activity_remaining(child[c], activity); 11073 11074 return (bytes_remaining); 11075 } 11076 
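/*
 * Illustrative sketch (not part of the original flow): given an open pool
 * handle, the bytes still to be trimmed across the whole pool can be read
 * off the root of the vdev tree, much as print_wait_status_row() below
 * does.  'config', 'nvroot' and 'remaining' are hypothetical locals:
 *
 *	nvlist_t *config = zpool_get_config(zhp, NULL);
 *	nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
 *	    ZPOOL_CONFIG_VDEV_TREE);
 *	uint64_t remaining = vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM);
 */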
11077 /* Add up the total number of bytes left to rebuild across top-level vdevs */ 11078 static uint64_t 11079 vdev_activity_top_remaining(nvlist_t *nv) 11080 { 11081 uint64_t bytes_remaining = 0; 11082 nvlist_t **child; 11083 uint_t children; 11084 int error; 11085 11086 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 11087 &child, &children) != 0) 11088 children = 0; 11089 11090 for (uint_t c = 0; c < children; c++) { 11091 vdev_rebuild_stat_t *vrs; 11092 uint_t i; 11093 11094 error = nvlist_lookup_uint64_array(child[c], 11095 ZPOOL_CONFIG_REBUILD_STATS, (uint64_t **)&vrs, &i); 11096 if (error == 0) { 11097 if (vrs->vrs_state == VDEV_REBUILD_ACTIVE) { 11098 bytes_remaining += (vrs->vrs_bytes_est - 11099 vrs->vrs_bytes_rebuilt); 11100 } 11101 } 11102 } 11103 11104 return (bytes_remaining); 11105 } 11106 11107 /* Whether any vdevs are 'spare' or 'replacing' vdevs */ 11108 static boolean_t 11109 vdev_any_spare_replacing(nvlist_t *nv) 11110 { 11111 nvlist_t **child; 11112 uint_t c, children; 11113 const char *vdev_type; 11114 11115 (void) nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type); 11116 11117 if (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 || 11118 strcmp(vdev_type, VDEV_TYPE_SPARE) == 0 || 11119 strcmp(vdev_type, VDEV_TYPE_DRAID_SPARE) == 0) { 11120 return (B_TRUE); 11121 } 11122 11123 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 11124 &child, &children) != 0) 11125 children = 0; 11126 11127 for (c = 0; c < children; c++) { 11128 if (vdev_any_spare_replacing(child[c])) 11129 return (B_TRUE); 11130 } 11131 11132 return (B_FALSE); 11133 } 11134 11135 typedef struct wait_data { 11136 char *wd_poolname; 11137 boolean_t wd_scripted; 11138 boolean_t wd_exact; 11139 boolean_t wd_headers_once; 11140 boolean_t wd_should_exit; 11141 /* Which activities to wait for */ 11142 boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES]; 11143 float wd_interval; 11144 pthread_cond_t wd_cv; 11145 pthread_mutex_t wd_mutex; 11146 } wait_data_t; 11147 11148 /* 11149 * Print to stdout a single line, containing one column for each activity that 11150 * we are waiting for specifying how many bytes of work are left for that 11151 * activity. 11152 */ 11153 static void 11154 print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row) 11155 { 11156 nvlist_t *config, *nvroot; 11157 uint_t c; 11158 int i; 11159 pool_checkpoint_stat_t *pcs = NULL; 11160 pool_scan_stat_t *pss = NULL; 11161 pool_removal_stat_t *prs = NULL; 11162 pool_raidz_expand_stat_t *pres = NULL; 11163 const char *const headers[] = {"DISCARD", "FREE", "INITIALIZE", 11164 "REPLACE", "REMOVE", "RESILVER", "SCRUB", "TRIM", "RAIDZ_EXPAND"}; 11165 int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES]; 11166 11167 /* Calculate the width of each column */ 11168 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 11169 /* 11170 * Make sure we have enough space in the col for pretty-printed 11171 * numbers and for the column header, and then leave a couple 11172 * spaces between cols for readability. 
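		 * The MAX() with 6 below assumes a zfs_nicenum()-formatted
		 * byte count fits in six characters; the trailing +2 is the
		 * inter-column gap.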
11173 */ 11174 col_widths[i] = MAX(strlen(headers[i]), 6) + 2; 11175 } 11176 11177 if (timestamp_fmt != NODATE) 11178 print_timestamp(timestamp_fmt); 11179 11180 /* Print header if appropriate */ 11181 int term_height = terminal_height(); 11182 boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 && 11183 row % (term_height-1) == 0); 11184 if (!wd->wd_scripted && (row == 0 || reprint_header)) { 11185 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 11186 if (wd->wd_enabled[i]) 11187 (void) printf("%*s", col_widths[i], headers[i]); 11188 } 11189 (void) fputc('\n', stdout); 11190 } 11191 11192 /* Bytes of work remaining in each activity */ 11193 int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0}; 11194 11195 bytes_rem[ZPOOL_WAIT_FREE] = 11196 zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL); 11197 11198 config = zpool_get_config(zhp, NULL); 11199 nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE); 11200 11201 (void) nvlist_lookup_uint64_array(nvroot, 11202 ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c); 11203 if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING) 11204 bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space; 11205 11206 (void) nvlist_lookup_uint64_array(nvroot, 11207 ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c); 11208 if (prs != NULL && prs->prs_state == DSS_SCANNING) 11209 bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy - 11210 prs->prs_copied; 11211 11212 (void) nvlist_lookup_uint64_array(nvroot, 11213 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c); 11214 if (pss != NULL && pss->pss_state == DSS_SCANNING && 11215 pss->pss_pass_scrub_pause == 0) { 11216 int64_t rem = pss->pss_to_examine - pss->pss_issued; 11217 if (pss->pss_func == POOL_SCAN_SCRUB) 11218 bytes_rem[ZPOOL_WAIT_SCRUB] = rem; 11219 else 11220 bytes_rem[ZPOOL_WAIT_RESILVER] = rem; 11221 } else if (check_rebuilding(nvroot, NULL)) { 11222 bytes_rem[ZPOOL_WAIT_RESILVER] = 11223 vdev_activity_top_remaining(nvroot); 11224 } 11225 11226 (void) nvlist_lookup_uint64_array(nvroot, 11227 ZPOOL_CONFIG_RAIDZ_EXPAND_STATS, (uint64_t **)&pres, &c); 11228 if (pres != NULL && pres->pres_state == DSS_SCANNING) { 11229 int64_t rem = pres->pres_to_reflow - pres->pres_reflowed; 11230 bytes_rem[ZPOOL_WAIT_RAIDZ_EXPAND] = rem; 11231 } 11232 11233 bytes_rem[ZPOOL_WAIT_INITIALIZE] = 11234 vdev_activity_remaining(nvroot, ZPOOL_WAIT_INITIALIZE); 11235 bytes_rem[ZPOOL_WAIT_TRIM] = 11236 vdev_activity_remaining(nvroot, ZPOOL_WAIT_TRIM); 11237 11238 /* 11239 * A replace finishes after resilvering finishes, so the amount of work 11240 * left for a replace is the same as for resilvering. 11241 * 11242 * It isn't quite correct to say that if we have any 'spare' or 11243 * 'replacing' vdevs and a resilver is happening, then a replace is in 11244 * progress, like we do here. When a hot spare is used, the faulted vdev 11245 * is not removed after the hot spare is resilvered, so parent 'spare' 11246 * vdev is not removed either. So we could have a 'spare' vdev, but be 11247 * resilvering for a different reason. However, we use it as a heuristic 11248 * because we don't have access to the DTLs, which could tell us whether 11249 * or not we have really finished resilvering a hot spare. 
11250 */ 11251 if (vdev_any_spare_replacing(nvroot)) 11252 bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER]; 11253 11254 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 11255 char buf[64]; 11256 if (!wd->wd_enabled[i]) 11257 continue; 11258 11259 if (wd->wd_exact) { 11260 (void) snprintf(buf, sizeof (buf), "%" PRIi64, 11261 bytes_rem[i]); 11262 } else { 11263 zfs_nicenum(bytes_rem[i], buf, sizeof (buf)); 11264 } 11265 11266 if (wd->wd_scripted) 11267 (void) printf(i == 0 ? "%s" : "\t%s", buf); 11268 else 11269 (void) printf(" %*s", col_widths[i] - 1, buf); 11270 } 11271 (void) printf("\n"); 11272 (void) fflush(stdout); 11273 } 11274 11275 static void * 11276 wait_status_thread(void *arg) 11277 { 11278 wait_data_t *wd = (wait_data_t *)arg; 11279 zpool_handle_t *zhp; 11280 11281 if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL) 11282 return (void *)(1); 11283 11284 for (int row = 0; ; row++) { 11285 boolean_t missing; 11286 struct timespec timeout; 11287 int ret = 0; 11288 (void) clock_gettime(CLOCK_REALTIME, &timeout); 11289 11290 if (zpool_refresh_stats(zhp, &missing) != 0 || missing || 11291 zpool_props_refresh(zhp) != 0) { 11292 zpool_close(zhp); 11293 return (void *)(uintptr_t)(missing ? 0 : 1); 11294 } 11295 11296 print_wait_status_row(wd, zhp, row); 11297 11298 timeout.tv_sec += floor(wd->wd_interval); 11299 long nanos = timeout.tv_nsec + 11300 (wd->wd_interval - floor(wd->wd_interval)) * NANOSEC; 11301 if (nanos >= NANOSEC) { 11302 timeout.tv_sec++; 11303 timeout.tv_nsec = nanos - NANOSEC; 11304 } else { 11305 timeout.tv_nsec = nanos; 11306 } 11307 pthread_mutex_lock(&wd->wd_mutex); 11308 if (!wd->wd_should_exit) 11309 ret = pthread_cond_timedwait(&wd->wd_cv, &wd->wd_mutex, 11310 &timeout); 11311 pthread_mutex_unlock(&wd->wd_mutex); 11312 if (ret == 0) { 11313 break; /* signaled by main thread */ 11314 } else if (ret != ETIMEDOUT) { 11315 (void) fprintf(stderr, gettext("pthread_cond_timedwait " 11316 "failed: %s\n"), strerror(ret)); 11317 zpool_close(zhp); 11318 return (void *)(uintptr_t)(1); 11319 } 11320 } 11321 11322 zpool_close(zhp); 11323 return (void *)(0); 11324 } 11325 11326 int 11327 zpool_do_wait(int argc, char **argv) 11328 { 11329 boolean_t verbose = B_FALSE; 11330 int c, i; 11331 unsigned long count; 11332 pthread_t status_thr; 11333 int error = 0; 11334 zpool_handle_t *zhp; 11335 11336 wait_data_t wd; 11337 wd.wd_scripted = B_FALSE; 11338 wd.wd_exact = B_FALSE; 11339 wd.wd_headers_once = B_FALSE; 11340 wd.wd_should_exit = B_FALSE; 11341 11342 pthread_mutex_init(&wd.wd_mutex, NULL); 11343 pthread_cond_init(&wd.wd_cv, NULL); 11344 11345 /* By default, wait for all types of activity. 
*/ 11346 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) 11347 wd.wd_enabled[i] = B_TRUE; 11348 11349 while ((c = getopt(argc, argv, "HpT:t:")) != -1) { 11350 switch (c) { 11351 case 'H': 11352 wd.wd_scripted = B_TRUE; 11353 break; 11354 case 'n': 11355 wd.wd_headers_once = B_TRUE; 11356 break; 11357 case 'p': 11358 wd.wd_exact = B_TRUE; 11359 break; 11360 case 'T': 11361 get_timestamp_arg(*optarg); 11362 break; 11363 case 't': 11364 /* Reset activities array */ 11365 memset(&wd.wd_enabled, 0, sizeof (wd.wd_enabled)); 11366 11367 for (char *tok; (tok = strsep(&optarg, ",")); ) { 11368 static const char *const col_opts[] = { 11369 "discard", "free", "initialize", "replace", 11370 "remove", "resilver", "scrub", "trim", 11371 "raidz_expand" }; 11372 11373 for (i = 0; i < ARRAY_SIZE(col_opts); ++i) 11374 if (strcmp(tok, col_opts[i]) == 0) { 11375 wd.wd_enabled[i] = B_TRUE; 11376 goto found; 11377 } 11378 11379 (void) fprintf(stderr, 11380 gettext("invalid activity '%s'\n"), tok); 11381 usage(B_FALSE); 11382 found:; 11383 } 11384 break; 11385 case '?': 11386 (void) fprintf(stderr, gettext("invalid option '%c'\n"), 11387 optopt); 11388 usage(B_FALSE); 11389 } 11390 } 11391 11392 argc -= optind; 11393 argv += optind; 11394 11395 get_interval_count(&argc, argv, &wd.wd_interval, &count); 11396 if (count != 0) { 11397 /* This subcmd only accepts an interval, not a count */ 11398 (void) fprintf(stderr, gettext("too many arguments\n")); 11399 usage(B_FALSE); 11400 } 11401 11402 if (wd.wd_interval != 0) 11403 verbose = B_TRUE; 11404 11405 if (argc < 1) { 11406 (void) fprintf(stderr, gettext("missing 'pool' argument\n")); 11407 usage(B_FALSE); 11408 } 11409 if (argc > 1) { 11410 (void) fprintf(stderr, gettext("too many arguments\n")); 11411 usage(B_FALSE); 11412 } 11413 11414 wd.wd_poolname = argv[0]; 11415 11416 if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL) 11417 return (1); 11418 11419 if (verbose) { 11420 /* 11421 * We use a separate thread for printing status updates because 11422 * the main thread will call lzc_wait(), which blocks as long 11423 * as an activity is in progress, which can be a long time. 11424 */ 11425 if (pthread_create(&status_thr, NULL, wait_status_thread, &wd) 11426 != 0) { 11427 (void) fprintf(stderr, gettext("failed to create status" 11428 "thread: %s\n"), strerror(errno)); 11429 zpool_close(zhp); 11430 return (1); 11431 } 11432 } 11433 11434 /* 11435 * Loop over all activities that we are supposed to wait for until none 11436 * of them are in progress. Note that this means we can end up waiting 11437 * for more activities to complete than just those that were in progress 11438 * when we began waiting; if an activity we are interested in begins 11439 * while we are waiting for another activity, we will wait for both to 11440 * complete before exiting. 
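	 * Each pass calls zpool_wait_status() once per enabled activity; that
	 * call blocks while the activity is in progress and reports, via
	 * 'waited', whether it actually had to wait.  A pass in which nothing
	 * waited means no enabled activity remains in progress, so we stop.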
11441 */ 11442 for (;;) { 11443 boolean_t missing = B_FALSE; 11444 boolean_t any_waited = B_FALSE; 11445 11446 for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) { 11447 boolean_t waited; 11448 11449 if (!wd.wd_enabled[i]) 11450 continue; 11451 11452 error = zpool_wait_status(zhp, i, &missing, &waited); 11453 if (error != 0 || missing) 11454 break; 11455 11456 any_waited = (any_waited || waited); 11457 } 11458 11459 if (error != 0 || missing || !any_waited) 11460 break; 11461 } 11462 11463 zpool_close(zhp); 11464 11465 if (verbose) { 11466 uintptr_t status; 11467 pthread_mutex_lock(&wd.wd_mutex); 11468 wd.wd_should_exit = B_TRUE; 11469 pthread_cond_signal(&wd.wd_cv); 11470 pthread_mutex_unlock(&wd.wd_mutex); 11471 (void) pthread_join(status_thr, (void *)&status); 11472 if (status != 0) 11473 error = status; 11474 } 11475 11476 pthread_mutex_destroy(&wd.wd_mutex); 11477 pthread_cond_destroy(&wd.wd_cv); 11478 return (error); 11479 } 11480 11481 static int 11482 find_command_idx(const char *command, int *idx) 11483 { 11484 for (int i = 0; i < NCOMMAND; ++i) { 11485 if (command_table[i].name == NULL) 11486 continue; 11487 11488 if (strcmp(command, command_table[i].name) == 0) { 11489 *idx = i; 11490 return (0); 11491 } 11492 } 11493 return (1); 11494 } 11495 11496 /* 11497 * Display version message 11498 */ 11499 static int 11500 zpool_do_version(int argc, char **argv) 11501 { 11502 (void) argc, (void) argv; 11503 return (zfs_version_print() != 0); 11504 } 11505 11506 /* Display documentation */ 11507 static int 11508 zpool_do_help(int argc, char **argv) 11509 { 11510 char page[MAXNAMELEN]; 11511 if (argc < 3 || strcmp(argv[2], "zpool") == 0) 11512 strcpy(page, "zpool"); 11513 else if (strcmp(argv[2], "concepts") == 0 || 11514 strcmp(argv[2], "props") == 0) 11515 snprintf(page, sizeof (page), "zpool%s", argv[2]); 11516 else 11517 snprintf(page, sizeof (page), "zpool-%s", argv[2]); 11518 11519 execlp("man", "man", page, NULL); 11520 11521 fprintf(stderr, "couldn't run man program: %s", strerror(errno)); 11522 return (-1); 11523 } 11524 11525 /* 11526 * Do zpool_load_compat() and print error message on failure 11527 */ 11528 static zpool_compat_status_t 11529 zpool_do_load_compat(const char *compat, boolean_t *list) 11530 { 11531 char report[1024]; 11532 11533 zpool_compat_status_t ret; 11534 11535 ret = zpool_load_compat(compat, list, report, 1024); 11536 switch (ret) { 11537 11538 case ZPOOL_COMPATIBILITY_OK: 11539 break; 11540 11541 case ZPOOL_COMPATIBILITY_NOFILES: 11542 case ZPOOL_COMPATIBILITY_BADFILE: 11543 case ZPOOL_COMPATIBILITY_BADTOKEN: 11544 (void) fprintf(stderr, "Error: %s\n", report); 11545 break; 11546 11547 case ZPOOL_COMPATIBILITY_WARNTOKEN: 11548 (void) fprintf(stderr, "Warning: %s\n", report); 11549 ret = ZPOOL_COMPATIBILITY_OK; 11550 break; 11551 } 11552 return (ret); 11553 } 11554 11555 int 11556 main(int argc, char **argv) 11557 { 11558 int ret = 0; 11559 int i = 0; 11560 char *cmdname; 11561 char **newargv; 11562 11563 (void) setlocale(LC_ALL, ""); 11564 (void) setlocale(LC_NUMERIC, "C"); 11565 (void) textdomain(TEXT_DOMAIN); 11566 srand(time(NULL)); 11567 11568 opterr = 0; 11569 11570 /* 11571 * Make sure the user has specified some command. 11572 */ 11573 if (argc < 2) { 11574 (void) fprintf(stderr, gettext("missing command\n")); 11575 usage(B_FALSE); 11576 } 11577 11578 cmdname = argv[1]; 11579 11580 /* 11581 * Special case '-?' 
11582 */ 11583 if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0) 11584 usage(B_TRUE); 11585 11586 /* 11587 * Special case '-V|--version' 11588 */ 11589 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0)) 11590 return (zpool_do_version(argc, argv)); 11591 11592 /* 11593 * Special case 'help' 11594 */ 11595 if (strcmp(cmdname, "help") == 0) 11596 return (zpool_do_help(argc, argv)); 11597 11598 if ((g_zfs = libzfs_init()) == NULL) { 11599 (void) fprintf(stderr, "%s\n", libzfs_error_init(errno)); 11600 return (1); 11601 } 11602 11603 libzfs_print_on_error(g_zfs, B_TRUE); 11604 11605 zfs_save_arguments(argc, argv, history_str, sizeof (history_str)); 11606 11607 /* 11608 * Many commands modify input strings for string parsing reasons. 11609 * We create a copy to protect the original argv. 11610 */ 11611 newargv = safe_malloc((argc + 1) * sizeof (newargv[0])); 11612 for (i = 0; i < argc; i++) 11613 newargv[i] = strdup(argv[i]); 11614 newargv[argc] = NULL; 11615 11616 /* 11617 * Run the appropriate command. 11618 */ 11619 if (find_command_idx(cmdname, &i) == 0) { 11620 current_command = &command_table[i]; 11621 ret = command_table[i].func(argc - 1, newargv + 1); 11622 } else if (strchr(cmdname, '=')) { 11623 verify(find_command_idx("set", &i) == 0); 11624 current_command = &command_table[i]; 11625 ret = command_table[i].func(argc, newargv); 11626 } else if (strcmp(cmdname, "freeze") == 0 && argc == 3) { 11627 /* 11628 * 'freeze' is a vile debugging abomination, so we treat 11629 * it as such. 11630 */ 11631 zfs_cmd_t zc = {"\0"}; 11632 11633 (void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name)); 11634 ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc); 11635 if (ret != 0) { 11636 (void) fprintf(stderr, 11637 gettext("failed to freeze pool: %d\n"), errno); 11638 ret = 1; 11639 } 11640 11641 log_history = 0; 11642 } else { 11643 (void) fprintf(stderr, gettext("unrecognized " 11644 "command '%s'\n"), cmdname); 11645 usage(B_FALSE); 11646 ret = 1; 11647 } 11648 11649 for (i = 0; i < argc; i++) 11650 free(newargv[i]); 11651 free(newargv); 11652 11653 if (ret == 0 && log_history) 11654 (void) zpool_log_history(g_zfs, history_str); 11655 11656 libzfs_fini(g_zfs); 11657 11658 /* 11659 * The 'ZFS_ABORT' environment variable causes us to dump core on exit 11660 * for the purposes of running ::findleaks. 11661 */ 11662 if (getenv("ZFS_ABORT") != NULL) { 11663 (void) printf("dumping core by request\n"); 11664 abort(); 11665 } 11666 11667 return (ret); 11668 } 11669