/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool and the status of exported
 * pools.  Each function returns one of the ZPOOL_STATUS_* defines describing
 * the status of the pool.  This status is independent (to a certain degree)
 * from the state of the pool.  A pool's state describes only whether or not
 * it is capable of providing the necessary fault tolerance for data.  The
 * status describes the overall status of devices.  A pool that is online can
 * still have a device that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to a FMA message ID.  The
 * explanation is left up to the caller, depending on whether it is a live
 * pool or an import.
 */

#include <libzfs.h>
#include <string.h>
#include <unistd.h>
#include "libzfs_impl.h"
#include "zfeature_common.h"

/*
 * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_*
 * defines in libzfs.h.  Note that there are some status results which go past
 * the end of this table, and hence have no associated message ID.
 */
static char *zfs_msgid_table[] = {
	"ZFS-8000-14",
	"ZFS-8000-2Q",
	"ZFS-8000-3C",
	"ZFS-8000-4J",
	"ZFS-8000-5E",
	"ZFS-8000-6X",
	"ZFS-8000-72",
	"ZFS-8000-8A",
	"ZFS-8000-9P",
	"ZFS-8000-A5",
	"ZFS-8000-EY",
	"ZFS-8000-HC",
	"ZFS-8000-JQ",
	"ZFS-8000-K4",
};

#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))

/* ARGSUSED */
static int
vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN &&
	    aux == VDEV_AUX_OPEN_FAILED);
}

/* ARGSUSED */
static int
vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_FAULTED);
}

/* ARGSUSED */
static int
vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_DEGRADED || errs != 0);
}

/* ARGSUSED */
static int
vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN);
}

/* ARGSUSED */
static int
vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_OFFLINE);
}

/* ARGSUSED */
static int
vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_REMOVED);
}

/*
 * Detect whether any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children;
	char *type;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably
	 * in the process of repairing any such errors, and don't want to
	 * call them out again.  We'll pick up the fact that a resilver is
	 * happening later.
	 */
	verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
		return (B_FALSE);

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func))
				return (B_TRUE);
	} else {
		verify(nvlist_lookup_uint64_array(vdev,
		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &c) == 0);

		if (func(vs->vs_state, vs->vs_aux,
		    vs->vs_read_errors +
		    vs->vs_write_errors +
		    vs->vs_checksum_errors))
			return (B_TRUE);
	}

	return (B_FALSE);
}
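
/*
 * Each vdev_*() predicate above receives a leaf vdev's state, its aux code,
 * and its cumulative error count, and find_vdev_problem() walks the vdev tree
 * applying one predicate to every leaf.  A minimal sketch of how an
 * additional check could be wired up (the vdev_degraded name is hypothetical
 * and purely illustrative; it is not part of this file):
 *
 *	static int
 *	vdev_degraded(uint64_t state, uint64_t aux, uint64_t errs)
 *	{
 *		return (state == VDEV_STATE_DEGRADED);
 *	}
 *
 *	if (find_vdev_problem(nvroot, vdev_degraded))
 *		return (some ZPOOL_STATUS_* value chosen by check_status());
 */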

/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do
 * the following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this
 * routine only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Pool last accessed by another system.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != gethostid() &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Unsupported feature(s).
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
		nvlist_t *nvinfo;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &nvinfo) == 0);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
	}

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has suspended due to failed I/O.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}
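
	/*
	 * The faulted/missing/corrupt-label checks above report the *_NR
	 * (non-replicated) variants only when the top-level vdev is itself
	 * in VDEV_STATE_CANT_OPEN.  The checks below report the *_R
	 * (replicated) variants for the same leaf-level problems when the
	 * pool remains openable.
	 */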

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Outdated, but usable, version
	 */
	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	/*
	 * Usable pool with disabled features
	 */
	if (version >= SPA_VERSION_FEATURES) {
		int i;
		nvlist_t *feat;

		if (isimport) {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO);
			feat = fnvlist_lookup_nvlist(feat,
			    ZPOOL_CONFIG_ENABLED_FEAT);
		} else {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_FEATURE_STATS);
		}

		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_FEAT_DISABLED);
		}
	}

	return (ZPOOL_STATUS_OK);
}

zpool_status_t
zpool_get_status(zpool_handle_t *zhp, char **msgid)
{
	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}

zpool_status_t
zpool_import_status(nvlist_t *config, char **msgid)
{
	zpool_status_t ret = check_status(config, B_TRUE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}
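
/*
 * Example (sketch): a typical consumer checks the status and, when one of the
 * message IDs in zfs_msgid_table applies, points the user at the matching FMA
 * knowledge article.  The zhp handle here is assumed to come from
 * zpool_open(), and the URL prefix is whatever knowledge base the caller uses
 * for ZFS message IDs.
 *
 *	char *msgid;
 *	zpool_status_t status = zpool_get_status(zhp, &msgid);
 *
 *	if (status != ZPOOL_STATUS_OK && msgid != NULL)
 *		(void) printf("see: http://illumos.org/msg/%s\n", msgid);
 */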

static void
dump_ddt_stat(const ddt_stat_t *dds, int h)
{
	char refcnt[6];
	char blocks[6], lsize[6], psize[6], dsize[6];
	char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];

	if (dds == NULL || dds->dds_blocks == 0)
		return;

	if (h == -1)
		(void) strcpy(refcnt, "Total");
	else
		zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));

	zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
	zfs_nicenum(dds->dds_lsize, lsize, sizeof (lsize));
	zfs_nicenum(dds->dds_psize, psize, sizeof (psize));
	zfs_nicenum(dds->dds_dsize, dsize, sizeof (dsize));
	zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
	zfs_nicenum(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
	zfs_nicenum(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
	zfs_nicenum(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    refcnt,
	    blocks, lsize, psize, dsize,
	    ref_blocks, ref_lsize, ref_psize, ref_dsize);
}

/*
 * Print the DDT histogram and the column totals.
 */
void
zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
{
	int h;

	(void) printf("\n");

	(void) printf("bucket "
	    " allocated "
	    " referenced \n");
	(void) printf("______ "
	    "______________________________ "
	    "______________________________\n");

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    "refcnt",
	    "blocks", "LSIZE", "PSIZE", "DSIZE",
	    "blocks", "LSIZE", "PSIZE", "DSIZE");

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    "------",
	    "------", "-----", "-----", "-----",
	    "------", "-----", "-----", "-----");

	for (h = 0; h < 64; h++)
		dump_ddt_stat(&ddh->ddh_stat[h], h);

	dump_ddt_stat(dds_total, -1);

	(void) printf("\n");
}
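
/*
 * Example (sketch): zpool_dump_ddt() only formats the statistics it is given;
 * a caller is expected to pull the aggregate ddt_stat_t and the histogram out
 * of the pool config first.  The ZPOOL_CONFIG_DDT_STATS and
 * ZPOOL_CONFIG_DDT_HISTOGRAM nvlist keys below are assumed to be the ones
 * defined in sys/fs/zfs.h; verify against that header before relying on them.
 *
 *	nvlist_t *config = zpool_get_config(zhp, NULL);
 *	ddt_stat_t *dds = NULL;
 *	ddt_histogram_t *ddh = NULL;
 *	uint_t c;
 *
 *	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
 *	    (uint64_t **)&dds, &c) == 0 &&
 *	    nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
 *	    (uint64_t **)&ddh, &c) == 0)
 *		zpool_dump_ddt(dds, ddh);
 */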