/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 */

/*
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool, as well as the status of
 * exported pools.  These functions return one of the ZPOOL_STATUS_* defines
 * describing the status of the pool.  This status is independent (to a
 * certain degree) of the state of the pool.  A pool's state describes only
 * whether or not it is capable of providing the necessary fault tolerance for
 * data.  The status describes the overall status of devices.  A pool that is
 * online can still have a device that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to an FMA message ID.  The
 * explanation is left up to the caller, depending on whether it is a live
 * pool or an import.
 */

#include <libzfs.h>
#include <libzutil.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/systeminfo.h>
#include "libzfs_impl.h"
#include "zfeature_common.h"

/*
 * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_* defines
 * in include/libzfs.h.  Note that there are some status results which go past
 * the end of this table, and hence have no associated message ID.
 */
static char *zfs_msgid_table[] = {
	"ZFS-8000-14", /* ZPOOL_STATUS_CORRUPT_CACHE */
	"ZFS-8000-2Q", /* ZPOOL_STATUS_MISSING_DEV_R */
	"ZFS-8000-3C", /* ZPOOL_STATUS_MISSING_DEV_NR */
	"ZFS-8000-4J", /* ZPOOL_STATUS_CORRUPT_LABEL_R */
	"ZFS-8000-5E", /* ZPOOL_STATUS_CORRUPT_LABEL_NR */
	"ZFS-8000-6X", /* ZPOOL_STATUS_BAD_GUID_SUM */
	"ZFS-8000-72", /* ZPOOL_STATUS_CORRUPT_POOL */
	"ZFS-8000-8A", /* ZPOOL_STATUS_CORRUPT_DATA */
	"ZFS-8000-9P", /* ZPOOL_STATUS_FAILING_DEV */
	"ZFS-8000-A5", /* ZPOOL_STATUS_VERSION_NEWER */
	"ZFS-8000-EY", /* ZPOOL_STATUS_HOSTID_MISMATCH */
	"ZFS-8000-EY", /* ZPOOL_STATUS_HOSTID_ACTIVE */
	"ZFS-8000-EY", /* ZPOOL_STATUS_HOSTID_REQUIRED */
	"ZFS-8000-HC", /* ZPOOL_STATUS_IO_FAILURE_WAIT */
	"ZFS-8000-JQ", /* ZPOOL_STATUS_IO_FAILURE_CONTINUE */
	"ZFS-8000-MM", /* ZPOOL_STATUS_IO_FAILURE_MMP */
	"ZFS-8000-K4", /* ZPOOL_STATUS_BAD_LOG */
	"ZFS-8000-ER", /* ZPOOL_STATUS_ERRATA */
	/*
	 * The following results have no message ID.
	 *	ZPOOL_STATUS_UNSUP_FEAT_READ
	 *	ZPOOL_STATUS_UNSUP_FEAT_WRITE
	 *	ZPOOL_STATUS_FAULTED_DEV_R
	 *	ZPOOL_STATUS_FAULTED_DEV_NR
	 *	ZPOOL_STATUS_VERSION_OLDER
	 *	ZPOOL_STATUS_FEAT_DISABLED
	 *	ZPOOL_STATUS_RESILVERING
	 *	ZPOOL_STATUS_OFFLINE_DEV
	 *	ZPOOL_STATUS_REMOVED_DEV
	 *	ZPOOL_STATUS_REBUILDING
	 *	ZPOOL_STATUS_REBUILD_SCRUB
	 *	ZPOOL_STATUS_COMPATIBILITY_ERR
	 *	ZPOOL_STATUS_INCOMPATIBLE_FEAT
	 *	ZPOOL_STATUS_OK
	 */
};

#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))

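/*
 * Example (illustrative only; 'status' is an arbitrary variable name):
 * statuses at or beyond NMSGID have no message ID, so any lookup in
 * zfs_msgid_table must be guarded, as zpool_get_status() and
 * zpool_import_status() below do:
 *
 *	const char *msgid = (status < NMSGID) ?
 *	    zfs_msgid_table[status] : NULL;
 */
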
/* ARGSUSED */
static int
vdev_missing(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_OPEN_FAILED);
}

/* ARGSUSED */
static int
vdev_faulted(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_FAULTED);
}

/* ARGSUSED */
static int
vdev_errors(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_DEGRADED ||
	    vs->vs_read_errors != 0 || vs->vs_write_errors != 0 ||
	    vs->vs_checksum_errors != 0);
}

/* ARGSUSED */
static int
vdev_broken(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_CANT_OPEN);
}

/* ARGSUSED */
static int
vdev_offlined(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_OFFLINE);
}

/* ARGSUSED */
static int
vdev_removed(vdev_stat_t *vs, uint_t vsc)
{
	return (vs->vs_state == VDEV_STATE_REMOVED);
}

static int
vdev_non_native_ashift(vdev_stat_t *vs, uint_t vsc)
{
	if (getenv("ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE") != NULL)
		return (0);

	return (VDEV_STAT_VALID(vs_physical_ashift, vsc) &&
	    vs->vs_configured_ashift < vs->vs_physical_ashift);
}

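/*
 * Usage note (illustrative; the pool name below is hypothetical): the check
 * above only tests whether the environment variable is set at all, so any
 * value suppresses the non-native ashift status, e.g.:
 *
 *	ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE=1 zpool status tank
 */
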
/*
 * Detect if any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(vdev_stat_t *, uint_t),
    boolean_t ignore_replacing)
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, vsc, children;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call them
	 * out again.  We'll pick up the fact that a resilver is happening
	 * later.
	 */
	if (ignore_replacing == B_TRUE) {
		char *type;

		verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE,
		    &type) == 0);
		if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
			return (B_FALSE);
	}

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func, ignore_replacing))
				return (B_TRUE);
	} else {
		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) == 0);

		if (func(vs, vsc) != 0)
			return (B_TRUE);
	}

	/*
	 * Check any L2 cache devs
	 */
	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func, ignore_replacing))
				return (B_TRUE);
	}

	return (B_FALSE);
}

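/*
 * Example (taken from the checks in check_status() below): a caller passes
 * the pool's root vdev together with one of the predicates above, e.g. to
 * ask whether any leaf vdev is currently faulted while skipping devices
 * that are being replaced:
 *
 *	if (find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
 *		return (ZPOOL_STATUS_FAULTED_DEV_R);
 */
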
/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering or rebuilding devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport,
    zpool_errata_t *erratap, const char *compat)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;
	uint64_t errata = 0;
	unsigned long system_hostid = get_system_hostid();

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps != NULL && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Currently rebuilding a vdev, check top-level vdevs.
	 */
	vdev_rebuild_stat_t *vrs = NULL;
	nvlist_t **child;
	uint_t c, i, children;
	uint64_t rebuild_end_time = 0;
	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((nvlist_lookup_uint64_array(child[c],
			    ZPOOL_CONFIG_REBUILD_STATS,
			    (uint64_t **)&vrs, &i) == 0) && (vrs != NULL)) {
				uint64_t state = vrs->vrs_state;

				if (state == VDEV_REBUILD_ACTIVE) {
					return (ZPOOL_STATUS_REBUILDING);
				} else if (state == VDEV_REBUILD_COMPLETE &&
				    vrs->vrs_end_time > rebuild_end_time) {
					rebuild_end_time = vrs->vrs_end_time;
				}
			}
		}

		/*
		 * If we can determine when the last scrub was run, and it
		 * was before the last rebuild completed, then recommend
		 * that the pool be scrubbed to verify all checksums.  When
		 * ps is NULL we can infer the pool has never been scrubbed.
		 */
		if (rebuild_end_time > 0) {
			if (ps != NULL) {
				if ((ps->pss_state == DSS_FINISHED &&
				    ps->pss_func == POOL_SCAN_SCRUB &&
				    rebuild_end_time > ps->pss_end_time) ||
				    ps->pss_state == DSS_NONE)
					return (ZPOOL_STATUS_REBUILD_SCRUB);
			} else {
				return (ZPOOL_STATUS_REBUILD_SCRUB);
			}
		}
	}

	/*
	 * The multihost property is set and the pool may be active.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_ACTIVE) {
		mmp_state_t mmp_state;
		nvlist_t *nvinfo;

		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
		mmp_state = fnvlist_lookup_uint64(nvinfo,
		    ZPOOL_CONFIG_MMP_STATE);

		if (mmp_state == MMP_STATE_ACTIVE)
			return (ZPOOL_STATUS_HOSTID_ACTIVE);
		else if (mmp_state == MMP_STATE_NO_HOSTID)
			return (ZPOOL_STATUS_HOSTID_REQUIRED);
		else
			return (ZPOOL_STATUS_HOSTID_MISMATCH);
	}

	/*
	 * Pool last accessed by another system.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != system_hostid &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Unsupported feature(s).
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
		nvlist_t *nvinfo;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &nvinfo) == 0);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
	}

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool is suspended.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		uint64_t reason;

		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED_REASON,
		    &reason) == 0 && reason == ZIO_SUSPEND_MMP)
			return (ZPOOL_STATUS_IO_FAILURE_MMP);

		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing, B_TRUE))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken, B_TRUE))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted, B_TRUE))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing, B_TRUE))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken, B_TRUE))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors, B_TRUE))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined, B_TRUE))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed, B_TRUE))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Suboptimal, but usable, ashift configuration.
	 */
	if (find_vdev_problem(nvroot, vdev_non_native_ashift, B_FALSE))
		return (ZPOOL_STATUS_NON_NATIVE_ASHIFT);

	/*
	 * Informational errata available.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRATA, &errata);
	if (errata) {
		*erratap = errata;
		return (ZPOOL_STATUS_ERRATA);
	}

	/*
	 * Outdated, but usable, version
	 */
	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION) {
		/* "legacy" compatibility disables old version reporting */
		if (compat != NULL && strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0)
			return (ZPOOL_STATUS_OK);
		else
			return (ZPOOL_STATUS_VERSION_OLDER);
	}

	/*
	 * Usable pool with disabled or superfluous features
	 * (superfluous = beyond what's requested by 'compatibility')
	 */
	if (version >= SPA_VERSION_FEATURES) {
		int i;
		nvlist_t *feat;

		if (isimport) {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO);
			if (nvlist_exists(feat, ZPOOL_CONFIG_ENABLED_FEAT))
				feat = fnvlist_lookup_nvlist(feat,
				    ZPOOL_CONFIG_ENABLED_FEAT);
		} else {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_FEATURE_STATS);
		}

		/* check against all features, or limited set? */
		boolean_t c_features[SPA_FEATURES];

		switch (zpool_load_compat(compat, c_features, NULL, 0)) {
		case ZPOOL_COMPATIBILITY_OK:
		case ZPOOL_COMPATIBILITY_WARNTOKEN:
			break;
		default:
			return (ZPOOL_STATUS_COMPATIBILITY_ERR);
		}
		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!fi->fi_zfs_mod_supported)
				continue;
			if (c_features[i] && !nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_FEAT_DISABLED);
			if (!c_features[i] && nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_INCOMPATIBLE_FEAT);
		}
	}

	return (ZPOOL_STATUS_OK);
}

zpool_status_t
zpool_get_status(zpool_handle_t *zhp, char **msgid, zpool_errata_t *errata)
{
	/*
	 * Pass in the desired feature set, as it affects the check for
	 * disabled features.
	 */
	char compatibility[ZFS_MAXPROPLEN];
	if (zpool_get_prop(zhp, ZPOOL_PROP_COMPATIBILITY, compatibility,
	    ZFS_MAXPROPLEN, NULL, B_FALSE) != 0)
		compatibility[0] = '\0';

	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE, errata,
	    compatibility);

	if (msgid != NULL) {
		if (ret >= NMSGID)
			*msgid = NULL;
		else
			*msgid = zfs_msgid_table[ret];
	}
	return (ret);
}

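/*
 * Usage sketch (hypothetical caller, not part of libzfs): 'zhp' is assumed
 * to be a handle obtained from zpool_open(); printing the message ID is one
 * way a consumer might point users at the matching ZFS message article:
 *
 *	zpool_errata_t errata;
 *	char *msgid;
 *	zpool_status_t status = zpool_get_status(zhp, &msgid, &errata);
 *
 *	if (status != ZPOOL_STATUS_OK && msgid != NULL)
 *		(void) printf("status %d, see message ID %s\n",
 *		    (int)status, msgid);
 */
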
zpool_status_t
zpool_import_status(nvlist_t *config, char **msgid, zpool_errata_t *errata)
{
	zpool_status_t ret = check_status(config, B_TRUE, errata, NULL);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}