xref: /titanic_41/usr/src/lib/libzfs/common/libzfs_status.c (revision 500b1e787b108592a37e3d54dc9b5e676de5386d)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool and the status of exported
 * pools.  Each entry point returns one of the ZPOOL_STATUS_* defines
 * describing the status of the pool.  This status is independent (to a
 * certain degree) of the state of the pool.  A pool's state describes only
 * whether or not it is capable of providing the necessary fault tolerance
 * for data.  The status describes the overall health of the pool's devices;
 * a pool that is online can still have a device that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to a FMA message ID.  The
 * explanation is left up to the caller, depending on whether it is a live
 * pool or an import.
 */
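
/*
 * A minimal caller sketch, illustrative only (it assumes 'zhp' is a
 * zpool_handle_t obtained via zpool_open(), and that the message ID is
 * turned into a knowledge-article URL the way zpool(1M) does):
 *
 *	char *msgid;
 *	zpool_status_t status = zpool_get_status(zhp, &msgid);
 *
 *	if (msgid != NULL)
 *		(void) printf("see: http://www.sun.com/msg/%s\n", msgid);
 *
 * zpool_import_status() behaves identically, but takes the candidate pool's
 * config nvlist rather than an open handle.
 */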

#include <libzfs.h>
#include <string.h>
#include <unistd.h>
#include "libzfs_impl.h"

/*
 * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_* defines
 * in libzfs.h.  Note that there are some status results which go past the end
 * of this table, and hence have no associated message ID.
 */
static char *zfs_msgid_table[] = {
	"ZFS-8000-14",
	"ZFS-8000-2Q",
	"ZFS-8000-3C",
	"ZFS-8000-4J",
	"ZFS-8000-5E",
	"ZFS-8000-6X",
	"ZFS-8000-72",
	"ZFS-8000-8A",
	"ZFS-8000-9P",
	"ZFS-8000-A5",
	"ZFS-8000-EY",
	"ZFS-8000-HC",
	"ZFS-8000-JQ",
	"ZFS-8000-K4",
};

#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
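
/*
 * The sync requirement above could in principle be checked at compile time.
 * A hypothetical guard, assuming both that a CTASSERT-style compile-time
 * assertion is available here and that ZPOOL_STATUS_FAULTED_DEV_R is the
 * first status value with no message ID (neither is guaranteed by this
 * file):
 *
 *	CTASSERT(NMSGID == ZPOOL_STATUS_FAULTED_DEV_R);
 */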

/*
 * Leaf vdev predicates passed to find_vdev_problem(), below.  Each receives
 * a leaf's state, aux state, and cumulative error count, and returns nonzero
 * if the leaf exhibits the problem in question.
 */
/* ARGSUSED */
static int
vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN &&
	    aux == VDEV_AUX_OPEN_FAILED);
}

/* ARGSUSED */
static int
vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_FAULTED);
}

/* ARGSUSED */
static int
vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_DEGRADED || errs != 0);
}

/* ARGSUSED */
static int
vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN);
}

/* ARGSUSED */
static int
vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_OFFLINE);
}

/* ARGSUSED */
static int
vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_REMOVED);
}
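
/*
 * A new check would follow the same predicate shape.  For example, a
 * hypothetical predicate matching only leaves that cannot be opened due to
 * corrupt data (illustrative; not used anywhere in this file):
 *
 *	static int
 *	vdev_corrupt(uint64_t state, uint64_t aux, uint64_t errs)
 *	{
 *		return (state == VDEV_STATE_CANT_OPEN &&
 *		    aux == VDEV_AUX_CORRUPT_DATA);
 *	}
 */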

/*
 * Detect whether any leaf device has seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children;
	char *type;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call them
	 * out again.  We'll pick up the fact that a resilver is happening
	 * later.
	 */
	verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
		return (B_FALSE);

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func))
				return (B_TRUE);
	} else {
		/*
		 * A leaf vdev: its vdev_stat_t is packed into the config as
		 * a uint64_t array.
		 */
		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_STATS,
		    (uint64_t **)&vs, &c) == 0);

		if (func(vs->vs_state, vs->vs_aux,
		    vs->vs_read_errors +
		    vs->vs_write_errors +
		    vs->vs_checksum_errors))
			return (B_TRUE);
	}

	return (B_FALSE);
}
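
/*
 * For reference, the shape of the vdev tree this function walks, sketched
 * for a hypothetical two-way mirror (keys shown by their on-disk names):
 *
 *	"vdev_tree" (nvlist)
 *	    "type" = "root"
 *	    "children"[0] (nvlist)
 *	        "type" = "mirror"
 *	        "children"[0]: "type" = "disk", "stats" = vdev_stat_t
 *	        "children"[1]: "type" = "disk", "stats" = vdev_stat_t
 */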

/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.  For
 * example, a pool that is both resilvering and has a faulted device in a
 * replicated config is reported as faulted, since device faults rank above an
 * in-progress resilver.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);

	/*
	 * Pool last accessed by another system.
	 */
	if (hostid != 0 && (unsigned long)hostid != gethostid() &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has been suspended due to failed I/O; the
	 * stored value is the pool's failmode setting.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Faulted or missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Currently resilvering
	 */
	if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Outdated, but usable, version
	 */
	if (version < SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	return (ZPOOL_STATUS_OK);
}

zpool_status_t
zpool_get_status(zpool_handle_t *zhp, char **msgid)
{
	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}

zpool_status_t
zpool_import_status(nvlist_t *config, char **msgid)
{
	zpool_status_t ret = check_status(config, B_TRUE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}

/*
 * Print a single row of the DDT histogram.  'h' is the log2 reference-count
 * bucket for the row, or -1 for the totals row.
 */
static void
dump_ddt_stat(const ddt_stat_t *dds, int h)
{
	char refcnt[6];
	char blocks[6], lsize[6], psize[6], dsize[6];
	char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];

	if (dds == NULL || dds->dds_blocks == 0)
		return;

	if (h == -1)
		(void) strcpy(refcnt, "Total");
	else
		zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));

	zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
	zfs_nicenum(dds->dds_lsize, lsize, sizeof (lsize));
	zfs_nicenum(dds->dds_psize, psize, sizeof (psize));
	zfs_nicenum(dds->dds_dsize, dsize, sizeof (dsize));
	zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
	zfs_nicenum(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
	zfs_nicenum(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
	zfs_nicenum(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));

	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
	    refcnt,
	    blocks, lsize, psize, dsize,
	    ref_blocks, ref_lsize, ref_psize, ref_dsize);
}

/*
 * Print the DDT histogram and the column totals.
 */
void
zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
{
	int h;

	(void) printf("\n");

	(void) printf("bucket   "
	    "           allocated             "
	    "          referenced          \n");
	(void) printf("______   "
	    "______________________________   "
	    "______________________________\n");

	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
	    "refcnt",
	    "blocks", "LSIZE", "PSIZE", "DSIZE",
	    "blocks", "LSIZE", "PSIZE", "DSIZE");

	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
	    "------",
	    "------", "-----", "-----", "-----",
	    "------", "-----", "-----", "-----");

	for (h = 0; h < 64; h++)
		dump_ddt_stat(&ddh->ddh_stat[h], h);

	dump_ddt_stat(dds_total, -1);

	(void) printf("\n");
}
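
/*
 * A caller sketch, illustrative only.  It assumes the dedup stats are
 * published in the pool config under ZPOOL_CONFIG_DDT_STATS and
 * ZPOOL_CONFIG_DDT_HISTOGRAM (an assumption about the consumer's layout,
 * not something this file defines):
 *
 *	ddt_stat_t *dds;
 *	ddt_histogram_t *ddh;
 *	uint_t c;
 *
 *	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
 *	    (uint64_t **)&dds, &c) == 0);
 *	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
 *	    (uint64_t **)&ddh, &c) == 0);
 *	zpool_dump_ddt(dds, ddh);
 */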