/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

/*
 * This file contains the functions which analyze the status of a pool. This
 * includes both the status of an active pool, as well as the status of
 * exported pools. These functions return one of the ZPOOL_STATUS_* defines
 * describing the status of the pool. This status is independent (to a certain
 * degree) of the state of the pool. A pool's state describes only whether or
 * not it is capable of providing the necessary fault tolerance for data. The
 * status describes the overall status of devices. A pool that is online can
 * still have a device that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to an FMA message ID. The
 * explanation is left up to the caller, depending on whether it is a live
 * pool or an import.
 */

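/*
 * A minimal caller-side sketch (for illustration only; it assumes an open
 * zpool_handle_t *zhp obtained elsewhere through libzfs):
 *
 *	char *msgid;
 *	zpool_status_t status = zpool_get_status(zhp, &msgid);
 *	if (msgid != NULL)
 *		(void) printf("see: %s\n", msgid);
 *
 * The message ID, when present, corresponds to an FMA knowledge article
 * describing the reported status.
 */
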
#include <libzfs.h>
#include <string.h>
#include <unistd.h>
#include "libzfs_impl.h"
#include "zfeature_common.h"

/*
 * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
 * in libzfs.h. Note that there are some status results which go past the end
 * of this table, and hence have no associated message ID.
 */
static char *zfs_msgid_table[] = {
	"ZFS-8000-14",
	"ZFS-8000-2Q",
	"ZFS-8000-3C",
	"ZFS-8000-4J",
	"ZFS-8000-5E",
	"ZFS-8000-6X",
	"ZFS-8000-72",
	"ZFS-8000-8A",
	"ZFS-8000-9P",
	"ZFS-8000-A5",
	"ZFS-8000-EY",
	"ZFS-8000-HC",
	"ZFS-8000-JQ",
	"ZFS-8000-K4",
};

#define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))

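/*
 * The vdev_*() functions below are simple predicates over a leaf vdev's
 * (state, aux, errs) triple. They are passed as the callback to
 * find_vdev_problem() to test a pool's vdev tree for a particular class
 * of problem.
 */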
/* ARGSUSED */
static int
vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN &&
	    aux == VDEV_AUX_OPEN_FAILED);
}

/* ARGSUSED */
static int
vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_FAULTED);
}

/* ARGSUSED */
static int
vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_DEGRADED || errs != 0);
}

/* ARGSUSED */
static int
vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN);
}

/* ARGSUSED */
static int
vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_OFFLINE);
}

/* ARGSUSED */
static int
vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_REMOVED);
}

/*
 * Detect if any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children;
	char *type;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call them
	 * out again. We'll pick up the fact that a resilver is happening
	 * later.
	 */
	verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
		return (B_FALSE);

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func))
				return (B_TRUE);
	} else {
		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0);

		if (func(vs->vs_state, vs->vs_aux,
		    vs->vs_read_errors +
		    vs->vs_write_errors +
		    vs->vs_checksum_errors))
			return (B_TRUE);
	}

	/*
	 * Check any L2 cache devs
	 */
	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find. In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * Pool last accessed by another system.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != gethostid() &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Unsupported feature(s).
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
		nvlist_t *nvinfo;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &nvinfo) == 0);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
	}

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has suspended due to failed I/O.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Outdated, but usable, version
	 */
	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	/*
	 * Usable pool with disabled features
	 */
	if (version >= SPA_VERSION_FEATURES) {
		int i;
		nvlist_t *feat;

		if (isimport) {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO);
			feat = fnvlist_lookup_nvlist(feat,
			    ZPOOL_CONFIG_ENABLED_FEAT);
		} else {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_FEATURE_STATS);
		}

		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_FEAT_DISABLED);
		}
	}

	return (ZPOOL_STATUS_OK);
}

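/*
 * Return the status of an active pool. If the status has an associated FMA
 * message ID, *msgid is set to point at the corresponding entry in
 * zfs_msgid_table; otherwise it is set to NULL.
 */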
zpool_status_t
zpool_get_status(zpool_handle_t *zhp, char **msgid)
{
	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}

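/*
 * Return the status of an exported pool, based on the config passed in from
 * the import. As above, *msgid is set to the associated FMA message ID, or
 * NULL if there is none.
 */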
zpool_status_t
zpool_import_status(nvlist_t *config, char **msgid)
{
	zpool_status_t ret = check_status(config, B_TRUE);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}

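/*
 * Print a single row of the DDT histogram: 'h' is the bucket index (the row
 * covers blocks with a reference count of 2^h), or -1 for the totals row.
 * Rows with no blocks are skipped.
 */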
static void
dump_ddt_stat(const ddt_stat_t *dds, int h)
{
	char refcnt[6];
	char blocks[6], lsize[6], psize[6], dsize[6];
	char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];

	if (dds == NULL || dds->dds_blocks == 0)
		return;

	if (h == -1)
		(void) strcpy(refcnt, "Total");
	else
		zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));

	zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
	zfs_nicenum(dds->dds_lsize, lsize, sizeof (lsize));
	zfs_nicenum(dds->dds_psize, psize, sizeof (psize));
	zfs_nicenum(dds->dds_dsize, dsize, sizeof (dsize));
	zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
	zfs_nicenum(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
	zfs_nicenum(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
	zfs_nicenum(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    refcnt,
	    blocks, lsize, psize, dsize,
	    ref_blocks, ref_lsize, ref_psize, ref_dsize);
}

/*
 * Print the DDT histogram and the column totals.
 */
void
zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
{
	int h;

	(void) printf("\n");

	(void) printf("bucket "
	    " allocated "
	    " referenced \n");
	(void) printf("______ "
	    "______________________________ "
	    "______________________________\n");

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    "refcnt",
	    "blocks", "LSIZE", "PSIZE", "DSIZE",
	    "blocks", "LSIZE", "PSIZE", "DSIZE");

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    "------",
	    "------", "-----", "-----", "-----",
	    "------", "-----", "-----", "-----");

	for (h = 0; h < 64; h++)
		dump_ddt_stat(&ddh->ddh_stat[h], h);

	dump_ddt_stat(dds_total, -1);

	(void) printf("\n");
}