/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <assert.h>
#include <stddef.h>
#include <strings.h>
#include <libuutil.h>
#include <fm/fmd_api.h>
#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>

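/*
 * Serialized case state, saved in the case's CASE_DATA buffer so that it can
 * be recovered by zfs_case_unserialize().
 */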
typedef struct zfs_case_data {
	uint64_t	zc_version;
	uint64_t	zc_ena;
	uint64_t	zc_pool_guid;
	uint64_t	zc_vdev_guid;
	int		zc_has_timer;
	int		zc_pool_state;
} zfs_case_data_t;

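/*
 * In-core case state.  zc_data is the persistent portion; the remaining
 * fields are rebuilt when the case is unserialized.
 */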
typedef struct zfs_case {
	int		zc_version;
	zfs_case_data_t	zc_data;
	fmd_case_t	*zc_case;
	uu_list_node_t	zc_node;
	id_t		zc_timer;
} zfs_case_t;

#define	CASE_DATA		"data"
#define	CASE_DATA_VERSION	1

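/*
 * Grace period, in nanoseconds, used to decide that a pool open completed
 * without further errors; derived from the "case_timeout" property.
 */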
static hrtime_t zfs_case_timeout;

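/*
 * List of all open cases, rebuilt from the persisted fmd cases in
 * _fmd_init().
 */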
uu_list_pool_t *zfs_case_pool;
uu_list_t *zfs_cases;

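/*
 * Write the case's persistent state to its CASE_DATA buffer.
 */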
static void
zfs_case_serialize(fmd_hdl_t *hdl, zfs_case_t *zcp)
{
	fmd_buf_write(hdl, zcp->zc_case, CASE_DATA, &zcp->zc_data,
	    sizeof (zcp->zc_data));
}

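/*
 * Rebuild the in-core case from its CASE_DATA buffer, reinstalling any
 * pending timer and adding the case to the global list.  Returns NULL if the
 * buffer was written by an incompatible version of this module.
 */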
static zfs_case_t *
zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
{
	zfs_case_t *zcp;

	zcp = fmd_hdl_zalloc(hdl, sizeof (zfs_case_t), FMD_SLEEP);
	zcp->zc_case = cp;

	fmd_buf_read(hdl, cp, CASE_DATA, &zcp->zc_data,
	    sizeof (zcp->zc_data));

	if (zcp->zc_data.zc_version != CASE_DATA_VERSION) {
		fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
		return (NULL);
	}

	if (zcp->zc_data.zc_has_timer)
		zcp->zc_timer = fmd_timer_install(hdl, zcp,
		    NULL, zfs_case_timeout);

	(void) uu_list_insert_before(zfs_cases, NULL, zcp);

	fmd_case_setspecific(hdl, cp, zcp);

	return (zcp);
}

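/*
 * Main event handler.  Match the incoming ereport or resource event to an
 * open case (by ENA, by pool load errors, or by vdev GUID), opening a new
 * case if necessary, and then decide whether to solve or close the case.
 */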
/*ARGSUSED*/
static void
zfs_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
{
	zfs_case_t *zcp;
	int32_t pool_state;
	uint64_t ena, pool_guid, vdev_guid;
	nvlist_t *detector;
	boolean_t isresource;

	isresource = fmd_nvl_class_match(hdl, nvl, "resource.fs.zfs.*");

	if (isresource) {
		/*
		 * For our faked-up 'ok' resource (see below), we have no normal
		 * payload members.
		 */
		if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    &vdev_guid) != 0)
			pool_state = SPA_LOAD_OPEN;
		else
			pool_state = SPA_LOAD_NONE;
		detector = NULL;
	} else {
		(void) nvlist_lookup_nvlist(nvl,
		    FM_EREPORT_DETECTOR, &detector);
		(void) nvlist_lookup_int32(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, &pool_state);
	}

	/*
	 * Without a retire agent, we subscribe to our own faults and just
	 * discard them.
	 */
	if (fmd_nvl_class_match(hdl, nvl, "fault.fs.zfs.*"))
		return;

	/*
	 * Ignore all block level (.io and .checksum) errors not associated
	 * with a pool open.  We should really update a bean counter, and
	 * eventually do some real predictive analysis based on these faults.
	 */
	if ((fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.io") ||
	    fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.checksum")) &&
	    pool_state == SPA_LOAD_NONE)
		return;

	/*
	 * We also ignore all ereports generated during an import of a pool,
	 * since the only possible fault (.pool) would result in import
	 * failure, and hence no persistent fault.  Some day we may want to do
	 * something with these ereports, so we continue generating them
	 * internally.
	 */
	if (pool_state == SPA_LOAD_IMPORT)
		return;

	/*
	 * Determine if this ereport corresponds to an open case.  Cases are
	 * indexed by ENA, since ZFS does all the work of chaining together
	 * related ereports.
	 *
	 * We also detect if an ereport corresponds to an open case by context,
	 * such as:
	 *
	 * 	- An error occurred during an open of a pool with an existing
	 *	  case.
	 *
	 * 	- An error occurred for a device which already has an open
	 *	  case.
	 */
	if (!isresource) {
		(void) nvlist_lookup_uint64(nvl, FM_EREPORT_ENA, &ena);
		(void) nvlist_lookup_uint64(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, &pool_guid);
		if (fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.vdev.*"))
			(void) nvlist_lookup_uint64(nvl,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid);
		else
			vdev_guid = 0;
	} else {
		ena = 0;	/* resources carry no ENA to match on */
		(void) nvlist_lookup_uint64(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, &pool_guid);
		if (nvlist_lookup_uint64(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
			vdev_guid = 0;
	}

	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		/*
		 * Matches a known ENA.
		 */
		if (zcp->zc_data.zc_ena == ena)
			break;

		/*
		 * Matches a case involving load errors for this same pool.
		 */
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_pool_state == SPA_LOAD_OPEN &&
		    pool_state == SPA_LOAD_OPEN)
			break;

		/*
		 * Device errors for the same device.
		 */
		if (vdev_guid != 0 && zcp->zc_data.zc_vdev_guid == vdev_guid)
			break;
	}

	if (zcp == NULL) {
		fmd_case_t *cs;
		zfs_case_data_t data;

		/*
		 * If this is one of our 'fake' resource ereports, and there is
		 * no case open, simply discard it.
		 */
		if (isresource)
			return;

		/*
		 * Open a new case.
		 */
		cs = fmd_case_open(hdl, NULL);

		/*
		 * Initialize the case buffer.  To commonize code, we actually
		 * create the buffer with existing data, and then call
		 * zfs_case_unserialize() to instantiate the in-core structure.
		 */
		fmd_buf_create(hdl, cs, CASE_DATA,
		    sizeof (zfs_case_data_t));

		data.zc_version = CASE_DATA_VERSION;
		data.zc_ena = ena;
		data.zc_pool_guid = pool_guid;
		data.zc_vdev_guid = vdev_guid;
		data.zc_has_timer = 0;
		data.zc_pool_state = (int)pool_state;

		fmd_buf_write(hdl, cs, CASE_DATA, &data, sizeof (data));

		zcp = zfs_case_unserialize(hdl, cs);
		assert(zcp != NULL);
	}

	/*
	 * The 'resource.fs.zfs.ok' event is a special internal-only event that
	 * signifies that a pool or device that was previously faulted has now
	 * come online (as detected by ZFS).  This allows us to close the
	 * associated case.
	 */
	if (isresource) {
		fmd_case_close(hdl, zcp->zc_case);
		return;
	}

	/*
	 * Associate the ereport with this case.
	 */
	fmd_case_add_ereport(hdl, zcp->zc_case, ep);

	/*
	 * Don't do anything else if this case is already solved.
	 */
	if (fmd_case_solved(hdl, zcp->zc_case))
		return;

	/*
	 * Determine if we should solve the case and generate a fault.  We
	 * solve a case if:
	 *
	 * 	a. A pool failed to open (ereport.fs.zfs.zpool)
	 * 	b. A device failed to open (ereport.fs.zfs.vdev.*) while a
	 *	   pool was up and running.
	 *
	 * We may see a series of ereports associated with a pool open, all
	 * chained together by the same ENA.  If the pool open succeeds, then
	 * we'll see no further ereports.  To detect when a pool open has
	 * succeeded, we associate a timer with the event.  When it expires, we
	 * close the case.
	 */
	if (fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.zpool")) {
		/*
		 * Pool level fault.
		 */
		nvlist_t *fault;

		fault = fmd_nvl_create_fault(hdl, "fault.fs.zfs.pool",
		    100, detector, NULL, detector);
		fmd_case_add_suspect(hdl, zcp->zc_case, fault);
		fmd_case_solve(hdl, zcp->zc_case);

		if (zcp->zc_data.zc_has_timer) {
			fmd_timer_remove(hdl, zcp->zc_timer);
			zcp->zc_data.zc_has_timer = 0;
			zfs_case_serialize(hdl, zcp);
		}

	} else if (fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.vdev.*") &&
	    pool_state == SPA_LOAD_NONE) {
		/*
		 * Device fault.
		 */
		nvlist_t *fault;

		fault = fmd_nvl_create_fault(hdl, "fault.fs.zfs.device",
		    100, detector, NULL, detector);
		fmd_case_add_suspect(hdl, zcp->zc_case, fault);
		fmd_case_solve(hdl, zcp->zc_case);

		if (zcp->zc_data.zc_has_timer) {
			fmd_timer_remove(hdl, zcp->zc_timer);
			zcp->zc_data.zc_has_timer = 0;
			zfs_case_serialize(hdl, zcp);
		}

	} else if (pool_state == SPA_LOAD_OPEN) {
		/*
		 * Error incurred during a pool open.  Reset the timer
		 * associated with this case.
		 */
		if (zcp->zc_data.zc_has_timer)
			fmd_timer_remove(hdl, zcp->zc_timer);
		zcp->zc_timer = fmd_timer_install(hdl, zcp, NULL,
		    zfs_case_timeout);
		if (!zcp->zc_data.zc_has_timer) {
			zcp->zc_data.zc_has_timer = 1;
			zfs_case_serialize(hdl, zcp);
		}
	}
}

/*
 * Timeout - indicates that a pool had faults, but was eventually opened
 * successfully.
 */
/* ARGSUSED */
static void
zfs_timeout(fmd_hdl_t *hdl, id_t id, void *data)
{
	zfs_case_t *zcp = data;

	zcp->zc_data.zc_has_timer = 0;

	fmd_case_close(hdl, zcp->zc_case);
}

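/*
 * The case has been closed; remove any pending timer and free the in-core
 * state.
 */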
static void
zfs_close(fmd_hdl_t *hdl, fmd_case_t *cs)
{
	zfs_case_t *zcp = fmd_case_getspecific(hdl, cs);

	if (zcp->zc_data.zc_has_timer)
		fmd_timer_remove(hdl, zcp->zc_timer);
	uu_list_remove(zfs_cases, zcp);
	fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
}

static const fmd_hdl_ops_t fmd_ops = {
	zfs_recv,	/* fmdo_recv */
	zfs_timeout,	/* fmdo_timeout */
	zfs_close,	/* fmdo_close */
	NULL,		/* fmdo_stats */
	NULL,		/* fmdo_gc */
};

static const fmd_prop_t fmd_props[] = {
	{ "case_timeout", FMD_TYPE_UINT32, "5" },
	{ NULL, 0, NULL }
};

static const fmd_hdl_info_t fmd_info = {
	"ZFS Diagnosis Engine", "1.0", &fmd_ops, fmd_props
};

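/*
 * Register with fmd and rebuild the in-core state for any cases persisted by
 * a previous instance of this module.
 */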
void
_fmd_init(fmd_hdl_t *hdl)
{
	fmd_case_t *cp;

	if ((zfs_case_pool = uu_list_pool_create("zfs_case_pool",
	    sizeof (zfs_case_t), offsetof(zfs_case_t, zc_node),
	    NULL, 0)) == NULL)
		return;

	if ((zfs_cases = uu_list_create(zfs_case_pool, NULL, 0)) == NULL) {
		uu_list_pool_destroy(zfs_case_pool);
		return;
	}

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		uu_list_destroy(zfs_cases);
		uu_list_pool_destroy(zfs_case_pool);
		return;
	}

	/*
	 * Iterate over all active cases and unserialize the associated
	 * buffers, adding them to our list of open cases.
	 */
	for (cp = fmd_case_next(hdl, NULL);
	    cp != NULL; cp = fmd_case_next(hdl, cp))
		(void) zfs_case_unserialize(hdl, cp);

	zfs_case_timeout = fmd_prop_get_int32(hdl, "case_timeout") *
	    (hrtime_t)NANOSEC;
}

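/*
 * Free all in-core case state.  The underlying fmd cases remain and are
 * unserialized again if the module is reloaded.
 */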
void
_fmd_fini(fmd_hdl_t *hdl)
{
	zfs_case_t *zcp;
	uu_list_walk_t *walk;

	/*
	 * Remove all active cases.
	 */
	walk = uu_list_walk_start(zfs_cases, UU_WALK_ROBUST);
	while ((zcp = uu_list_walk_next(walk)) != NULL) {
		uu_list_remove(zfs_cases, zcp);
		fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
	}
	uu_list_walk_end(walk);

	uu_list_destroy(zfs_cases);
	uu_list_pool_destroy(zfs_case_pool);
}