/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <assert.h>
#include <stddef.h>
#include <strings.h>
#include <libuutil.h>
#include <fm/fmd_api.h>
#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>

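/*
 * Persistent, versioned case data.  This structure is written to and read
 * back from each case's fmd buffer so that open cases survive a restart of
 * the diagnosis engine.
 */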
typedef struct zfs_case_data {
	uint64_t	zc_version;
	uint64_t	zc_ena;
	uint64_t	zc_pool_guid;
	uint64_t	zc_vdev_guid;
	int		zc_has_timer;
	int		zc_pool_state;
} zfs_case_data_t;

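/*
 * In-core case state, tying the persistent data to the underlying fmd case,
 * its position on the global case list, and any outstanding timer.
 */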
typedef struct zfs_case {
	int		zc_version;
	zfs_case_data_t	zc_data;
	fmd_case_t	*zc_case;
	uu_list_node_t	zc_node;
	id_t		zc_timer;
} zfs_case_t;

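/*
 * Name and current version of the per-case data buffer.
 */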
#define	CASE_DATA		"data"
#define	CASE_DATA_VERSION	1

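/*
 * Case timeout in nanoseconds, derived from the "case_timeout" property
 * (specified in seconds) in _fmd_init().
 */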
static int zfs_case_timeout;

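/*
 * List of all open cases, and the uu_list pool it is allocated from.
 */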
uu_list_pool_t *zfs_case_pool;
uu_list_t *zfs_cases;

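/*
 * Write the persistent case data out to the case's "data" buffer.
 */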
static void
zfs_case_serialize(fmd_hdl_t *hdl, zfs_case_t *zcp)
{
	fmd_buf_write(hdl, zcp->zc_case, CASE_DATA, &zcp->zc_data,
	    sizeof (zcp->zc_data));
}

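/*
 * Read the persistent data back from the case's "data" buffer, rebuild the
 * in-core case, re-install any pending timer, and add the case to the list
 * of open cases.  Returns NULL if the buffer's version is unrecognized.
 */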
static zfs_case_t *
zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
{
	zfs_case_t *zcp;

	zcp = fmd_hdl_zalloc(hdl, sizeof (zfs_case_t), FMD_SLEEP);
	zcp->zc_case = cp;

	fmd_buf_read(hdl, cp, CASE_DATA, &zcp->zc_data,
	    sizeof (zcp->zc_data));

	if (zcp->zc_data.zc_version != CASE_DATA_VERSION) {
		fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
		return (NULL);
	}

	if (zcp->zc_data.zc_has_timer)
		zcp->zc_timer = fmd_timer_install(hdl, zcp,
		    NULL, zfs_case_timeout);

	(void) uu_list_insert_before(zfs_cases, NULL, zcp);

	fmd_case_setspecific(hdl, cp, zcp);

	return (zcp);
}

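/*
 * Main event entry point.  Correlate incoming ereports and resource events
 * with open cases, open a new case when necessary, and solve the case when
 * enough evidence exists to generate a pool- or device-level fault.
 */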
/*ARGSUSED*/
static void
zfs_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
{
	zfs_case_t *zcp;
	int32_t pool_state;
	uint64_t ena, pool_guid, vdev_guid;
	nvlist_t *detector;
	boolean_t isresource;

	isresource = fmd_nvl_class_match(hdl, nvl, "resource.fs.zfs.*");

	if (isresource) {
		/*
		 * For our faked-up 'ok' resource (see below), we have no normal
		 * payload members.
		 */
		if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    &vdev_guid) != 0)
			pool_state = SPA_LOAD_OPEN;
		else
			pool_state = SPA_LOAD_NONE;
		detector = NULL;
	} else {
		(void) nvlist_lookup_nvlist(nvl,
		    FM_EREPORT_DETECTOR, &detector);
		(void) nvlist_lookup_int32(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, &pool_state);
	}

	/*
	 * Without a retire agent, we subscribe to our own faults and just
	 * discard them.
	 */
	if (fmd_nvl_class_match(hdl, nvl, "fault.fs.zfs.*"))
		return;

	/*
	 * Ignore all block level (.io and .checksum) errors not associated with
	 * a pool open.  We should really update a bean counter, and eventually
	 * do some real predictive analysis based on these faults.
	 */
	if ((fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.io") ||
	    fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.checksum")) &&
	    pool_state == SPA_LOAD_NONE)
		return;

	/*
	 * We also ignore all ereports generated during an import of a pool,
	 * since the only possible fault (.pool) would result in import failure,
	 * and hence no persistent fault.  Some day we may want to do something
	 * with these ereports, so we continue generating them internally.
	 */
	if (pool_state == SPA_LOAD_IMPORT)
		return;

	/*
	 * Determine if this ereport corresponds to an open case.  Cases are
	 * indexed by ENA, since ZFS does all the work of chaining together
	 * related ereports.
	 *
	 * We also detect if an ereport corresponds to an open case by context,
	 * such as:
	 *
	 *	- An error occurred during an open of a pool with an existing
	 *	  case.
	 *
	 *	- An error occurred for a device which already has an open
	 *	  case.
	 */
	if (!isresource) {
		(void) nvlist_lookup_uint64(nvl, FM_EREPORT_ENA, &ena);
		(void) nvlist_lookup_uint64(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, &pool_guid);
		if (fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.vdev.*"))
			(void) nvlist_lookup_uint64(nvl,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid);
		else
			vdev_guid = 0;
	} else {
		(void) nvlist_lookup_uint64(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, &pool_guid);
		if (nvlist_lookup_uint64(nvl,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
			vdev_guid = 0;
		ena = 0;
	}

	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
	    zcp = uu_list_next(zfs_cases, zcp)) {
		/*
		 * Matches a known ENA.
		 */
		if (zcp->zc_data.zc_ena == ena)
			break;

		/*
		 * Matches a case involving load errors for this same pool.
		 */
		if (zcp->zc_data.zc_pool_guid == pool_guid &&
		    zcp->zc_data.zc_pool_state == SPA_LOAD_OPEN &&
		    pool_state == SPA_LOAD_OPEN)
			break;

		/*
		 * Device errors for the same device.
		 */
		if (vdev_guid != 0 && zcp->zc_data.zc_vdev_guid == vdev_guid)
			break;
	}

	if (zcp == NULL) {
		fmd_case_t *cs;
		zfs_case_data_t data;

		/*
		 * If this is one of our 'fake' resource ereports, and there is
		 * no case open, simply discard it.
		 */
		if (isresource)
			return;

		/*
		 * Open a new case.
		 */
		cs = fmd_case_open(hdl, NULL);

		/*
		 * Initialize the case buffer.  To commonize code, we actually
		 * create the buffer with existing data, and then call
		 * zfs_case_unserialize() to instantiate the in-core structure.
		 */
		fmd_buf_create(hdl, cs, CASE_DATA,
		    sizeof (zfs_case_data_t));

		data.zc_version = CASE_DATA_VERSION;
		data.zc_ena = ena;
		data.zc_pool_guid = pool_guid;
		data.zc_vdev_guid = vdev_guid;
		data.zc_has_timer = 0;
		data.zc_pool_state = (int)pool_state;

		fmd_buf_write(hdl, cs, CASE_DATA, &data, sizeof (data));

		zcp = zfs_case_unserialize(hdl, cs);
		assert(zcp != NULL);
	}

	/*
	 * The 'resource.fs.zfs.ok' event is a special internal-only event that
	 * signifies that a pool or device that was previously faulted has now
	 * come online (as detected by ZFS).  This allows us to close the
	 * associated case.
	 */
	if (isresource) {
		fmd_case_close(hdl, zcp->zc_case);
		return;
	}

	/*
	 * Associate the ereport with this case.
	 */
	fmd_case_add_ereport(hdl, zcp->zc_case, ep);

	/*
	 * Don't do anything else if this case is already solved.
	 */
	if (fmd_case_solved(hdl, zcp->zc_case))
		return;

	/*
	 * Determine if we should solve the case and generate a fault.  We solve
	 * a case if:
	 *
	 *	a. A pool failed to open (ereport.fs.zfs.zpool)
	 *	b. A device failed to open (ereport.fs.zfs.vdev.*) while a pool
	 *	   was up and running.
	 *
	 * We may see a series of ereports associated with a pool open, all
	 * chained together by the same ENA.  If the pool open succeeds, then
	 * we'll see no further ereports.  To detect when a pool open has
	 * succeeded, we associate a timer with the event.  When it expires, we
	 * close the case.
	 */
	if (fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.zpool")) {
		/*
		 * Pool level fault.
		 */
		nvlist_t *fault;

		fault = fmd_nvl_create_fault(hdl, "fault.fs.zfs.pool",
		    100, detector, NULL, detector);
		fmd_case_add_suspect(hdl, zcp->zc_case, fault);
		fmd_case_solve(hdl, zcp->zc_case);

		if (zcp->zc_data.zc_has_timer) {
			fmd_timer_remove(hdl, zcp->zc_timer);
			zcp->zc_data.zc_has_timer = 0;
			zfs_case_serialize(hdl, zcp);
		}

	} else if (fmd_nvl_class_match(hdl, nvl, "ereport.fs.zfs.vdev.*") &&
	    pool_state == SPA_LOAD_NONE) {
		/*
		 * Device fault.
		 */
		nvlist_t *fault;

		fault = fmd_nvl_create_fault(hdl, "fault.fs.zfs.device",
		    100, detector, NULL, detector);
		fmd_case_add_suspect(hdl, zcp->zc_case, fault);
		fmd_case_solve(hdl, zcp->zc_case);

		if (zcp->zc_data.zc_has_timer) {
			fmd_timer_remove(hdl, zcp->zc_timer);
			zcp->zc_data.zc_has_timer = 0;
			zfs_case_serialize(hdl, zcp);
		}

	} else if (pool_state == SPA_LOAD_OPEN) {
		/*
		 * Error incurred during a pool open.  Reset the timer
		 * associated with this case.
		 */
		if (zcp->zc_data.zc_has_timer)
			fmd_timer_remove(hdl, zcp->zc_timer);
		zcp->zc_timer = fmd_timer_install(hdl, zcp, NULL,
		    zfs_case_timeout);
		if (!zcp->zc_data.zc_has_timer) {
			zcp->zc_data.zc_has_timer = 1;
			zfs_case_serialize(hdl, zcp);
		}
	}
}

/*
 * Timeout - indicates that a pool had faults, but was eventually opened
 * successfully.
 */
/* ARGSUSED */
static void
zfs_timeout(fmd_hdl_t *hdl, id_t id, void *data)
{
	zfs_case_t *zcp = data;

	zcp->zc_data.zc_has_timer = 0;

	fmd_case_close(hdl, zcp->zc_case);
}

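/*
 * Case close callback: remove any pending timer and free the in-core case
 * state.
 */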
static void
zfs_close(fmd_hdl_t *hdl, fmd_case_t *cs)
{
	zfs_case_t *zcp = fmd_case_getspecific(hdl, cs);

	if (zcp->zc_data.zc_has_timer)
		fmd_timer_remove(hdl, zcp->zc_timer);
	uu_list_remove(zfs_cases, zcp);
	fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
}

static const fmd_hdl_ops_t fmd_ops = {
	zfs_recv,	/* fmdo_recv */
	zfs_timeout,	/* fmdo_timeout */
	zfs_close,	/* fmdo_close */
	NULL,		/* fmdo_stats */
	NULL,		/* fmdo_gc */
};

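/*
 * The "case_timeout" property is expressed in seconds; it is converted to
 * nanoseconds in _fmd_init().
 */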
static const fmd_prop_t fmd_props[] = {
	{ "case_timeout", FMD_TYPE_UINT32, "5" },
	{ NULL, 0, NULL }
};

static const fmd_hdl_info_t fmd_info = {
	"ZFS Diagnosis Engine", "1.0", &fmd_ops, fmd_props
};

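/*
 * Module initialization: create the case list, register with the fmd
 * framework, and unserialize any cases left over from a previous instance
 * of the module.
 */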
void
_fmd_init(fmd_hdl_t *hdl)
{
	fmd_case_t *cp;

	if ((zfs_case_pool = uu_list_pool_create("zfs_case_pool",
	    sizeof (zfs_case_t), offsetof(zfs_case_t, zc_node),
	    NULL, 0)) == NULL)
		return;

	if ((zfs_cases = uu_list_create(zfs_case_pool, NULL, 0)) == NULL) {
		uu_list_pool_destroy(zfs_case_pool);
		return;
	}

	if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
		uu_list_destroy(zfs_cases);
		uu_list_pool_destroy(zfs_case_pool);
		return;
	}

	/*
	 * Iterate over all active cases and unserialize the associated buffers,
	 * adding them to our list of open cases.
	 */
	for (cp = fmd_case_next(hdl, NULL);
	    cp != NULL; cp = fmd_case_next(hdl, cp))
		(void) zfs_case_unserialize(hdl, cp);

	zfs_case_timeout = fmd_prop_get_int32(hdl, "case_timeout") * NANOSEC;
}

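/*
 * Module teardown: free all in-core case state and destroy the case list.
 */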
void
_fmd_fini(fmd_hdl_t *hdl)
{
	zfs_case_t *zcp;
	uu_list_walk_t *walk;

	/*
	 * Remove all active cases.
	 */
	walk = uu_list_walk_start(zfs_cases, UU_WALK_ROBUST);
	while ((zcp = uu_list_walk_next(walk)) != NULL) {
		uu_list_remove(zfs_cases, zcp);
		fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
	}
	uu_list_walk_end(walk);

	uu_list_destroy(zfs_cases);
	uu_list_pool_destroy(zfs_case_pool);
}