xref: /titanic_52/usr/src/cmd/fm/modules/common/sw-diag-response/subsidiary/panic/panic_diag.c (revision aab83bb83be7342f6cfccaed8d5fe0b2f404855d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * Panic software-diagnosis subsidiary
28  *
29  * We model a system panic as a defect diagnosis in FMA. When a system
30  * panics, savecore publishes events which we subscribe to here.
31  *
32  * Our driving events are all raised by savecore, run either from
33  * startup of the dumpadm service or interactively at the command line.
34  * The following describes the logic for the handling of these events.
35  *
36  * On reboot after panic we run savecore as part of the dumpadm
37  * service startup, even if savecore is otherwise disabled (i.e.,
38  * dumpadm -n is in effect) - in that case we run savecore -c to check
39  * for a valid dump and raise the initial event.
40  *
41  * If savecore (or savecore -c) observes a valid dump pending on the
42  * device, it raises a "dump_pending_on_device" event provided this
43  * was not an FMA-initiated panic (for those we will replay ereports
44  * from the dump device as usual and make a diagnosis from those; we do
45  * not need to open a case for the panic).  We subscribe to the
46  * "dump_pending_on_device" event and use that to open a case;  we
47  * open a case requesting a case uuid equal to the OS instance uuid of
48  * the panic dump image - if that fails because of a duplicate uuid then
49  * we have already opened a case for this panic, so there is no need
50  * to open another.
51  *
52  * Included in the "dump_pending_on_device" event is an indication of
53  * whether or not dumpadm is enabled.  If not (dumpadm -n in effect)
54  * then we do not expect any further events regarding this panic
55  * until such time as the admin runs savecore manually (if ever).
56  * So in this case we solve the case immediately after opening it.  If
57  * subsequent events do arrive from a later manual savecore run, we will
58  * discard them.
59  *
60  * If dumpadm is enabled then savecore, run from dumpadm service startup,
61  * will attempt to process the dump - either to copy it off the dump
62  * device (if saving compressed) or to uncompress it off the dump device.
63  * If this succeeds savecore raises a "dump_available" event which
64  * includes information on the directory it was saved in, the instance
65  * number, image uuid, compressed form or not, and whether the dump
66  * was complete (as per the dumphdr).  If the savecore fails for
67  * some reason then it exits and raises a "savecore_failure" event.
68  * These two events are raised even for FMA-initiated panics.
69  *
70  * We subscribe to both the "dump_available" and "savecore_failure" events,
71  * and in the handling thereof we will close the case opened earlier (if
72  * this is not an FMA-initiated panic).  On receipt of the initial
73  * "dump_pending_on_device" event we also arm a timer for +10 minutes if
74  * dumpadm is enabled - if no "dump_available" or "savecore_failure" arrives
75  * in that time we will solve the case on timeout.
76  *
77  * When the timer fires we check whether the initial event for each panic
78  * case was received more than 30 minutes ago; if it was we solve the case
79  * with what we have.  If we're still within the waiting period we rearm
80  * for a further 10 minutes.  The timer is shared by all cases that we
81  * create, which is why the fire interval is shorter than the maximum time
82  * we are prepared to wait.
83  */
84 
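/*
 * In the code below these driving events are referred to by their class
 * macros: SW_SUNOS_PANIC_DETECTED ("dump_pending_on_device"),
 * SW_SUNOS_PANIC_AVAIL ("dump_available") and SW_SUNOS_PANIC_FAILURE
 * ("savecore_failure"); see swde_panic_disp[] at the end of this file.
 */
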
85 #include <strings.h>
86 #include <sys/panic.h>
87 #include <alloca.h>
88 #include <zone.h>
89 
90 #include "../../common/sw.h"
91 #include "panic.h"
92 
93 #define	MAX_STRING_LEN 160
94 
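/* Our subsidiary id, as passed to swde_panic_init() by the framework. */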
95 static id_t myid;
96 
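/* Timer shared by all open panic cases; 0 means no timer is currently armed. */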
97 static id_t mytimerid;
98 
99 /*
100  * Our serialization structure type.
101  */
102 #define	SWDE_PANIC_CASEDATA_VERS	1
103 
104 typedef struct swde_panic_casedata {
105 	uint32_t scd_vers;		/* must be first member */
106 	uint64_t scd_receive_time;	/* when we first knew of this panic */
107 	size_t scd_nvlbufsz;		/* size of following buffer */
108 					/* packed attr nvlist follows */
109 } swde_panic_casedata_t;
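
/*
 * Persisted case data layout (see swde_panic_detected()):
 *
 *	+------------------------+------------------------------------+
 *	| swde_panic_casedata_t  | packed attr nvlist (scd_nvlbufsz)  |
 *	+------------------------+------------------------------------+
 */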
110 
111 static struct {
112 	fmd_stat_t swde_panic_diagnosed;
113 	fmd_stat_t swde_panic_badclass;
114 	fmd_stat_t swde_panic_noattr;
115 	fmd_stat_t swde_panic_unexpected_fm_panic;
116 	fmd_stat_t swde_panic_badattr;
117 	fmd_stat_t swde_panic_badfmri;
118 	fmd_stat_t swde_panic_noinstance;
119 	fmd_stat_t swde_panic_nouuid;
120 	fmd_stat_t swde_panic_dupuuid;
121 	fmd_stat_t swde_panic_nocase;
122 	fmd_stat_t swde_panic_notime;
123 	fmd_stat_t swde_panic_nopanicstr;
124 	fmd_stat_t swde_panic_nodumpdir;
125 	fmd_stat_t swde_panic_nostack;
126 	fmd_stat_t swde_panic_incomplete;
127 	fmd_stat_t swde_panic_failed;
128 	fmd_stat_t swde_panic_badcasedata;
129 	fmd_stat_t swde_panic_failsrlz;
130 } swde_panic_stats = {
131 	{ "swde_panic_diagnosed", FMD_TYPE_UINT64,
132 	    "panic defects published" },
133 	{ "swde_panic_badclass", FMD_TYPE_UINT64,
134 	    "incorrect event class received" },
135 	{ "swde_panic_noattr", FMD_TYPE_UINT64,
136 	    "malformed event - missing attr nvlist" },
137 	{ "swde_panic_unexpected_fm_panic", FMD_TYPE_UINT64,
138 	    "dump available for an fm_panic()" },
139 	{ "swde_panic_badattr", FMD_TYPE_UINT64,
140 	    "malformed event - invalid attr list" },
141 	{ "swde_panic_badfmri", FMD_TYPE_UINT64,
142 	    "malformed event - fmri2str fails" },
143 	{ "swde_panic_noinstance", FMD_TYPE_UINT64,
144 	    "malformed event - no instance number" },
145 	{ "swde_panic_nouuid", FMD_TYPE_UINT64,
146 	    "malformed event - missing uuid" },
147 	{ "swde_panic_dupuuid", FMD_TYPE_UINT64,
148 	    "duplicate events received" },
149 	{ "swde_panic_nocase", FMD_TYPE_UINT64,
150 	    "case missing for uuid" },
151 	{ "swde_panic_notime", FMD_TYPE_UINT64,
152 	    "missing crash dump time" },
153 	{ "swde_panic_nopanicstr", FMD_TYPE_UINT64,
154 	    "missing panic string" },
155 	{ "swde_panic_nodumpdir", FMD_TYPE_UINT64,
156 	    "missing crashdump save directory" },
157 	{ "swde_panic_nostack", FMD_TYPE_UINT64,
158 	    "missing panic stack" },
159 	{ "swde_panic_incomplete", FMD_TYPE_UINT64,
160 	    "missing panic incomplete" },
161 	{ "swde_panic_failed", FMD_TYPE_UINT64,
162 	    "missing panic failed" },
163 	{ "swde_panic_badcasedata", FMD_TYPE_UINT64,
164 	    "bad case data during timeout" },
165 	{ "swde_panic_failsrlz", FMD_TYPE_UINT64,
166 	    "failures to serialize case data" },
167 };
168 
169 #define	BUMPSTAT(stat)		swde_panic_stats.stat.fmds_value.ui64++
170 
171 static nvlist_t *
172 panic_sw_fmri(fmd_hdl_t *hdl, char *object)
173 {
174 	nvlist_t *fmri;
175 	nvlist_t *sw_obj;
176 	int err = 0;
177 
178 	fmri = fmd_nvl_alloc(hdl, FMD_SLEEP);
179 	err |= nvlist_add_uint8(fmri, FM_VERSION, FM_SW_SCHEME_VERSION);
180 	err |= nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_SW);
181 
182 	sw_obj = fmd_nvl_alloc(hdl, FMD_SLEEP);
183 	err |= nvlist_add_string(sw_obj, FM_FMRI_SW_OBJ_PATH, object);
184 	err |= nvlist_add_nvlist(fmri, FM_FMRI_SW_OBJ, sw_obj);
185 	nvlist_free(sw_obj);
186 	if (!err)
187 		return (fmri);
188 	nvlist_free(fmri);
189 	return (NULL);
190 }
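
/*
 * For illustration, given a hypothetical object path such as
 * "/var/crash/.<os-instance-uuid>", the function above builds an FMRI
 * nvlist of roughly this shape (members named by their protocol macros):
 *
 *	FM_VERSION = FM_SW_SCHEME_VERSION
 *	FM_FMRI_SCHEME = FM_FMRI_SCHEME_SW
 *	FM_FMRI_SW_OBJ = { FM_FMRI_SW_OBJ_PATH = "/var/crash/.<uuid>" }
 */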
191 
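/*
 * Crash dump file name patterns; which set applies depends on whether
 * savecore saved the dump in compressed form.
 */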
192 static const char *dumpfiles[2] = { "unix.%lld", "vmcore.%lld" };
193 static const char *dumpfiles_comp[2] = { "vmdump.%lld", NULL};
194 
195 static void
196 swde_panic_solve(fmd_hdl_t *hdl, fmd_case_t *cp,
197     nvlist_t *attr, fmd_event_t *ep, boolean_t savecore_success)
198 {
199 	char *dumpdir, *path, *uuid;
200 	nvlist_t *defect, *rsrc;
201 	nvpair_t *nvp;
202 	int i;
203 
204 	/*
205 	 * Attribute members to include in event-specific defect
206 	 * payload.  Some attributes will not be present for some
207 	 * cases - e.g., if we timed out and solved the case without
208 	 * a "dump_available" report.
209 	 */
210 	const char *toadd[] = {
211 		"os-instance-uuid",	/* same as case uuid */
212 		"panicstr",		/* for initial classification work */
213 		"panicstack",		/* for initial classification work */
214 		"crashtime",		/* in epoch time */
215 		"panic-time",		/* Formatted crash time */
216 	};
217 
218 	if (ep != NULL)
219 		fmd_case_add_ereport(hdl, cp, ep);
220 	/*
221 	 * As a temporary solution we create an fmri in the sw scheme
222 	 * in panic_sw_fmri. This should become a generic fmri constructor
223 	 *
224 	 * We need to use a resource FMRI which will have a sufficiently
225 	 * unique string representation such that fmd will not see
226 	 * repeated panic diagnoses (all using the same defect class)
227 	 * as duplicates and discard later cases.  We can't actually diagnose
228 	 * the panic to anything specific (e.g., a path to a module and
229 	 * function/line etc therein).  We could pick on a generic
230 	 * representative such as /kernel/genunix but that could lead
231 	 * to misunderstanding.  So we choose a path based on <dumpdir>
232 	 * and the OS instance UUID - "<dumpdir>/.<os-instance-uuid>".
233 	 * There's no file at that path (*) but no matter.  We can't use
234 	 * <dumpdir>/vmdump.N or similar because if savecore is disabled
235 	 * or failed we don't have any file or instance number.
236 	 *
237 	 * (*) Some day it would seem tidier to keep all files to do
238 	 * with a single crash (unix/vmcore/vmdump, analysis output etc)
239 	 * in a distinct directory, and <dumpdir>/.<uuid> seems like a good
240 	 * choice.  For compatibility we'd symlink into it.  So that is
241 	 * another reason for this choice - some day it may exist!
242 	 */
243 	(void) nvlist_lookup_string(attr, "dumpdir", &dumpdir);
244 	(void) nvlist_lookup_string(attr, "os-instance-uuid", &uuid);
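	/* "<dumpdir>" + "/" + "." + 36-character uuid string + terminating NUL */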
245 	path = alloca(strlen(dumpdir) + 1 + 1 + 36 + 1);
246 	/* LINTED: E_SEC_SPRINTF_UNBOUNDED_COPY */
247 	(void) sprintf(path, "%s/.%s", dumpdir, uuid);
248 	rsrc = panic_sw_fmri(hdl, path);
249 
250 	defect = fmd_nvl_create_defect(hdl, SW_SUNOS_PANIC_DEFECT,
251 	    100, rsrc, NULL, rsrc);
252 	nvlist_free(rsrc);
253 
254 	(void) nvlist_add_boolean_value(defect, "savecore-success",
255 	    savecore_success);
256 
257 	if (savecore_success) {
258 		boolean_t compressed;
259 		int64_t instance;
260 		const char **pathfmts;
261 		char buf[2][32];
262 		int files = 0;
263 		char *arr[2];
264 		int i;
265 
266 		(void) nvlist_lookup_int64(attr, "instance", &instance);
267 		(void) nvlist_lookup_boolean_value(attr, "compressed",
268 		    &compressed);
269 
270 		pathfmts = compressed ? &dumpfiles_comp[0] : &dumpfiles[0];
271 
272 		for (i = 0; i < 2; i++) {
273 			if (pathfmts[i] == NULL) {
274 				arr[i] = NULL;
275 				continue;
276 			}
277 
278 			(void) snprintf(buf[i], 32, pathfmts[i], instance);
279 			arr[i] = buf[i];
280 			files++;
281 		}
282 
283 		(void) nvlist_add_string(defect, "dump-dir", dumpdir);
284 		(void) nvlist_add_string_array(defect, "dump-files", arr,
285 		    files);
286 	} else {
287 		char *rsn;
288 
289 		if (nvlist_lookup_string(attr, "failure-reason", &rsn) == 0)
290 			(void) nvlist_add_string(defect, "failure-reason", rsn);
291 	}
292 
293 	/*
294 	 * Not all attributes will necessarily be available - eg if
295 	 * dumpadm was not enabled there'll be no instance and dumpdir.
296 	 */
297 	for (i = 0; i < sizeof (toadd) / sizeof (toadd[0]); i++) {
298 		if (nvlist_lookup_nvpair(attr, toadd[i], &nvp) == 0)
299 			(void) nvlist_add_nvpair(defect, nvp);
300 	}
301 
302 	fmd_case_add_suspect(hdl, cp, defect);
303 	fmd_case_solve(hdl, cp);
304 
305 	/*
306 	 * Close the case.  Do not free the case data - the framework does
307 	 * that for us in the closure callback.
308 	 */
309 	fmd_case_close(hdl, cp);
310 	BUMPSTAT(swde_panic_diagnosed);
311 }
312 
313 /*ARGSUSED*/
314 static void
315 swde_panic_timeout(fmd_hdl_t *hdl, id_t timerid, void *data)
316 {
317 	fmd_case_t *cp = swde_case_first(hdl, myid);
318 	swde_panic_casedata_t *cdp;
319 	time_t now = time(NULL);
320 	nvlist_t *attr;
321 	int remain = 0;
322 	uint32_t vers;
323 
324 	while (cp != NULL) {
325 		cdp = swde_case_data(hdl, cp, &vers);
326 		if (vers != SWDE_PANIC_CASEDATA_VERS)
327 			fmd_hdl_abort(hdl, "case data version confused\n");
328 
329 		if (now > cdp->scd_receive_time + 30 * 60) {
330 			if (nvlist_unpack((char *)cdp + sizeof (*cdp),
331 			    cdp->scd_nvlbufsz, &attr, 0) == 0) {
332 				swde_panic_solve(hdl, cp, attr, NULL, B_FALSE);
333 				nvlist_free(attr);
334 			} else {
335 				BUMPSTAT(swde_panic_badcasedata);
336 				fmd_case_close(hdl, cp);
337 			}
338 		} else {
339 			remain++;
340 		}
341 
342 
343 		cp = swde_case_next(hdl, cp);
344 	}
345 
346 	if (remain) {
347 		mytimerid = sw_timer_install(hdl, myid, NULL, NULL,
348 		    10ULL * NANOSEC * 60);
349 	}
350 }
351 
352 /*
353  * Our verify entry point is called for each of our open cases during
354  * module load.  We must return 0 for the case to be closed by our caller,
355  * or 1 to keep it (also return 1 if we have closed it during this call).
356  */
357 static int
358 swde_panic_vrfy(fmd_hdl_t *hdl, fmd_case_t *cp)
359 {
360 	swde_panic_casedata_t *cdp;
361 	time_t now = time(NULL);
362 	nvlist_t *attr;
363 	uint32_t vers;
364 
365 	cdp = swde_case_data(hdl, cp, &vers);
366 
367 	if (vers != SWDE_PANIC_CASEDATA_VERS)
368 		return (0);	/* case will be closed */
369 
370 	if (now > cdp->scd_receive_time + 30 * 60) {
371 		if (nvlist_unpack((char *)cdp + sizeof (*cdp),
372 		    cdp->scd_nvlbufsz, &attr, 0) == 0) {
373 			swde_panic_solve(hdl, cp, attr, NULL, B_FALSE);
374 			nvlist_free(attr);
375 			return (1);	/* case already closed */
376 		} else {
377 			return (0);	/* close case */
378 		}
379 	}
380 
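	/*
	 * This case is still within its waiting period; make sure the
	 * shared timer is armed so swde_panic_timeout() will revisit it.
	 */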
381 	if (mytimerid == 0)
382 		mytimerid = sw_timer_install(hdl, myid,
383 		    NULL, NULL, 10ULL * NANOSEC * 60);
384 
385 	return (1);	/* retain case */
386 }
387 
388 /*
389  * Handler for ireport.os.sunos.panic.dump_pending_on_device.
390  *
391  * A future RFE should add a means of avoiding the diagnosis of repeated
392  * defects on panic loops, which would just add to the mayhem and potentially
393  * log lots of calls through ASR.  For example, panics with sufficiently
394  * similar panic strings and/or stacks should not diagnose to new defects
395  * within some period of time.
396  */
397 
398 /*ARGSUSED*/
399 void
400 swde_panic_detected(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
401     const char *class, void *arg)
402 {
403 	boolean_t fm_panic, expect_savecore;
404 	swde_panic_casedata_t *cdp;
405 	nvlist_t *attr;
406 	fmd_case_t *cp;
407 	char *fmribuf;
408 	char *uuid;
409 	size_t sz;
410 
411 	fmd_hdl_debug(hdl, "swde_panic_detected\n");
412 
413 	if (nvlist_lookup_nvlist(nvl, FM_IREPORT_ATTRIBUTES, &attr) != 0) {
414 		BUMPSTAT(swde_panic_noattr);
415 		return;
416 	}
417 
418 	if (nvlist_lookup_string(attr, "os-instance-uuid", &uuid) != 0) {
419 		BUMPSTAT(swde_panic_nouuid);
420 		return;
421 	}
422 
423 	fmd_hdl_debug(hdl, "swde_panic_detected: OS instance %s\n", uuid);
424 
425 	if (nvlist_lookup_boolean_value(attr, "fm-panic", &fm_panic) != 0 ||
426 	    fm_panic == B_TRUE) {
427 		BUMPSTAT(swde_panic_unexpected_fm_panic);
428 		return;
429 	}
430 
431 	/*
432 	 * Prepare serialization data to be associated with a new
433 	 * case.  Our serialization data consists of a swde_panic_casedata_t
434 	 * structure followed by a packed nvlist of the attributes of
435 	 * the initial event.
436 	 */
437 	if (nvlist_size(attr, &sz, NV_ENCODE_NATIVE) != 0) {
438 		BUMPSTAT(swde_panic_failsrlz);
439 		return;
440 	}
441 
442 	cdp = fmd_hdl_zalloc(hdl, sizeof (*cdp) + sz, FMD_SLEEP);
443 	fmribuf = (char *)cdp + sizeof (*cdp);
444 	cdp->scd_vers = SWDE_PANIC_CASEDATA_VERS;
445 	cdp->scd_receive_time = time(NULL);
446 	cdp->scd_nvlbufsz = sz;
447 
448 	/*
449 	 * Open a case with UUID matching that of the panicking kernel, add this
450 	 * event to the case.
451 	 */
452 	if ((cp = swde_case_open(hdl, myid, uuid, SWDE_PANIC_CASEDATA_VERS,
453 	    cdp, sizeof (*cdp) + sz)) == NULL) {
454 		BUMPSTAT(swde_panic_dupuuid);
455 		fmd_hdl_debug(hdl, "swde_case_open returned NULL - dup?\n");
456 		fmd_hdl_free(hdl, cdp, sizeof (*cdp) + sz);
457 		return;
458 	}
459 
460 	fmd_case_setprincipal(hdl, cp, ep);
461 
462 	if (nvlist_lookup_boolean_value(attr, "will-attempt-savecore",
463 	    &expect_savecore) != 0 || expect_savecore == B_FALSE) {
464 		fmd_hdl_debug(hdl, "savecore not being attempted - "
465 		    "solve now\n");
466 		swde_panic_solve(hdl, cp, attr, ep, B_FALSE);
467 		return;
468 	}
469 
470 	/*
471 	 * We expect to see either a "dump_available" or a "savecore_failure"
472 	 * event before too long.  In case that never shows up, for whatever
473 	 * reason, we want to be able to solve the case anyway.
474 	 */
475 	fmd_case_add_ereport(hdl, cp, ep);
476 	(void) nvlist_pack(attr, &fmribuf, &sz, NV_ENCODE_NATIVE, 0);
477 	swde_case_data_write(hdl, cp);
478 
479 	if (mytimerid == 0) {
480 		mytimerid = sw_timer_install(hdl, myid, NULL, ep,
481 		    10ULL * NANOSEC * 60);
482 		fmd_hdl_debug(hdl, "armed timer\n");
483 	} else {
484 		fmd_hdl_debug(hdl, "timer already armed\n");
485 	}
486 }
487 
488 /*
489  * savecore has now run and has either saved a crash dump to the filesystem
490  * (a compressed vmdump.N or an uncompressed {unix.N, vmcore.N} pair) or has
491  * failed.  Savecore raises an ireport either way; we handle both here.
492  */
493 
494 /*ARGSUSED*/
495 void
496 swde_panic_savecore_done(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
497     const char *class, void *arg)
498 {
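	/*
	 * arg comes from swde_panic_disp[]: (void *)1 for the
	 * "dump_available" event and NULL for "savecore_failure".
	 */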
499 	boolean_t savecore_success = (arg != NULL);
500 	boolean_t fm_panic;
501 	nvlist_t *attr;
502 	fmd_case_t *cp;
503 	char *uuid;
504 
505 	fmd_hdl_debug(hdl, "savecore_done (%s)\n", savecore_success ?
506 	    "success" : "fail");
507 
508 	if (nvlist_lookup_nvlist(nvl, FM_IREPORT_ATTRIBUTES, &attr) != 0) {
509 		BUMPSTAT(swde_panic_noattr);
510 		return;
511 	}
512 
513 	if (nvlist_lookup_boolean_value(attr, "fm-panic", &fm_panic) != 0 ||
514 	    fm_panic == B_TRUE) {
515 		return;		/* not expected, but just in case */
516 	}
517 
518 	if (nvlist_lookup_string(attr, "os-instance-uuid", &uuid) != 0) {
519 		BUMPSTAT(swde_panic_nouuid);
520 		return;
521 	}
522 
523 	/*
524 	 * Find the case related to the panicking kernel; our cases have
525 	 * the same uuid as the crashed OS image.
526 	 */
527 	cp = fmd_case_uulookup(hdl, uuid);
528 	if (cp == NULL) {
529 		/* Unable to find the case. */
530 		fmd_hdl_debug(hdl, "savecore_done: can't find case for "
531 		    "image %s\n", uuid);
532 		BUMPSTAT(swde_panic_nocase);
533 		return;
534 	}
535 
536 	fmd_hdl_debug(hdl, "savecore_done: solving case %s\n", uuid);
537 	swde_panic_solve(hdl, cp, attr, ep, savecore_success);
538 }
539 
540 const struct sw_disp swde_panic_disp[] = {
541 	{ SW_SUNOS_PANIC_DETECTED, swde_panic_detected, NULL },
542 	{ SW_SUNOS_PANIC_AVAIL, swde_panic_savecore_done, (void *)1 },
543 	{ SW_SUNOS_PANIC_FAILURE, swde_panic_savecore_done, NULL },
544 	/*
545 	 * Something has to subscribe to every fault or defect class
546 	 * diagnosed in fmd.  We subscribe here, but simply discard the event.
547 	 */
548 	{ SW_SUNOS_PANIC_DEFECT, NULL, NULL },
549 	{ NULL, NULL, NULL }
550 };
551 
552 /*ARGSUSED*/
553 int
554 swde_panic_init(fmd_hdl_t *hdl, id_t id, const struct sw_disp **dpp,
555     int *nelemp)
556 {
557 	myid = id;
558 
559 	if (getzoneid() != GLOBAL_ZONEID)
560 		return (SW_SUB_INIT_FAIL_VOLUNTARY);
561 
562 	(void) fmd_stat_create(hdl, FMD_STAT_NOALLOC,
563 	    sizeof (swde_panic_stats) / sizeof (fmd_stat_t),
564 	    (fmd_stat_t *)&swde_panic_stats);
565 
566 	fmd_hdl_subscribe(hdl, SW_SUNOS_PANIC_DETECTED);
567 	fmd_hdl_subscribe(hdl, SW_SUNOS_PANIC_FAILURE);
568 	fmd_hdl_subscribe(hdl, SW_SUNOS_PANIC_AVAIL);
569 
570 	*dpp = &swde_panic_disp[0];
571 	*nelemp = sizeof (swde_panic_disp) / sizeof (swde_panic_disp[0]);
572 	return (SW_SUB_INIT_SUCCESS);
573 }
574 
575 void
576 swde_panic_fini(fmd_hdl_t *hdl)
577 {
578 	if (mytimerid)
579 		sw_timer_remove(hdl, myid, mytimerid);
580 }
581 
582 const struct sw_subinfo panic_diag_info = {
583 	"panic diagnosis",		/* swsub_name */
584 	SW_CASE_PANIC,			/* swsub_casetype */
585 	swde_panic_init,		/* swsub_init */
586 	swde_panic_fini,		/* swsub_fini */
587 	swde_panic_timeout,		/* swsub_timeout */
588 	NULL,				/* swsub_case_close */
589 	swde_panic_vrfy,		/* swsub_case_vrfy */
590 };
591