xref: /titanic_50/usr/src/cmd/fm/fmd/common/fmd_case.c (revision 554ff184129088135ad2643c1c9832174a17be88)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/fm/protocol.h>
#include <uuid/uuid.h>
#include <alloca.h>

#include <fmd_alloc.h>
#include <fmd_module.h>
#include <fmd_error.h>
#include <fmd_conf.h>
#include <fmd_case.h>
#include <fmd_string.h>
#include <fmd_subr.h>
#include <fmd_protocol.h>
#include <fmd_event.h>
#include <fmd_eventq.h>
#include <fmd_dispq.h>
#include <fmd_buf.h>
#include <fmd_log.h>
#include <fmd_asru.h>

#include <fmd.h>

static const char *const _fmd_case_snames[] = {
	"UNSOLVED",	/* FMD_CASE_UNSOLVED */
	"SOLVED",	/* FMD_CASE_SOLVED */
	"CLOSED",	/* FMD_CASE_CLOSED */
};

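/*
 * Create the global case hash.  The number of buckets matches the daemon's
 * fmd.d_str_buckets setting, as with the other string-keyed hashes.
 */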
fmd_case_hash_t *
fmd_case_hash_create(void)
{
	fmd_case_hash_t *chp = fmd_alloc(sizeof (fmd_case_hash_t), FMD_SLEEP);

	(void) pthread_rwlock_init(&chp->ch_lock, NULL);
	chp->ch_hashlen = fmd.d_str_buckets;
	chp->ch_hash = fmd_zalloc(sizeof (void *) * chp->ch_hashlen, FMD_SLEEP);

	return (chp);
}

/*
 * Destroy the case hash.  Unlike most of our hash tables, no active references
 * are kept by the case hash because cases are destroyed when modules unload.
 * The hash must be destroyed after all modules are unloaded; if anything was
 * present in the hash it would be by definition a reference count leak.
 */
void
fmd_case_hash_destroy(fmd_case_hash_t *chp)
{
	fmd_free(chp->ch_hash, sizeof (void *) * chp->ch_hashlen);
	fmd_free(chp, sizeof (fmd_case_hash_t));
}

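/*
 * Build the suspect list (list.suspect) event for a solved case: gather each
 * suspect's fault event and class, honor any request to suppress messaging,
 * and convert the set of fault classes into a diagnosis code using the
 * owning module's dictionaries.  The caller must hold ci_lock.
 */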
static nvlist_t *
fmd_case_mkevent(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_susp_t *cis;

	char *code, **keys, **keyp;
	nvlist_t **nva, **nvp;
	const char *s;

	int msg = B_TRUE;
	boolean_t b;

	ASSERT(MUTEX_HELD(&cip->ci_lock));
	ASSERT(cip->ci_state >= FMD_CASE_SOLVED);

	code = alloca(cip->ci_mod->mod_codelen);
	keys = keyp = alloca(sizeof (char *) * (cip->ci_nsuspects + 1));
	nva = nvp = alloca(sizeof (nvlist_t *) * cip->ci_nsuspects);

	/*
	 * For each suspect associated with the case, store its fault event
	 * nvlist in 'nva' and its fault class in 'keys'.  We also look to see
	 * if any of the suspect faults have asked not to be messaged.  If any
	 * of them have made such a request, propagate that to the suspect list.
	 */
	for (cis = cip->ci_suspects; cis != NULL; cis = cis->cis_next) {
		if (nvlist_lookup_string(cis->cis_nvl, FM_CLASS, keyp) == 0)
			keyp++;

		*nvp++ = cis->cis_nvl;

		if (nvlist_lookup_boolean_value(cis->cis_nvl,
		    FM_SUSPECT_MESSAGE, &b) == 0 && b == B_FALSE)
			msg = B_FALSE;
	}

	*keyp = NULL; /* mark end of keys[] array for libdiagcode */

	/*
	 * Look up the diagcode corresponding to this suspect list.  If
	 * no suspects were defined for this case or if the lookup
	 * fails, the dictionary or module code is busted or not set up
	 * properly.  Emit the event with our precomputed default code.
	 */
	if (cip->ci_nsuspects == 0 || fmd_module_dc_key2code(
	    cip->ci_mod, keys, code, cip->ci_mod->mod_codelen) != 0) {
		(void) fmd_conf_getprop(fmd.d_conf, "nodiagcode", &s);
		code = alloca(strlen(s) + 1);
		(void) strcpy(code, s);
	}

	return (fmd_protocol_suspects(cip->ci_mod->mod_fmri,
	    cip->ci_uuid, code, cip->ci_nsuspects, nva, msg));
}

/*
 * Publish appropriate events based on the specified case state.  For a case
 * that is FMD_CASE_SOLVED, we send ci_event.  For a case that is
 * FMD_CASE_CLOSED, we send a case-closed event to the owner module.
 */
static void
fmd_case_publish(fmd_case_t *cp, uint_t state)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_event_t *e;
	nvlist_t *nvl;
	char *class;

	switch (state) {
	case FMD_CASE_SOLVED:
		(void) pthread_mutex_lock(&cip->ci_lock);

		/*
		 * If ci_event is NULL, the event was not created because the
		 * case was restored from a checkpoint before _fmd_init() was
		 * called.  Now that the module is ready, create the event.
		 */
		if (cip->ci_event == NULL)
			cip->ci_event = fmd_case_mkevent(cp);

		(void) pthread_mutex_unlock(&cip->ci_lock);

		(void) nvlist_xdup(cip->ci_event, &nvl, &fmd.d_nva);
		(void) nvlist_lookup_string(nvl, FM_CLASS, &class);

		e = fmd_event_create(FMD_EVT_PROTOCOL, FMD_HRT_NOW, nvl, class);
		(void) pthread_rwlock_rdlock(&fmd.d_log_lock);
		fmd_log_append(fmd.d_fltlog, e, cp);
		(void) pthread_rwlock_unlock(&fmd.d_log_lock);
		fmd_dispq_dispatch(fmd.d_disp, e, class);

		(void) pthread_mutex_lock(&cip->ci_mod->mod_stats_lock);
		cip->ci_mod->mod_stats->ms_casesolved.fmds_value.ui64++;
		(void) pthread_mutex_unlock(&cip->ci_mod->mod_stats_lock);

		break;

	case FMD_CASE_CLOSED:
		fmd_case_hold(cp);
		e = fmd_event_create(FMD_EVT_CLOSE, FMD_HRT_NOW, NULL, cp);
		fmd_eventq_insert_at_head(cip->ci_mod->mod_queue, e);

		(void) pthread_mutex_lock(&cip->ci_mod->mod_stats_lock);
		cip->ci_mod->mod_stats->ms_caseclosed.fmds_value.ui64++;
		(void) pthread_mutex_unlock(&cip->ci_mod->mod_stats_lock);

		break;
	}
}

/*
 * Refresh all of the cases by publishing events for each case if appropriate.
 * We do this once during startup to trigger case close and list.suspect events
 * for cases restored by checkpoints.  By holding the read lock on the case
 * hash, we ensure that we only refresh the current set of cases.  New cases
 * created in response to the events will block in fmd_case_hash_insert().
 */
void
fmd_case_hash_refresh(fmd_case_hash_t *chp)
{
	fmd_case_impl_t *cip;
	uint_t i;

	(void) pthread_rwlock_rdlock(&chp->ch_lock);

	for (i = 0; i < chp->ch_hashlen; i++) {
		for (cip = chp->ch_hash[i]; cip != NULL; cip = cip->ci_next)
			fmd_case_publish((fmd_case_t *)cip, cip->ci_state);
	}

	(void) pthread_rwlock_unlock(&chp->ch_lock);
}

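/*
 * Look up a case by UUID.  If it is found, the case is returned with an
 * additional hold on it; otherwise NULL is returned and errno is set to
 * EFMD_CASE_INVAL.
 */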
fmd_case_t *
fmd_case_hash_lookup(fmd_case_hash_t *chp, const char *uuid)
{
	fmd_case_impl_t *cip;
	uint_t h;

	(void) pthread_rwlock_rdlock(&chp->ch_lock);
	h = fmd_strhash(uuid) % chp->ch_hashlen;

	for (cip = chp->ch_hash[h]; cip != NULL; cip = cip->ci_next) {
		if (strcmp(cip->ci_uuid, uuid) == 0)
			break;
	}

	if (cip != NULL)
		fmd_case_hold((fmd_case_t *)cip);
	else
		(void) fmd_set_errno(EFMD_CASE_INVAL);

	(void) pthread_rwlock_unlock(&chp->ch_lock);
	return ((fmd_case_t *)cip);
}

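/*
 * Insert a case into the case hash.  If the UUID is already present, NULL is
 * returned and the hash is left unchanged; otherwise the case is returned.
 */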
static fmd_case_impl_t *
fmd_case_hash_insert(fmd_case_hash_t *chp, fmd_case_impl_t *cip)
{
	fmd_case_impl_t *eip;
	uint_t h;

	(void) pthread_rwlock_wrlock(&chp->ch_lock);
	h = fmd_strhash(cip->ci_uuid) % chp->ch_hashlen;

	for (eip = chp->ch_hash[h]; eip != NULL; eip = eip->ci_next) {
		if (strcmp(cip->ci_uuid, eip->ci_uuid) == 0) {
			(void) pthread_rwlock_unlock(&chp->ch_lock);
			return (NULL); /* uuid already present */
		}
	}

	cip->ci_next = chp->ch_hash[h];
	chp->ch_hash[h] = cip;

	(void) pthread_rwlock_unlock(&chp->ch_lock);
	return (cip);
}

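/*
 * Delete a case from the case hash.  The case must be present; if it cannot
 * be found on its hash chain we panic, since that indicates corruption.
 */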
static void
fmd_case_hash_delete(fmd_case_hash_t *chp, fmd_case_impl_t *cip)
{
	fmd_case_impl_t *cp, **pp;
	uint_t h;

	(void) pthread_rwlock_wrlock(&chp->ch_lock);

	h = fmd_strhash(cip->ci_uuid) % chp->ch_hashlen;
	pp = &chp->ch_hash[h];

	for (cp = *pp; cp != NULL; cp = cp->ci_next) {
		if (cp != cip)
			pp = &cp->ci_next;
		else
			break;
	}

	if (cp == NULL) {
		fmd_panic("case %p (%s) not found on hash chain %u\n",
		    (void *)cip, cip->ci_uuid, h);
	}

	*pp = cp->ci_next;
	cp->ci_next = NULL;

	(void) pthread_rwlock_unlock(&chp->ch_lock);
}

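/*
 * Create a new case owned by the specified module: assign it a unique UUID,
 * insert it into the case hash, and append it to the module's case list.
 */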
fmd_case_t *
fmd_case_create(fmd_module_t *mp, void *data)
{
	fmd_case_impl_t *cip = fmd_zalloc(sizeof (fmd_case_impl_t), FMD_SLEEP);
	uuid_t uuid;

	(void) pthread_mutex_init(&cip->ci_lock, NULL);
	fmd_buf_hash_create(&cip->ci_bufs);

	fmd_module_hold(mp);
	cip->ci_mod = mp;
	cip->ci_refs = 1;
	cip->ci_state = FMD_CASE_UNSOLVED;
	cip->ci_flags = FMD_CF_DIRTY;
	cip->ci_data = data;

	/*
	 * Calling libuuid: get a clue.  The library interfaces cleverly do not
	 * define any constant for the length of an unparse string, and do not
	 * permit the caller to specify a buffer length for safety.  The spec
	 * says it will be 36 bytes, but we make it tunable just in case.
	 */
	(void) fmd_conf_getprop(fmd.d_conf, "uuidlen", &cip->ci_uuidlen);
	cip->ci_uuid = fmd_zalloc(cip->ci_uuidlen + 1, FMD_SLEEP);

	/*
	 * We expect this loop to execute only once, but code it defensively
	 * against the possibility of libuuid bugs.  Keep generating uuids and
	 * attempting to do a hash insert until we get a unique one.
	 */
	do {
		uuid_generate(uuid);
		uuid_unparse(uuid, cip->ci_uuid);
	} while (fmd_case_hash_insert(fmd.d_cases, cip) == NULL);

	ASSERT(fmd_module_locked(mp));
	fmd_list_append(&mp->mod_cases, cip);
	fmd_module_setcdirty(mp);

	(void) pthread_mutex_lock(&cip->ci_mod->mod_stats_lock);
	cip->ci_mod->mod_stats->ms_caseopen.fmds_value.ui64++;
	(void) pthread_mutex_unlock(&cip->ci_mod->mod_stats_lock);

	return ((fmd_case_t *)cip);
}

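/*
 * Recreate a case with a known UUID, as is done when restoring cases from a
 * checkpoint.  If the UUID already exists in the case hash, the partially
 * constructed case is destroyed and NULL is returned.
 */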
fmd_case_t *
fmd_case_recreate(fmd_module_t *mp, const char *uuid)
{
	fmd_case_impl_t *cip = fmd_zalloc(sizeof (fmd_case_impl_t), FMD_SLEEP);

	(void) pthread_mutex_init(&cip->ci_lock, NULL);
	fmd_buf_hash_create(&cip->ci_bufs);

	fmd_module_hold(mp);
	cip->ci_mod = mp;
	cip->ci_refs = 1;
	cip->ci_state = FMD_CASE_UNSOLVED;
	cip->ci_uuid = fmd_strdup(uuid, FMD_SLEEP);
	cip->ci_uuidlen = strlen(cip->ci_uuid);

	ASSERT(fmd_module_locked(mp));
	fmd_list_append(&mp->mod_cases, cip);

	(void) pthread_mutex_lock(&cip->ci_mod->mod_stats_lock);
	cip->ci_mod->mod_stats->ms_caseopen.fmds_value.ui64++;
	(void) pthread_mutex_unlock(&cip->ci_mod->mod_stats_lock);

	if (fmd_case_hash_insert(fmd.d_cases, cip) == NULL) {
		fmd_case_destroy((fmd_case_t *)cip);
		return (NULL);
	}

	return ((fmd_case_t *)cip);
}

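/*
 * Destroy a case once its final reference has been released: remove it from
 * the case hash, release its events and suspects, destroy its buffers, and
 * drop the hold on the owning module.  The caller must hold ci_lock.
 */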
void
fmd_case_destroy(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_item_t *cit, *ncit;
	fmd_case_susp_t *cis, *ncis;

	ASSERT(MUTEX_HELD(&cip->ci_lock));
	ASSERT(cip->ci_refs == 0);

	fmd_case_hash_delete(fmd.d_cases, cip);

	for (cit = cip->ci_items; cit != NULL; cit = ncit) {
		ncit = cit->cit_next;
		fmd_event_rele(cit->cit_event);
		fmd_free(cit, sizeof (fmd_case_item_t));
	}

	for (cis = cip->ci_suspects; cis != NULL; cis = ncis) {
		ncis = cis->cis_next;
		nvlist_free(cis->cis_nvl);
		fmd_free(cis, sizeof (fmd_case_susp_t));
	}

	if (cip->ci_principal != NULL)
		fmd_event_rele(cip->ci_principal);

	nvlist_free(cip->ci_event);
	fmd_free(cip->ci_uuid, cip->ci_uuidlen + 1);
	fmd_buf_hash_destroy(&cip->ci_bufs);

	/*
	 * Unlike other case functions, fmd_case_destroy() can be called from
	 * fmd_module_unload() after the module is unregistered and mod_stats
	 * has been destroyed.  As such we must check for NULL mod_stats here.
	 */
	(void) pthread_mutex_lock(&cip->ci_mod->mod_stats_lock);
	if (cip->ci_mod->mod_stats != NULL)
		cip->ci_mod->mod_stats->ms_caseopen.fmds_value.ui64--;
	(void) pthread_mutex_unlock(&cip->ci_mod->mod_stats_lock);

	fmd_module_setcdirty(cip->ci_mod);
	fmd_module_rele(cip->ci_mod);
	fmd_free(cip, sizeof (fmd_case_impl_t));
}

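/*
 * Add a reference to the case.
 */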
void
fmd_case_hold(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;

	(void) pthread_mutex_lock(&cip->ci_lock);
	cip->ci_refs++;
	ASSERT(cip->ci_refs != 0);
	(void) pthread_mutex_unlock(&cip->ci_lock);
}

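/*
 * Release a reference to the case.  When the final reference is released,
 * the case is destroyed.
 */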
void
fmd_case_rele(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;

	(void) pthread_mutex_lock(&cip->ci_lock);
	ASSERT(cip->ci_refs != 0);

	if (--cip->ci_refs == 0)
		fmd_case_destroy((fmd_case_t *)cip);
	else
		(void) pthread_mutex_unlock(&cip->ci_lock);
}

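/*
 * Set the principal event for the case, releasing any previous principal.
 * The new event is held and transitioned to DIAGNOSED if the case has
 * already been solved (and ci_event created), or to ACCEPTED otherwise.
 */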
void
fmd_case_insert_principal(fmd_case_t *cp, fmd_event_t *ep)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_event_t *oep;
	uint_t state;

	fmd_event_hold(ep);
	(void) pthread_mutex_lock(&cip->ci_lock);

	if (cip->ci_state >= FMD_CASE_SOLVED && cip->ci_event != NULL)
		state = FMD_EVS_DIAGNOSED;
	else
		state = FMD_EVS_ACCEPTED;

	oep = cip->ci_principal;
	cip->ci_principal = ep;

	cip->ci_flags |= FMD_CF_DIRTY;
	(void) pthread_mutex_unlock(&cip->ci_lock);

	fmd_module_setcdirty(cip->ci_mod);
	fmd_event_transition(ep, state);

	if (oep != NULL)
		fmd_event_rele(oep);
}

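/*
 * Add an event to the case item list.  The event is held and transitioned
 * to DIAGNOSED if the case has already been solved (and ci_event created),
 * or to ACCEPTED otherwise.
 */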
void
fmd_case_insert_event(fmd_case_t *cp, fmd_event_t *ep)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_item_t *cit = fmd_alloc(sizeof (fmd_case_item_t), FMD_SLEEP);
	uint_t state;

	fmd_event_hold(ep);
	(void) pthread_mutex_lock(&cip->ci_lock);

	cit->cit_next = cip->ci_items;
	cit->cit_event = ep;

	cip->ci_items = cit;
	cip->ci_nitems++;

	if (cip->ci_state >= FMD_CASE_SOLVED && cip->ci_event != NULL)
		state = FMD_EVS_DIAGNOSED;
	else
		state = FMD_EVS_ACCEPTED;

	cip->ci_flags |= FMD_CF_DIRTY;
	(void) pthread_mutex_unlock(&cip->ci_lock);

	fmd_module_setcdirty(cip->ci_mod);
	fmd_event_transition(ep, state);
}

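/*
 * Add a suspect fault event to the case.  The case assumes responsibility
 * for freeing the nvlist.  Suspects may only be added to an unsolved case.
 */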
void
fmd_case_insert_suspect(fmd_case_t *cp, nvlist_t *nvl)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_susp_t *cis = fmd_alloc(sizeof (fmd_case_susp_t), FMD_SLEEP);

	(void) pthread_mutex_lock(&cip->ci_lock);
	ASSERT(cip->ci_state < FMD_CASE_SOLVED);
	cip->ci_flags |= FMD_CF_DIRTY;

	cis->cis_next = cip->ci_suspects;
	cis->cis_nvl = nvl;

	cip->ci_suspects = cis;
	cip->ci_nsuspects++;

	(void) pthread_mutex_unlock(&cip->ci_lock);
	fmd_module_setcdirty(cip->ci_mod);
}

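/*
 * Discard all suspects associated with the case, freeing their fault event
 * nvlists.  Like fmd_case_insert_suspect(), this requires an unsolved case.
 */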
void
fmd_case_reset_suspects(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_susp_t *cis, *ncis;

	(void) pthread_mutex_lock(&cip->ci_lock);
	ASSERT(cip->ci_state < FMD_CASE_SOLVED);

	for (cis = cip->ci_suspects; cis != NULL; cis = ncis) {
		ncis = cis->cis_next;
		nvlist_free(cis->cis_nvl);
		fmd_free(cis, sizeof (fmd_case_susp_t));
	}

	cip->ci_flags |= FMD_CF_DIRTY;
	cip->ci_suspects = NULL;
	cip->ci_nsuspects = 0;

	(void) pthread_mutex_unlock(&cip->ci_lock);
	fmd_module_setcdirty(cip->ci_mod);
}

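/*
 * Transition the case to the specified state.  If the case is already at or
 * beyond that state, the call has no effect.  Solving a case creates its
 * suspect list event and marks its events DIAGNOSED; closing a case marks
 * each suspect's ASRU unusable unless the case was closed by a repair.  The
 * state change is published if the owning module has completed _fmd_init();
 * otherwise fmd_case_hash_refresh() publishes it later.
 */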
void
fmd_case_transition(fmd_case_t *cp, uint_t state)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	nvlist_t *nvl;

	/*
	 * Grab ci_lock, update the case state, and set the dirty bit.  If we
	 * are solving the case, create a list.suspect event as cip->ci_event
	 * and iterate over all the case events and mark them as DIAGNOSED.
	 */
	(void) pthread_mutex_lock(&cip->ci_lock);

	if (cip->ci_state >= state) {
		(void) pthread_mutex_unlock(&cip->ci_lock);
		return; /* already in specified state */
	}

	TRACE((FMD_DBG_CASE, "case %s %s->%s", cip->ci_uuid,
	    _fmd_case_snames[cip->ci_state], _fmd_case_snames[state]));

	cip->ci_state = state;
	cip->ci_flags |= FMD_CF_DIRTY;

	switch (state) {
	case FMD_CASE_SOLVED: {
		fmd_case_item_t *cit;

		/*
		 * If the module has been initialized, then fill in ci_event.
		 * If not, we are being called from the checkpoint code, in
		 * which case fmd_case_hash_refresh() will create and
		 * publish the event later once the module has initialized.
		 */
		if (cip->ci_mod->mod_flags & FMD_MOD_INIT)
			cip->ci_event = fmd_case_mkevent(cp);

		for (cit = cip->ci_items; cit != NULL; cit = cit->cit_next)
			fmd_event_transition(cit->cit_event, FMD_EVS_DIAGNOSED);

		if (cip->ci_principal != NULL) {
			fmd_event_transition(cip->ci_principal,
			    FMD_EVS_DIAGNOSED);
		}
		break;
	}

	case FMD_CASE_CLOSED: {
		fmd_case_susp_t *cis;
		fmd_asru_t *asru;

		if (cip->ci_flags & FMD_CF_REPAIR)
			break; /* don't change ASRUs if repair closed case */

		/*
		 * For each fault event in the suspect list, attempt to look up
		 * the corresponding ASRU in the ASRU dictionary.  If the ASRU
		 * is found there and is marked faulty, we now mark it unusable
		 * and record the case meta-data and fault event with the ASRU.
		 */
		for (cis = cip->ci_suspects; cis != NULL; cis = cis->cis_next) {
			if (nvlist_lookup_nvlist(cis->cis_nvl, FM_FAULT_ASRU,
			    &nvl) == 0 && (asru = fmd_asru_hash_lookup_nvl(
			    fmd.d_asrus, nvl, FMD_B_FALSE)) != NULL) {
				(void) fmd_asru_setflags(asru,
				    FMD_ASRU_UNUSABLE,
				    cip->ci_uuid, cis->cis_nvl);
				fmd_asru_hash_release(fmd.d_asrus, asru);
			}
		}
		break;
	}
	}

	(void) pthread_mutex_unlock(&cip->ci_lock);
	fmd_module_setcdirty(cip->ci_mod);

	/*
	 * If the module has been initialized, then publish the appropriate
	 * event for the new case state.  If not, we are being called from
	 * the checkpoint code, in which case fmd_case_hash_refresh() will
	 * publish the event later once all the modules have initialized.
	 */
	if (cip->ci_mod->mod_flags & FMD_MOD_INIT)
		fmd_case_publish(cp, state);
}

void
fmd_case_setdirty(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;

	(void) pthread_mutex_lock(&cip->ci_lock);
	cip->ci_flags |= FMD_CF_DIRTY;
	(void) pthread_mutex_unlock(&cip->ci_lock);

	fmd_module_setcdirty(cip->ci_mod);
}

void
fmd_case_clrdirty(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;

	(void) pthread_mutex_lock(&cip->ci_lock);
	cip->ci_flags &= ~FMD_CF_DIRTY;
	(void) pthread_mutex_unlock(&cip->ci_lock);
}

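/*
 * If the case is dirty, commit each of its events and its buffers and then
 * clear the case dirty flag.
 */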
void
fmd_case_commit(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_item_t *cit;

	(void) pthread_mutex_lock(&cip->ci_lock);

	if (cip->ci_flags & FMD_CF_DIRTY) {
		for (cit = cip->ci_items; cit != NULL; cit = cit->cit_next)
			fmd_event_commit(cit->cit_event);

		if (cip->ci_principal != NULL)
			fmd_event_commit(cip->ci_principal);

		fmd_buf_hash_commit(&cip->ci_bufs);
		cip->ci_flags &= ~FMD_CF_DIRTY;
	}

	(void) pthread_mutex_unlock(&cip->ci_lock);
}

/*
 * Indicate that the case may need to change state because one or more of the
 * ASRUs named as a suspect has changed state.  We examine all the suspects
 * and if none are still faulty, we initiate a case close transition.
 */
void
fmd_case_update(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_susp_t *cis;
	fmd_asru_t *asru;
	nvlist_t *nvl;

	int state = 0;

	(void) pthread_mutex_lock(&cip->ci_lock);

	if (cip->ci_state < FMD_CASE_SOLVED) {
		(void) pthread_mutex_unlock(&cip->ci_lock);
		return; /* update is not yet appropriate */
	}

	for (cis = cip->ci_suspects; cis != NULL; cis = cis->cis_next) {
		if (nvlist_lookup_nvlist(cis->cis_nvl, FM_FAULT_ASRU,
		    &nvl) == 0 && (asru = fmd_asru_hash_lookup_nvl(
		    fmd.d_asrus, nvl, FMD_B_FALSE)) != NULL) {
			state |= fmd_asru_getstate(asru);
			fmd_asru_hash_release(fmd.d_asrus, asru);
		}
	}

	if (!(state & FMD_ASRU_FAULTY))
		cip->ci_flags |= FMD_CF_REPAIR;

	(void) pthread_mutex_unlock(&cip->ci_lock);

	if (!(state & FMD_ASRU_FAULTY))
		fmd_case_transition(cp, FMD_CASE_CLOSED);
}

/*
 * Indicate that the problem corresponding to a case has been repaired by
 * clearing the faulty bit on each ASRU named as a suspect.  If the case has
 * not already been closed, this function initiates the case close transition.
 */
int
fmd_case_repair(fmd_case_t *cp)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_susp_t *cis;
	fmd_asru_t *asru;
	nvlist_t *nvl;

	(void) pthread_mutex_lock(&cip->ci_lock);

	if (cip->ci_state < FMD_CASE_SOLVED) {
		(void) pthread_mutex_unlock(&cip->ci_lock);
		return (fmd_set_errno(EFMD_CASE_STATE));
	}

	for (cis = cip->ci_suspects; cis != NULL; cis = cis->cis_next) {
		if (nvlist_lookup_nvlist(cis->cis_nvl, FM_FAULT_ASRU,
		    &nvl) == 0 && (asru = fmd_asru_hash_lookup_nvl(
		    fmd.d_asrus, nvl, FMD_B_FALSE)) != NULL) {
			(void) fmd_asru_clrflags(asru,
			    FMD_ASRU_FAULTY, NULL, NULL);
			fmd_asru_hash_release(fmd.d_asrus, asru);
		}
	}

	cip->ci_flags |= FMD_CF_REPAIR;
	(void) pthread_mutex_unlock(&cip->ci_lock);

	fmd_case_transition(cp, FMD_CASE_CLOSED);
	return (0);
}

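/*
 * Return non-zero if the specified event is associated with the case, either
 * as the principal or as one of the case items.  If it is, the event is also
 * transitioned to DIAGNOSED or ACCEPTED to match the current case state.
 */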
int
fmd_case_contains(fmd_case_t *cp, fmd_event_t *ep)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	fmd_case_item_t *cit;
	uint_t state;
	int rv = 0;

	(void) pthread_mutex_lock(&cip->ci_lock);

	if (cip->ci_state >= FMD_CASE_SOLVED)
		state = FMD_EVS_DIAGNOSED;
	else
		state = FMD_EVS_ACCEPTED;

	for (cit = cip->ci_items; cit != NULL; cit = cit->cit_next) {
		if ((rv = fmd_event_equal(ep, cit->cit_event)) != 0)
			break;
	}

	if (rv == 0 && cip->ci_principal != NULL)
		rv = fmd_event_equal(ep, cip->ci_principal);

	(void) pthread_mutex_unlock(&cip->ci_lock);

	if (rv != 0)
		fmd_event_transition(ep, state);

	return (rv);
}
774