xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_event.c (revision e9af4bc0b1cc30cea75d6ad4aa2fde97d985e9be)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #define	DEF_EVENT_STRUCT  /* Needed for emlxs_events.h in emlxs_event.h */
29 #include <emlxs.h>
30 
31 
32 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33 EMLXS_MSG_DEF(EMLXS_EVENT_C);
34 
35 
36 static uint32_t emlxs_event_check(emlxs_port_t *port, emlxs_event_t *evt);
37 static void emlxs_event_destroy(emlxs_hba_t *hba, emlxs_event_entry_t *entry);
38 
/* Generic no-op callback; fills event-handler slots that need no action. */
extern void
emlxs_null_func()
{
}
41 
42 
43 static uint32_t
44 emlxs_event_check(emlxs_port_t *port, emlxs_event_t *evt)
45 {
46 	emlxs_hba_t *hba = HBA;
47 
48 	/* Check if the event is being requested */
49 	if ((hba->event_mask & evt->mask)) {
50 		return (1);
51 	}
52 
53 #ifdef SAN_DIAG_SUPPORT
54 	if ((port->sd_event_mask & evt->mask)) {
55 		return (1);
56 	}
57 #endif /* SAN_DIAG_SUPPORT */
58 
59 	return (0);
60 
61 } /* emlxs_event_check() */
62 
63 
64 extern uint32_t
65 emlxs_event_queue_create(emlxs_hba_t *hba)
66 {
67 	emlxs_event_queue_t *eventq = &EVENTQ;
68 	char buf[40];
69 #ifdef MSI_SUPPORT
70 	ddi_intr_handle_t handle;
71 	uint32_t intr_pri;
72 	int32_t actual;
73 	uint32_t ret;
74 #endif /* MSI_SUPPORT */
75 	ddi_iblock_cookie_t iblock;
76 
77 	/* Clear the queue */
78 	bzero(eventq, sizeof (emlxs_event_queue_t));
79 
80 	/* Initialize */
81 	(void) sprintf(buf, "?%s%d_evt_lock control variable", DRIVER_NAME,
82 	    hba->ddiinst);
83 	cv_init(&eventq->lock_cv, buf, CV_DRIVER, NULL);
84 
85 	(void) sprintf(buf, "?%s%d_evt_lock mutex", DRIVER_NAME, hba->ddiinst);
86 
87 	if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
88 		/* Get the current interrupt block cookie */
89 		(void) ddi_get_iblock_cookie(hba->dip, (uint_t)EMLXS_INUMBER,
90 		    &iblock);
91 
92 		/* Create the mutex lock */
93 		mutex_init(&eventq->lock, buf, MUTEX_DRIVER, (void *)iblock);
94 	}
95 #ifdef  MSI_SUPPORT
96 	else {
97 		/* Allocate a temporary interrupt handle */
98 		actual = 0;
99 		ret =
100 		    ddi_intr_alloc(hba->dip, &handle, DDI_INTR_TYPE_FIXED,
101 		    EMLXS_MSI_INUMBER, 1, &actual, DDI_INTR_ALLOC_NORMAL);
102 
103 		if (ret != DDI_SUCCESS || actual == 0) {
104 			cmn_err(CE_WARN,
105 			    "?%s%d: Unable to allocate temporary interrupt "
106 			    "handle. ret=%d actual=%d", DRIVER_NAME,
107 			    hba->ddiinst, ret, actual);
108 
109 			bzero(eventq, sizeof (emlxs_event_queue_t));
110 
111 			return (0);
112 		}
113 
114 		/* Get the current interrupt priority */
115 		ret = ddi_intr_get_pri(handle, &intr_pri);
116 
117 		if (ret != DDI_SUCCESS) {
118 			cmn_err(CE_WARN,
119 			    "?%s%d: Unable to get interrupt priority. ret=%d",
120 			    DRIVER_NAME, hba->ddiinst, ret);
121 
122 			bzero(eventq, sizeof (emlxs_event_queue_t));
123 
124 			return (0);
125 		}
126 
127 		/* Create the log mutex lock */
128 		mutex_init(&eventq->lock, buf, MUTEX_DRIVER,
129 		    (void *)((unsigned long)intr_pri));
130 
131 		/* Free the temporary handle */
132 		(void) ddi_intr_free(handle);
133 	}
134 #endif
135 
136 	return (1);
137 
138 } /* emlxs_event_queue_create() */
139 
140 
/*
 * emlxs_event_queue_destroy()
 *
 * Tear down the HBA event queue: clear all registered event masks,
 * wake any threads sleeping in the event cv, destroy every event
 * still queued, then destroy the lock/cv pair and zero the queue.
 */
extern void
emlxs_event_queue_destroy(emlxs_hba_t *hba)
{
	emlxs_port_t *vport;
	emlxs_event_queue_t *eventq = &EVENTQ;
	uint32_t i;
	uint32_t wakeup = 0;

	mutex_enter(&eventq->lock);

	/* Clear all event masks and broadcast a wakeup */
	/* to clear any sleeping threads */
	if (hba->event_mask) {
		hba->event_mask = 0;
		hba->event_timer = 0;
		wakeup = 1;
	}

	/* Clear per-port SAN diag masks on every virtual port too */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		if (vport->sd_event_mask) {
			vport->sd_event_mask = 0;
			wakeup = 1;
		}
	}

	if (wakeup) {
		cv_broadcast(&eventq->lock_cv);

		/* Drop the lock briefly so woken waiters can observe */
		/* the cleared masks and exit before teardown proceeds */
		mutex_exit(&eventq->lock);
		DELAYMS(10);
		mutex_enter(&eventq->lock);
	}

	/* Destroy the remaining events */
	/* (emlxs_event_destroy() unlinks eventq->first each pass, */
	/* so this loop terminates when the queue is empty) */
	while (eventq->first) {
		emlxs_event_destroy(hba, eventq->first);
	}

	mutex_exit(&eventq->lock);

	/* Destroy the queue lock */
	mutex_destroy(&eventq->lock);
	cv_destroy(&eventq->lock_cv);

	/* Clear the queue */
	bzero(eventq, sizeof (emlxs_event_queue_t));

	return;

} /* emlxs_event_queue_destroy() */
193 
194 
/*
 * emlxs_event_destroy()
 *
 * Unlink an event entry from the queue, account for missed LINK/RSCN
 * events that DFCLIB never retrieved, invoke the event's destroy
 * callback, and free the entry plus its context buffer.
 *
 * Event queue lock must be held.
 */
static void
emlxs_event_destroy(emlxs_hba_t *hba, emlxs_event_entry_t *entry)
{
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_port_t *port;		/* needed by EMLXS_CONTEXT below */
	uint32_t missed = 0;

	port = (emlxs_port_t *)entry->port;

	/* Unlink the entry from the doubly linked queue */
	eventq->count--;
	if (eventq->count == 0) {
		eventq->first = NULL;
		eventq->last = NULL;
	} else {
		if (entry->prev) {
			entry->prev->next = entry->next;
		}
		if (entry->next) {
			entry->next->prev = entry->prev;
		}
		if (eventq->first == entry) {
			eventq->first = entry->next;
		}
		if (eventq->last == entry) {
			eventq->last = entry->prev;
		}
	}

	entry->prev = NULL;
	entry->next = NULL;

	/* LINK/RSCN events feed the HBA-event counters; one that was */
	/* never marked DONE by DFCLIB counts as missed */
	if ((entry->evt->mask == EVT_LINK) ||
	    (entry->evt->mask == EVT_RSCN)) {
		if (!(entry->flag & EMLXS_DFC_EVENT_DONE)) {
			hba->hba_event.missed++;
			missed = 1;
		}
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_dequeued_msg,
	    "%s[%d]: flag=%x missed=%d cnt=%d",
	    entry->evt->label, entry->id, entry->flag, missed, eventq->count);

	/* Call notification handler */
	entry->evt->destroy(entry);

	/* Free context buffer */
	if (entry->bp && entry->size) {
		kmem_free(entry->bp, entry->size);
	}

	/* Free entry buffer */
	kmem_free(entry, sizeof (emlxs_event_entry_t));

	return;

} /* emlxs_event_destroy() */
253 
254 
255 extern void
256 emlxs_event(emlxs_port_t *port, emlxs_event_t *evt, void *bp, uint32_t size)
257 {
258 	emlxs_hba_t *hba = HBA;
259 	emlxs_event_queue_t *eventq = &EVENTQ;
260 	emlxs_event_entry_t *entry;
261 	uint32_t i;
262 	uint32_t mask;
263 
264 	if (emlxs_event_check(port, evt) == 0) {
265 		goto failed;
266 	}
267 
268 	/* Create event entry */
269 	if (!(entry = (emlxs_event_entry_t *)kmem_alloc(
270 	    sizeof (emlxs_event_entry_t), KM_NOSLEEP))) {
271 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
272 		    "%s: Unable to allocate event entry.", evt->label);
273 
274 		goto failed;
275 	}
276 
277 	/* Initialize */
278 	bzero(entry, sizeof (emlxs_event_entry_t));
279 
280 	entry->evt = evt;
281 	entry->port = (void *)port;
282 	entry->bp = bp;
283 	entry->size = size;
284 
285 	mutex_enter(&eventq->lock);
286 
287 	/* Set the event timer */
288 	entry->timestamp = hba->timer_tics;
289 	if (evt->timeout) {
290 		entry->timer = entry->timestamp + evt->timeout;
291 	}
292 
293 	/* Set the event id */
294 	entry->id = eventq->next_id++;
295 
296 	/* Set last event table */
297 	mask = evt->mask;
298 	for (i = 0; i < 32; i++) {
299 		if (mask & 0x01) {
300 			eventq->last_id[i] = entry->id;
301 		}
302 		mask >>= 1;
303 	}
304 
305 	/* Put event on bottom of queue */
306 	entry->next = NULL;
307 	if (eventq->count == 0) {
308 		entry->prev = NULL;
309 		eventq->first = entry;
310 		eventq->last = entry;
311 	} else {
312 		entry->prev = eventq->last;
313 		entry->prev->next = entry;
314 		eventq->last = entry;
315 	}
316 	eventq->count++;
317 
318 	if ((entry->evt->mask == EVT_LINK) ||
319 	    (entry->evt->mask == EVT_RSCN)) {
320 		hba->hba_event.new++;
321 	}
322 
323 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_queued_msg,
324 	    "%s[%d]: bp=%p size=%d cnt=%d", entry->evt->label,
325 	    entry->id, bp, size, eventq->count);
326 
327 	/* Broadcast the event */
328 	cv_broadcast(&eventq->lock_cv);
329 
330 	mutex_exit(&eventq->lock);
331 
332 	return;
333 
334 failed:
335 
336 	/* Call notification handler */
337 	entry->evt->destroy(entry);
338 
339 	if (entry->bp && entry->size) {
340 		kmem_free(entry->bp, entry->size);
341 	}
342 
343 	return;
344 
345 } /* emlxs_event() */
346 
347 
/*
 * emlxs_timer_check_events()
 *
 * Periodic timer callout: destroys any queued events whose timeout has
 * expired, then schedules the next check EMLXS_EVENT_PERIOD ticks out.
 */
extern void
emlxs_timer_check_events(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	emlxs_event_entry_t *next;

	/* Event timeouts can be disabled by configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Not yet time for the next check */
	if ((hba->event_timer > hba->timer_tics)) {
		return;
	}

	/* NOTE(review): count is read without eventq->lock; presumably a */
	/* stale read only defers expiry to the next period - confirm */
	if (eventq->count) {
		mutex_enter(&eventq->lock);

		entry = eventq->first;
		while (entry) {
			/* Skip entries with no timeout or not yet expired */
			if ((!entry->timer) ||
			    (entry->timer > hba->timer_tics)) {
				entry = entry->next;
				continue;
			}

			/* Event timed out, destroy it */
			/* (save next first: destroy frees the entry) */
			next = entry->next;
			emlxs_event_destroy(hba, entry);
			entry = next;
		}

		mutex_exit(&eventq->lock);
	}

	/* Set next event timer check */
	hba->event_timer = hba->timer_tics + EMLXS_EVENT_PERIOD;

	return;

} /* emlxs_timer_check_events() */
390 
391 
392 extern void
393 emlxs_log_rscn_event(emlxs_port_t *port, uint8_t *payload, uint32_t size)
394 {
395 	uint8_t *bp;
396 	uint32_t *ptr;
397 
398 	/* Check if the event is being requested */
399 	if (emlxs_event_check(port, &emlxs_rscn_event) == 0) {
400 		return;
401 	}
402 
403 	if (size > MAX_RSCN_PAYLOAD) {
404 		size = MAX_RSCN_PAYLOAD;
405 	}
406 
407 	size += sizeof (uint32_t);
408 
409 	/* Save a copy of the payload for the event log */
410 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
411 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
412 		    "%s: Unable to allocate buffer.", emlxs_rscn_event.label);
413 
414 		return;
415 	}
416 
417 	/*
418 	 * Buffer Format:
419 	 *	word[0] = DID of the RSCN
420 	 *	word[1] = RSCN Payload
421 	 */
422 	ptr = (uint32_t *)bp;
423 	*ptr++ = port->did;
424 	bcopy(payload, (char *)ptr, (size - sizeof (uint32_t)));
425 
426 	emlxs_event(port, &emlxs_rscn_event, bp, size);
427 
428 	return;
429 
430 } /* emlxs_log_rscn_event() */
431 
432 
433 extern void
434 emlxs_log_vportrscn_event(emlxs_port_t *port, uint8_t *payload, uint32_t size)
435 {
436 	uint8_t *bp;
437 	uint8_t *ptr;
438 
439 	/* Check if the event is being requested */
440 	if (emlxs_event_check(port, &emlxs_vportrscn_event) == 0) {
441 		return;
442 	}
443 
444 	if (size > MAX_RSCN_PAYLOAD) {
445 		size = MAX_RSCN_PAYLOAD;
446 	}
447 
448 	size += sizeof (NAME_TYPE);
449 
450 	/* Save a copy of the payload for the event log */
451 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
452 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
453 		    "%s: Unable to allocate buffer.",
454 		    emlxs_vportrscn_event.label);
455 
456 		return;
457 	}
458 
459 	/*
460 	 * Buffer Format:
461 	 *	word[0 - 4] = WWPN of the RSCN
462 	 *	word[5] = RSCN Payload
463 	 */
464 	ptr = bp;
465 	bcopy(&port->wwpn, ptr, sizeof (NAME_TYPE));
466 	ptr += sizeof (NAME_TYPE);
467 	bcopy(payload, ptr, (size - sizeof (NAME_TYPE)));
468 
469 	emlxs_event(port, &emlxs_vportrscn_event, bp, size);
470 
471 	return;
472 
473 } /* emlxs_log_vportrscn_event() */
474 
475 
476 extern uint32_t
477 emlxs_log_ct_event(emlxs_port_t *port, uint8_t *payload, uint32_t size,
478     uint32_t rxid)
479 {
480 	uint8_t *bp;
481 	uint32_t *ptr;
482 
483 	/* Check if the event is being requested */
484 	if (emlxs_event_check(port, &emlxs_ct_event) == 0) {
485 		return (1);
486 	}
487 
488 	if (size > MAX_CT_PAYLOAD) {
489 		size = MAX_CT_PAYLOAD;
490 	}
491 
492 	size += sizeof (uint32_t);
493 
494 	/* Save a copy of the payload for the event log */
495 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
496 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
497 		    "%s: Unable to allocate buffer.", emlxs_ct_event.label);
498 
499 		return (1);
500 	}
501 
502 	/*
503 	 * Buffer Format:
504 	 *	word[0] = RXID tag for outgoing reply to this CT request
505 	 *	word[1] = CT Payload
506 	 */
507 	ptr = (uint32_t *)bp;
508 	*ptr++ = rxid;
509 	bcopy(payload, (char *)ptr, (size - sizeof (uint32_t)));
510 
511 	emlxs_event(port, &emlxs_ct_event, bp, size);
512 
513 	return (0);
514 
515 } /* emlxs_log_ct_event() */
516 
517 
/*
 * emlxs_ct_event_destroy()
 *
 * Destroy callback for CT events.  If the event was never retrieved by
 * DFCLIB (EMLXS_DFC_EVENT_DONE not set), no reply will ever be sent,
 * so spawn a thread to abort the pending CT exchange whose RXID is
 * stored in the first word of the event buffer.
 */
extern void
emlxs_ct_event_destroy(emlxs_event_entry_t *entry)
{
	emlxs_port_t *port = (emlxs_port_t *)entry->port;
	emlxs_hba_t *hba = HBA;
	uint32_t rxid;

	if (!(entry->flag & EMLXS_DFC_EVENT_DONE)) {

		/* word[0] of the event buffer is the exchange RXID */
		rxid = *(uint32_t *)entry->bp;

		/* Abort exchange */
		emlxs_thread_spawn(hba, emlxs_abort_ct_exchange,
		    entry->port, (void *)(unsigned long)rxid);
	}

	return;

} /* emlxs_ct_event_destroy() */
537 
538 
/*
 * emlxs_log_link_event()
 *
 * Queue a link state event.  The logged buffer packs linkspeed, LIP
 * type and two reserved bytes into the first word, followed by a
 * dfc_linkinfo_t snapshot of the current link state, topology, DID
 * and ALPA map.
 */
extern void
emlxs_log_link_event(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint8_t *bp;
	dfc_linkinfo_t *linkinfo;
	uint8_t *byte;
	uint8_t *linkspeed;
	uint8_t *liptype;
	uint8_t *resv1;
	uint8_t *resv2;
	uint32_t size;

	/* Check if the event is being requested */
	if (emlxs_event_check(port, &emlxs_link_event) == 0) {
		return;
	}

	size = sizeof (dfc_linkinfo_t) + sizeof (uint32_t);

	/* Save a copy of the buffer for the event log */
	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
		    "%s: Unable to allocate buffer.", emlxs_link_event.label);

		return;
	}

	/*
	 * Buffer Format:
	 *	word[0] = Linkspeed:8
	 *	word[0] = LIP_type:8
	 *	word[0] = resv1:8
	 *	word[0] = resv2:8
	 *	word[1] = dfc_linkinfo_t data
	 */
	byte = (uint8_t *)bp;
	linkspeed = &byte[0];
	liptype = &byte[1];
	resv1 = &byte[2];
	resv2 = &byte[3];
	linkinfo = (dfc_linkinfo_t *)&byte[4];

	*resv1 = 0;
	*resv2 = 0;

	/* Speed and LIP type are only meaningful when the link is up */
	if (hba->state <= FC_LINK_DOWN) {
		*linkspeed = 0;
		*liptype = 0;
	} else {
		/* Set linkspeed */
		if (hba->linkspeed == LA_2GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_2GBIT;
		} else if (hba->linkspeed == LA_4GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_4GBIT;
		} else if (hba->linkspeed == LA_8GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_8GBIT;
		} else if (hba->linkspeed == LA_10GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_10GBIT;
		} else {
			/* default: report 1Gbit */
			*linkspeed = HBA_PORTSPEED_1GBIT;
		}

		/* Set LIP type */
		*liptype = port->lip_type;
	}

	bzero(linkinfo, sizeof (dfc_linkinfo_t));

	linkinfo->a_linkEventTag = hba->link_event_tag;
	linkinfo->a_linkUp = HBASTATS.LinkUp;
	linkinfo->a_linkDown = HBASTATS.LinkDown;
	linkinfo->a_linkMulti = HBASTATS.LinkMultiEvent;

	/* Map HBA state to DFC link state */
	if (hba->state <= FC_LINK_DOWN) {
		linkinfo->a_linkState = LNK_DOWN;
		linkinfo->a_DID = port->prev_did;
	} else if (hba->state < FC_READY) {
		linkinfo->a_linkState = LNK_DISCOVERY;
	} else {
		linkinfo->a_linkState = LNK_READY;
	}

	/* Topology/DID/ALPA data only applies while the link is up */
	if (linkinfo->a_linkState != LNK_DOWN) {
		if (hba->topology == TOPOLOGY_LOOP) {
			if (hba->flag & FC_FABRIC_ATTACHED) {
				linkinfo->a_topology = LNK_PUBLIC_LOOP;
			} else {
				linkinfo->a_topology = LNK_LOOP;
			}

			linkinfo->a_alpa = port->did & 0xff;
			linkinfo->a_DID = linkinfo->a_alpa;
			/* alpa_map[0] holds the entry count */
			linkinfo->a_alpaCnt = port->alpa_map[0];

			/* Clamp to the a_alpaMap capacity */
			if (linkinfo->a_alpaCnt > 127) {
				linkinfo->a_alpaCnt = 127;
			}

			bcopy((void *)&port->alpa_map[1], linkinfo->a_alpaMap,
			    linkinfo->a_alpaCnt);
		} else {
			if (port->node_count == 1) {
				linkinfo->a_topology = LNK_PT2PT;
			} else {
				linkinfo->a_topology = LNK_FABRIC;
			}

			linkinfo->a_DID = port->did;
		}
	}

	bcopy(&hba->wwpn, linkinfo->a_wwpName, 8);
	bcopy(&hba->wwnn, linkinfo->a_wwnName, 8);

	emlxs_event(port, &emlxs_link_event, bp, size);

	return;

} /* emlxs_log_link_event() */
659 
660 
661 extern void
662 emlxs_log_dump_event(emlxs_port_t *port, uint8_t *buffer, uint32_t size)
663 {
664 	emlxs_hba_t *hba = HBA;
665 	uint8_t *bp;
666 
667 	/* Check if the event is being requested */
668 	if (emlxs_event_check(port, &emlxs_dump_event) == 0) {
669 #ifdef DUMP_SUPPORT
670 		/* Schedule a dump thread */
671 		emlxs_dump(hba, EMLXS_DRV_DUMP, 0, 0);
672 #endif /* DUMP_SUPPORT */
673 		return;
674 	}
675 
676 	if (buffer && size) {
677 		/* Save a copy of the buffer for the event log */
678 		if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
679 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
680 			    "%s: Unable to allocate buffer.",
681 			    emlxs_dump_event.label);
682 
683 			return;
684 		}
685 
686 		bcopy(buffer, bp, size);
687 	} else {
688 		bp = NULL;
689 		size = 0;
690 	}
691 
692 	emlxs_event(port, &emlxs_dump_event, bp, size);
693 
694 	return;
695 
696 } /* emlxs_log_dump_event() */
697 
698 
699 extern void
700 emlxs_log_temp_event(emlxs_port_t *port, uint32_t type, uint32_t temp)
701 {
702 	emlxs_hba_t *hba = HBA;
703 	uint32_t *bp;
704 	uint32_t size;
705 
706 	/* Check if the event is being requested */
707 	if (emlxs_event_check(port, &emlxs_temp_event) == 0) {
708 #ifdef DUMP_SUPPORT
709 		/* Schedule a dump thread */
710 		emlxs_dump(hba, EMLXS_TEMP_DUMP, type, temp);
711 #endif /* DUMP_SUPPORT */
712 		return;
713 	}
714 
715 	size = 2 * sizeof (uint32_t);
716 
717 	if (!(bp = (uint32_t *)kmem_alloc(size, KM_NOSLEEP))) {
718 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
719 		    "%s: Unable to allocate buffer.", emlxs_temp_event.label);
720 
721 		return;
722 	}
723 
724 	bp[0] = type;
725 	bp[1] = temp;
726 
727 	emlxs_event(port, &emlxs_temp_event, bp, size);
728 
729 	return;
730 
731 } /* emlxs_log_temp_event() */
732 
733 
734 
735 extern void
736 emlxs_log_fcoe_event(emlxs_port_t *port, menlo_init_rsp_t *init_rsp)
737 {
738 	emlxs_hba_t *hba = HBA;
739 	uint8_t *bp;
740 	uint32_t size;
741 
742 	/* Check if the event is being requested */
743 	if (emlxs_event_check(port, &emlxs_fcoe_event) == 0) {
744 		return;
745 	}
746 
747 	/* Check if this is a FCOE adapter */
748 	if (hba->model_info.device_id != PCI_DEVICE_ID_LP21000_M) {
749 		return;
750 	}
751 
752 	size = sizeof (menlo_init_rsp_t);
753 
754 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
755 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
756 		    "%s: Unable to allocate buffer.", emlxs_fcoe_event.label);
757 
758 		return;
759 	}
760 
761 	bcopy((uint8_t *)init_rsp, bp, size);
762 
763 	emlxs_event(port, &emlxs_fcoe_event, bp, size);
764 
765 	return;
766 
767 } /* emlxs_log_fcoe_event() */
768 
769 
770 extern void
771 emlxs_log_async_event(emlxs_port_t *port, IOCB *iocb)
772 {
773 	uint8_t *bp;
774 	uint32_t size;
775 
776 	if (emlxs_event_check(port, &emlxs_async_event) == 0) {
777 		return;
778 	}
779 
780 	/* ASYNC_STATUS_CN response size */
781 	size = 64;
782 
783 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
784 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
785 		    "%s: Unable to allocate buffer.", emlxs_async_event.label);
786 
787 		return;
788 	}
789 
790 	bcopy((uint8_t *)iocb, bp, size);
791 
792 	emlxs_event(port, &emlxs_async_event, bp, size);
793 
794 	return;
795 
796 } /* emlxs_log_async_event() */
797 
798 
/*
 * emlxs_get_dfc_eventinfo()
 *
 * Harvest new LINK and RSCN events from the queue into the caller's
 * HBA_EVENTINFO array.  On entry *eventcount is the array capacity;
 * on return it is the number of entries filled.  *missed returns (and
 * clears) the count of LINK/RSCN events destroyed before retrieval.
 *
 * Returns 0 on success, DFC_ARG_NULL if any output pointer is NULL.
 */
extern uint32_t
emlxs_get_dfc_eventinfo(emlxs_port_t *port, HBA_EVENTINFO *eventinfo,
    uint32_t *eventcount, uint32_t *missed)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t max_events;
	dfc_linkinfo_t *linkinfo;
	uint32_t *word;
	uint8_t *byte;
	uint8_t linkspeed;
	uint8_t liptype;
	fc_affected_id_t *aid;
	uint32_t events;
	uint8_t format;

	if (!eventinfo || !eventcount || !missed) {
		return (DFC_ARG_NULL);
	}

	max_events = *eventcount;
	*eventcount = 0;
	*missed = 0;

	mutex_enter(&eventq->lock);

	/* Account for missed events */
	if (hba->hba_event.new > hba->hba_event.missed) {
		hba->hba_event.new -= hba->hba_event.missed;
	} else {
		hba->hba_event.new = 0;
	}

	*missed = hba->hba_event.missed;
	hba->hba_event.missed = 0;

	/* Nothing new: just advance last_id to the newest id */
	if (!hba->hba_event.new) {
		hba->hba_event.last_id = eventq->next_id - 1;
		mutex_exit(&eventq->lock);
		return (0);
	}

	/* A new event has occurred since last acquisition */

	events = 0;
	entry = eventq->first;
	while (entry && (events < max_events)) {

		/* Skip old events */
		if (entry->id <= hba->hba_event.last_id) {
			entry = entry->next;
			continue;
		}

		/* Process this entry */
		/* (only LINK and RSCN events are reported here; */
		/* other event types are skipped but still advance last_id) */
		switch (entry->evt->mask) {
		case EVT_LINK:
			/* Unpack the buffer laid out by */
			/* emlxs_log_link_event() */
			byte = (uint8_t *)entry->bp;
			linkspeed = byte[0];
			liptype = byte[1];
			linkinfo = (dfc_linkinfo_t *)&byte[4];

			if (linkinfo->a_linkState == LNK_DOWN) {
				eventinfo->EventCode =
				    HBA_EVENT_LINK_DOWN;
				eventinfo->Event.Link_EventInfo.
				    PortFcId = linkinfo->a_DID;
				eventinfo->Event.Link_EventInfo.
				    Reserved[0] = 0;
				eventinfo->Event.Link_EventInfo.
				    Reserved[1] = 0;
				eventinfo->Event.Link_EventInfo.
				    Reserved[2] = 0;
			} else {
				eventinfo->EventCode =
				    HBA_EVENT_LINK_UP;
				eventinfo->Event.Link_EventInfo.
				    PortFcId = linkinfo->a_DID;

				/* Reserved[0]: 2 = loop, 1 = non-loop */
				if ((linkinfo->a_topology ==
				    LNK_PUBLIC_LOOP) ||
				    (linkinfo->a_topology ==
				    LNK_LOOP)) {
					eventinfo->Event.
					    Link_EventInfo.
					    Reserved[0] = 2;
				} else {
					eventinfo->Event.
					    Link_EventInfo.
					    Reserved[0] = 1;
				}

				eventinfo->Event.Link_EventInfo.
				    Reserved[1] = liptype;
				eventinfo->Event.Link_EventInfo.
				    Reserved[2] = linkspeed;
			}

			eventinfo++;
			events++;
			hba->hba_event.new--;
			break;

		case EVT_RSCN:
			/* Unpack the buffer laid out by */
			/* emlxs_log_rscn_event() */
			word = (uint32_t *)entry->bp;
			eventinfo->EventCode = HBA_EVENT_RSCN;
			eventinfo->Event.RSCN_EventInfo.PortFcId =
			    word[0] & 0xFFFFFF;
			/* word[1] is the RSCN payload command */

			aid = (fc_affected_id_t *)&word[2];
			format = aid->aff_format;

			/* Mask the affected D_ID by address format */
			switch (format) {
			case 0:	/* Port */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ffffff;
				break;

			case 1:	/* Area */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ffff00;
				break;

			case 2:	/* Domain */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ff0000;
				break;

			case 3:	/* Network */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage = 0;
				break;
			}

			eventinfo->Event.RSCN_EventInfo.Reserved[0] =
			    0;
			eventinfo->Event.RSCN_EventInfo.Reserved[1] =
			    0;

			eventinfo++;
			events++;
			hba->hba_event.new--;
			break;
		}

		hba->hba_event.last_id = entry->id;
		entry = entry->next;
	}

	/* Return number of events acquired */
	*eventcount = events;

	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_get_dfc_eventinfo() */
961 
962 
/*
 * emlxs_get_dfc_event()
 *
 * Retrieve the next event of type dfc_event->event that is newer than
 * dfc_event->last_id.  If "sleep" is set and no new event exists,
 * block on the event queue cv until one is logged or the event type
 * is deregistered.  If dataout/size are set, the event's context
 * buffer is copied out to the caller and the entry is marked DONE;
 * otherwise only last_id is advanced.
 *
 * Returns 0 on success (including "no event"), DFC_ARG_INVALID if no
 * event type was given, or DFC_COPYOUT_ERROR on copyout failure.
 */
uint32_t
emlxs_get_dfc_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event,
    uint32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t found;
	uint32_t mask;
	uint32_t i;
	uint32_t size = 0;
	uint32_t rc;

	if (dfc_event->dataout && dfc_event->size) {
		size = dfc_event->size;
	}
	dfc_event->size = 0;

	if (!dfc_event->event) {
		return (DFC_ARG_INVALID);
	}

	/* Calculate the event index */
	/* (index of the lowest set bit; each event type has its own */
	/* slot in eventq->last_id[]) */
	mask = dfc_event->event;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			break;
		}

		mask >>= 1;
	}

	mutex_enter(&eventq->lock);

wait_for_event:

	/* Check if no new event has occurred */
	if (dfc_event->last_id == eventq->last_id[i]) {
		if (!sleep) {
			mutex_exit(&eventq->lock);
			return (0);
		}

		/* While event is still active and */
		/* no new event has been logged */
		while ((dfc_event->event & hba->event_mask) &&
		    (dfc_event->last_id == eventq->last_id[i])) {

			rc = cv_wait_sig(&eventq->lock_cv, &eventq->lock);

			/* Check if thread was killed by kernel */
			if (rc == 0) {
				dfc_event->pid = 0;
				dfc_event->event = 0;
				mutex_exit(&eventq->lock);
				return (0);
			}
		}

		/* If the event is no longer registered then */
		/* return immediately */
		if (!(dfc_event->event & hba->event_mask)) {
			mutex_exit(&eventq->lock);
			return (0);
		}
	}

	/* !!! An event has occurred since last_id !!! */

	/* Check if event data is not being requested */
	if (!size) {
		/* If so, then just return the last event id */
		dfc_event->last_id = eventq->last_id[i];

		mutex_exit(&eventq->lock);
		return (0);
	}

	/* !!! The requester wants the next event buffer !!! */

	found = 0;
	entry = eventq->first;
	while (entry) {
		if ((entry->id > dfc_event->last_id) &&
		    (entry->evt->mask == dfc_event->event)) {
			found = 1;
			break;
		}

		entry = entry->next;
	}

	if (!found) {
		/* The matching entry has already been destroyed */
		/* Update last_id to the last known event */
		dfc_event->last_id = eventq->last_id[i];

		/* Try waiting again if we can */
		goto wait_for_event;
	}

	/* !!! Next event found !!! */

	/* Copy the context buffer to the buffer provided */
	/* (copy out at most min(entry->size, caller size) bytes) */
	if (entry->bp && entry->size) {
		if (entry->size < size) {
			size = entry->size;
		}

		/* NOTE(review): ddi_copyout() runs with eventq->lock */
		/* held and may fault - confirm the lock is adaptive */
		if (ddi_copyout((void *)entry->bp, dfc_event->dataout, size,
		    dfc_event->mode) != 0) {
			mutex_exit(&eventq->lock);

			return (DFC_COPYOUT_ERROR);
		}

		/* Event has been retrieved by DFCLIB */
		entry->flag |= EMLXS_DFC_EVENT_DONE;

		dfc_event->size = size;
	}

	dfc_event->last_id = entry->id;

	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_get_dfc_event() */
1091 
1092 
1093 uint32_t
1094 emlxs_kill_dfc_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event)
1095 {
1096 	emlxs_hba_t *hba = HBA;
1097 	emlxs_event_queue_t *eventq = &EVENTQ;
1098 
1099 	mutex_enter(&eventq->lock);
1100 	dfc_event->pid = 0;
1101 	dfc_event->event = 0;
1102 	cv_broadcast(&eventq->lock_cv);
1103 	mutex_exit(&eventq->lock);
1104 
1105 	return (0);
1106 
1107 } /* emlxs_kill_dfc_event() */
1108 
1109 
1110 #ifdef SAN_DIAG_SUPPORT
1111 extern void
1112 emlxs_log_sd_basic_els_event(emlxs_port_t *port, uint32_t subcat,
1113     HBA_WWN *portname, HBA_WWN *nodename)
1114 {
1115 	struct sd_plogi_rcv_v0	*bp;
1116 	uint32_t		size;
1117 
1118 	/* Check if the event is being requested */
1119 	if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1120 		return;
1121 	}
1122 
1123 	size = sizeof (struct sd_plogi_rcv_v0);
1124 
1125 	if (!(bp = (struct sd_plogi_rcv_v0 *)kmem_alloc(size, KM_NOSLEEP))) {
1126 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1127 		    "%s: Unable to allocate buffer.", emlxs_sd_els_event.label);
1128 
1129 		return;
1130 	}
1131 
1132 	/*
1133 	 * we are using version field to store subtype, libdfc
1134 	 * will fix this up before returning data to app.
1135 	 */
1136 	bp->sd_plogir_version = subcat;
1137 	bcopy((uint8_t *)portname, (uint8_t *)&bp->sd_plogir_portname,
1138 	    sizeof (HBA_WWN));
1139 	bcopy((uint8_t *)nodename, (uint8_t *)&bp->sd_plogir_nodename,
1140 	    sizeof (HBA_WWN));
1141 
1142 	emlxs_event(port, &emlxs_sd_els_event, bp, size);
1143 
1144 	return;
1145 
1146 } /* emlxs_log_sd_basic_els_event() */
1147 
1148 
1149 extern void
1150 emlxs_log_sd_prlo_event(emlxs_port_t *port, HBA_WWN *remoteport)
1151 {
1152 	struct sd_prlo_rcv_v0	*bp;
1153 	uint32_t		size;
1154 
1155 	/* Check if the event is being requested */
1156 	if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1157 		return;
1158 	}
1159 
1160 	size = sizeof (struct sd_prlo_rcv_v0);
1161 
1162 	if (!(bp = (struct sd_prlo_rcv_v0 *)kmem_alloc(size,
1163 	    KM_NOSLEEP))) {
1164 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1165 		    "%s PRLO: Unable to allocate buffer.",
1166 		    emlxs_sd_els_event.label);
1167 
1168 		return;
1169 	}
1170 
1171 	/*
1172 	 * we are using version field to store subtype, libdfc
1173 	 * will fix this up before returning data to app.
1174 	 */
1175 	bp->sd_prlor_version = SD_ELS_SUBCATEGORY_PRLO_RCV;
1176 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_prlor_remoteport,
1177 	    sizeof (HBA_WWN));
1178 
1179 	emlxs_event(port, &emlxs_sd_els_event, bp, size);
1180 
1181 	return;
1182 
1183 } /* emlxs_log_sd_prlo_event() */
1184 
1185 
1186 extern void
1187 emlxs_log_sd_lsrjt_event(emlxs_port_t *port, HBA_WWN *remoteport,
1188     uint32_t orig_cmd, uint32_t reason, uint32_t reason_expl)
1189 {
1190 	struct sd_lsrjt_rcv_v0	*bp;
1191 	uint32_t		size;
1192 
1193 	/* Check if the event is being requested */
1194 	if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1195 		return;
1196 	}
1197 
1198 	size = sizeof (struct sd_lsrjt_rcv_v0);
1199 
1200 	if (!(bp = (struct sd_lsrjt_rcv_v0 *)kmem_alloc(size,
1201 	    KM_NOSLEEP))) {
1202 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1203 		    "%s LSRJT: Unable to allocate buffer.",
1204 		    emlxs_sd_els_event.label);
1205 
1206 		return;
1207 	}
1208 
1209 	/*
1210 	 * we are using version field to store subtype, libdfc
1211 	 * will fix this up before returning data to app.
1212 	 */
1213 	bp->sd_lsrjtr_version = SD_ELS_SUBCATEGORY_LSRJT_RCV;
1214 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_lsrjtr_remoteport,
1215 	    sizeof (HBA_WWN));
1216 	bp->sd_lsrjtr_original_cmd = orig_cmd;
1217 	bp->sd_lsrjtr_reasoncode = reason;
1218 	bp->sd_lsrjtr_reasoncodeexpl = reason_expl;
1219 
1220 	emlxs_event(port, &emlxs_sd_els_event, bp, size);
1221 
1222 	return;
1223 
1224 } /* emlxs_log_sd_lsrjt_event() */
1225 
1226 
1227 extern void
1228 emlxs_log_sd_fc_bsy_event(emlxs_port_t *port, HBA_WWN *remoteport)
1229 {
1230 	struct sd_pbsy_rcv_v0	*bp;
1231 	uint32_t		size;
1232 
1233 	/* Check if the event is being requested */
1234 	if (emlxs_event_check(port, &emlxs_sd_fabric_event) == 0) {
1235 		return;
1236 	}
1237 
1238 	size = sizeof (struct sd_pbsy_rcv_v0);
1239 
1240 	if (!(bp = (struct sd_pbsy_rcv_v0 *)kmem_alloc(size,
1241 	    KM_NOSLEEP))) {
1242 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1243 		    "%s BSY: Unable to allocate buffer.",
1244 		    emlxs_sd_fabric_event.label);
1245 
1246 		return;
1247 	}
1248 
1249 	/*
1250 	 * we are using version field to store subtype, libdfc
1251 	 * will fix this up before returning data to app.
1252 	 */
1253 	if (remoteport == NULL)
1254 		bp->sd_pbsyr_evt_version = SD_FABRIC_SUBCATEGORY_FABRIC_BUSY;
1255 	else
1256 	{
1257 		bp->sd_pbsyr_evt_version = SD_FABRIC_SUBCATEGORY_PORT_BUSY;
1258 		bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_pbsyr_rport,
1259 		    sizeof (HBA_WWN));
1260 	}
1261 
1262 	emlxs_event(port, &emlxs_sd_fabric_event, bp, size);
1263 
1264 	return;
1265 
1266 } /* emlxs_log_sd_fc_bsy_event() */
1267 
1268 
1269 extern void
1270 emlxs_log_sd_fc_rdchk_event(emlxs_port_t *port, HBA_WWN *remoteport,
1271     uint32_t lun, uint32_t opcode, uint32_t fcp_param)
1272 {
1273 	struct sd_fcprdchkerr_v0	*bp;
1274 	uint32_t			size;
1275 
1276 	/* Check if the event is being requested */
1277 	if (emlxs_event_check(port, &emlxs_sd_fabric_event) == 0) {
1278 		return;
1279 	}
1280 
1281 	size = sizeof (struct sd_fcprdchkerr_v0);
1282 
1283 	if (!(bp = (struct sd_fcprdchkerr_v0 *)kmem_alloc(size,
1284 	    KM_NOSLEEP))) {
1285 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1286 		    "%s RDCHK: Unable to allocate buffer.",
1287 		    emlxs_sd_fabric_event.label);
1288 
1289 		return;
1290 	}
1291 
1292 	/*
1293 	 * we are using version field to store subtype, libdfc
1294 	 * will fix this up before returning data to app.
1295 	 */
1296 	bp->sd_fcprdchkerr_version = SD_FABRIC_SUBCATEGORY_FCPRDCHKERR;
1297 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_fcprdchkerr_rport,
1298 	    sizeof (HBA_WWN));
1299 	bp->sd_fcprdchkerr_lun = lun;
1300 	bp->sd_fcprdchkerr_opcode = opcode;
1301 	bp->sd_fcprdchkerr_fcpiparam = fcp_param;
1302 
1303 	emlxs_event(port, &emlxs_sd_fabric_event, bp, size);
1304 
1305 	return;
1306 
1307 } /* emlxs_log_sd_rdchk_event() */
1308 
1309 
1310 extern void
1311 emlxs_log_sd_scsi_event(emlxs_port_t *port, uint32_t type,
1312     HBA_WWN *remoteport, int32_t lun)
1313 {
1314 	struct sd_scsi_generic_v0	*bp;
1315 	uint32_t			size;
1316 
1317 	/* Check if the event is being requested */
1318 	if (emlxs_event_check(port, &emlxs_sd_scsi_event) == 0) {
1319 		return;
1320 	}
1321 
1322 	size = sizeof (struct sd_scsi_generic_v0);
1323 
1324 	if (!(bp = (struct sd_scsi_generic_v0 *)kmem_alloc(size,
1325 	    KM_NOSLEEP))) {
1326 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1327 		    "%s: Unable to allocate buffer.",
1328 		    emlxs_sd_scsi_event.label);
1329 
1330 		return;
1331 	}
1332 
1333 	/*
1334 	 * we are using version field to store subtype, libdfc
1335 	 * will fix this up before returning data to app.
1336 	 */
1337 	bp->sd_scsi_generic_version = type;
1338 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_scsi_generic_rport,
1339 	    sizeof (HBA_WWN));
1340 	bp->sd_scsi_generic_lun = lun;
1341 
1342 	emlxs_event(port, &emlxs_sd_scsi_event, bp, size);
1343 
1344 	return;
1345 
1346 } /* emlxs_log_sd_scsi_event() */
1347 
1348 
1349 extern void
1350 emlxs_log_sd_scsi_check_event(emlxs_port_t *port, HBA_WWN *remoteport,
1351     uint32_t lun, uint32_t cmdcode, uint32_t sensekey,
1352     uint32_t asc, uint32_t ascq)
1353 {
1354 	struct sd_scsi_checkcond_v0	*bp;
1355 	uint32_t			size;
1356 
1357 	/* Check if the event is being requested */
1358 	if (emlxs_event_check(port, &emlxs_sd_scsi_event) == 0) {
1359 		return;
1360 	}
1361 
1362 	size = sizeof (struct sd_scsi_checkcond_v0);
1363 
1364 	if (!(bp = (struct sd_scsi_checkcond_v0 *)kmem_alloc(size,
1365 	    KM_NOSLEEP))) {
1366 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1367 		    "%s CHECK: Unable to allocate buffer.",
1368 		    emlxs_sd_scsi_event.label);
1369 
1370 		return;
1371 	}
1372 
1373 	/*
1374 	 * we are using version field to store subtype, libdfc
1375 	 * will fix this up before returning data to app.
1376 	 */
1377 	bp->sd_scsi_checkcond_version = SD_SCSI_SUBCATEGORY_CHECKCONDITION;
1378 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_scsi_checkcond_rport,
1379 	    sizeof (HBA_WWN));
1380 	bp->sd_scsi_checkcond_lun = lun;
1381 	bp->sd_scsi_checkcond_cmdcode = cmdcode;
1382 	bp->sd_scsi_checkcond_sensekey = sensekey;
1383 	bp->sd_scsi_checkcond_asc = asc;
1384 	bp->sd_scsi_checkcond_ascq = ascq;
1385 
1386 	emlxs_event(port, &emlxs_sd_scsi_event, bp, size);
1387 
1388 	return;
1389 
1390 } /* emlxs_log_sd_scsi_check_event() */
1391 
1392 
/*
 * Deliver the next SAN Diag event (and, optionally, its payload) for
 * this port to a waiting application.
 *
 * dfc_event->event is a single-bit mask naming the event class;
 * dfc_event->last_id is the id of the last event the caller has seen.
 * If dfc_event->dataout and dfc_event->size are both set, the next
 * matching event's payload is copied out and dfc_event->size updated
 * to the number of bytes copied; otherwise only last_id is refreshed.
 * When 'sleep' is nonzero the call blocks on the event queue's condvar
 * until a new event arrives, the event is deregistered, or the wait is
 * interrupted by a signal.
 *
 * Returns 0 on success (including "no new event"), DFC_ARG_INVALID if
 * no event bit is set, or DFC_COPYOUT_ERROR if the payload copyout
 * fails.
 */
uint32_t
emlxs_get_sd_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event,
    uint32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t found;
	uint32_t mask;
	uint32_t i;
	uint32_t size = 0;	/* caller buffer size; 0 = id-only query */
	uint32_t rc;

	/* Honor the data buffer only when both pointer and size are set */
	if (dfc_event->dataout && dfc_event->size) {
		size = dfc_event->size;
	}
	dfc_event->size = 0;

	if (!dfc_event->event) {
		return (DFC_ARG_INVALID);
	}

	/* Calculate the event index (bit position of the event mask) */
	mask = dfc_event->event;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			break;
		}

		mask >>= 1;
	}

	mutex_enter(&eventq->lock);

wait_for_event:

	/* Check if no new event has occurred */
	if (dfc_event->last_id == eventq->last_id[i]) {
		if (!sleep) {
			mutex_exit(&eventq->lock);
			return (0);
		}

		/* While event is active and no new event has been logged */
		while ((dfc_event->event & port->sd_event_mask) &&
		    (dfc_event->last_id == eventq->last_id[i])) {
			/* Releases eventq->lock while blocked */
			rc = cv_wait_sig(&eventq->lock_cv, &eventq->lock);

			/* Check if thread was killed by kernel */
			if (rc == 0) {
				dfc_event->pid = 0;
				dfc_event->event = 0;
				mutex_exit(&eventq->lock);
				return (0);
			}
		}

		/* If the event is no longer registered then return */
		if (!(dfc_event->event & port->sd_event_mask)) {
			mutex_exit(&eventq->lock);
			return (0);
		}
	}

	/* !!! An event has occurred since last_id !!! */

	/* Check if event data is not being requested */
	if (!size) {
		/* If so, then just return the last event id */
		dfc_event->last_id = eventq->last_id[i];

		mutex_exit(&eventq->lock);
		return (0);
	}

	/* !!! The requester wants the next event buffer !!! */

	/* Scan for the oldest unseen event of this class for this port */
	found = 0;
	entry = eventq->first;
	while (entry) {
		if ((entry->id > dfc_event->last_id) &&
		    (entry->port == (void *)port) &&
		    (entry->evt->mask == dfc_event->event)) {
			found = 1;
			break;
		}

		entry = entry->next;
	}

	if (!found) {
		/* Update last_id to the last known event */
		dfc_event->last_id = eventq->last_id[i];

		/* Try waiting again if we can */
		goto wait_for_event;
	}

	/* !!! Next event found !!! */

	/* Copy the context buffer to the buffer provided */
	if (entry->bp && entry->size) {
		/* Clamp the copy to the smaller of the two buffers */
		if (entry->size < size) {
			size = entry->size;
		}

		if (ddi_copyout((void *) entry->bp, dfc_event->dataout,
		    size, dfc_event->mode) != 0) {
			mutex_exit(&eventq->lock);

			return (DFC_COPYOUT_ERROR);
		}

		/* Event has been retrieved by SANDIAG */
		entry->flag |= EMLXS_SD_EVENT_DONE;

		dfc_event->size = size;
	}

	dfc_event->last_id = entry->id;

	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_get_sd_event */
1519 #endif /* SAN_DIAG_SUPPORT */
1520