xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_event.c (revision a3170057524922242772a15fbeb3e91f5f8d4744)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2020 RackTop Systems, Inc.
26  */
27 
28 #define	DEF_EVENT_STRUCT  /* Needed for emlxs_events.h in emlxs_event.h */
29 #include <emlxs.h>
30 
31 
32 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33 EMLXS_MSG_DEF(EMLXS_EVENT_C);
34 
35 
36 static uint32_t emlxs_event_check(emlxs_port_t *port, emlxs_event_t *evt);
37 static void emlxs_event_destroy(emlxs_hba_t *hba, emlxs_event_entry_t *entry);
38 
/*
 * No-op function.  Used as the sentinel "no handler" value for event
 * destroy callbacks (see the comparison in emlxs_event_destroy()).
 */
extern void
emlxs_null_func() {}
41 
42 
43 static uint32_t
emlxs_event_check(emlxs_port_t * port,emlxs_event_t * evt)44 emlxs_event_check(emlxs_port_t *port, emlxs_event_t *evt)
45 {
46 	emlxs_hba_t *hba = HBA;
47 
48 	/* Check if the event is being requested */
49 	if ((hba->event_mask & evt->mask)) {
50 		return (1);
51 	}
52 
53 #ifdef SAN_DIAG_SUPPORT
54 	if ((port->sd_event_mask & evt->mask)) {
55 		return (1);
56 	}
57 #endif /* SAN_DIAG_SUPPORT */
58 
59 	return (0);
60 
61 } /* emlxs_event_check() */
62 
63 
/*
 * emlxs_event_queue_create() - Initialize the HBA event queue.
 *
 * Zeroes the queue, initializes its condition variable, and creates
 * the queue mutex at the proper interrupt priority: the fixed-interrupt
 * iblock cookie when MSI is not enabled, or the MSI priority otherwise.
 * Always returns 1.
 */
extern uint32_t
emlxs_event_queue_create(emlxs_hba_t *hba)
{
	emlxs_event_queue_t *eventq = &EVENTQ;
	ddi_iblock_cookie_t iblock;

	/* Clear the queue */
	bzero(eventq, sizeof (emlxs_event_queue_t));

	cv_init(&eventq->lock_cv, NULL, CV_DRIVER, NULL);

	if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
		/* Get the current interrupt block cookie */
		(void) ddi_get_iblock_cookie(hba->dip, (uint_t)EMLXS_INUMBER,
		    &iblock);

		/* Create the mutex lock */
		mutex_init(&eventq->lock, NULL, MUTEX_DRIVER, (void *)iblock);
	}
#ifdef  MSI_SUPPORT
	else {
		/* Create event mutex lock */
		mutex_init(&eventq->lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(hba->intr_arg));
	}
#endif

	return (1);

} /* emlxs_event_queue_create() */
94 
95 
/*
 * emlxs_event_queue_destroy() - Tear down the HBA event queue.
 *
 * Clears all registered event masks (adapter-wide and per-vport),
 * wakes any threads sleeping on the queue CV so they can exit,
 * destroys every remaining queued event, and finally releases the
 * queue mutex and condition variable.
 */
extern void
emlxs_event_queue_destroy(emlxs_hba_t *hba)
{
	emlxs_port_t *vport;
	emlxs_event_queue_t *eventq = &EVENTQ;
	uint32_t i;
	uint32_t wakeup = 0;

	mutex_enter(&eventq->lock);

	/* Clear all event masks and broadcast a wakeup */
	/* to clear any sleeping threads */
	if (hba->event_mask) {
		hba->event_mask = 0;
		hba->event_timer = 0;
		wakeup = 1;
	}

	/* Clear the SAN-diag event mask on every virtual port too */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		if (vport->sd_event_mask) {
			vport->sd_event_mask = 0;
			wakeup = 1;
		}
	}

	if (wakeup) {
		cv_broadcast(&eventq->lock_cv);

		/* Drop the lock briefly so woken waiters can run and exit */
		mutex_exit(&eventq->lock);
		BUSYWAIT_MS(10);
		mutex_enter(&eventq->lock);
	}

	/* Destroy the remaining events */
	while (eventq->first) {
		emlxs_event_destroy(hba, eventq->first);
	}

	mutex_exit(&eventq->lock);

	/* Destroy the queue lock */
	mutex_destroy(&eventq->lock);
	cv_destroy(&eventq->lock_cv);

	/* Clear the queue */
	bzero(eventq, sizeof (emlxs_event_queue_t));

	return;

} /* emlxs_event_queue_destroy() */
148 
149 
/*
 * emlxs_event_destroy() - Unlink and free one queued event entry.
 *
 * Removes the entry from the queue's doubly linked list, accounts for
 * LINK/RSCN events that were never retrieved by an application
 * (hba_event.missed), invokes the event's destroy callback if one is
 * set, then frees the context buffer and the entry itself.
 *
 * Event queue lock must be held.
 */
static void
emlxs_event_destroy(emlxs_hba_t *hba, emlxs_event_entry_t *entry)
{
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_port_t *port;
	uint32_t missed = 0;

	port = (emlxs_port_t *)entry->port;

	eventq->count--;
	if (eventq->count == 0) {
		/* Queue is now empty */
		eventq->first = NULL;
		eventq->last = NULL;
	} else {
		/* Unlink the entry from its neighbors and fix the ends */
		if (entry->prev) {
			entry->prev->next = entry->next;
		}
		if (entry->next) {
			entry->next->prev = entry->prev;
		}
		if (eventq->first == entry) {
			eventq->first = entry->next;
		}
		if (eventq->last == entry) {
			eventq->last = entry->prev;
		}
	}

	entry->prev = NULL;
	entry->next = NULL;

	/* A LINK/RSCN event discarded before being consumed is "missed" */
	if ((entry->evt->mask == EVT_LINK) ||
	    (entry->evt->mask == EVT_RSCN)) {
		if (!(entry->flag & EMLXS_DFC_EVENT_DONE)) {
			hba->hba_event.missed++;
			missed = 1;
		}
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_dequeued_msg,
	    "%s[%d]: flag=%x missed=%d cnt=%d",
	    entry->evt->label, entry->id, entry->flag, missed, eventq->count);

	/* Call notification handler */
	if (entry->evt->destroy != emlxs_null_func) {
		entry->evt->destroy(entry);
	}

	/* Free context buffer */
	if (entry->bp && entry->size) {
		kmem_free(entry->bp, entry->size);
	}

	/* Free entry buffer */
	kmem_free(entry, sizeof (emlxs_event_entry_t));

	return;

} /* emlxs_event_destroy() */
210 
211 
/*
 * emlxs_event() - Queue an event on the HBA event queue.
 *
 * Ownership of 'bp'/'size' transfers to the queue: if no consumer has
 * registered for the event, or an entry cannot be allocated, the
 * buffer is freed here; otherwise it is freed later by
 * emlxs_event_destroy().  The entry gets a nonzero id which is also
 * recorded in the per-mask-bit last_id table, is appended to the tail
 * of the queue, and the queue CV is broadcast to wake any waiters.
 */
extern void
emlxs_event(emlxs_port_t *port, emlxs_event_t *evt, void *bp, uint32_t size)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t i;
	uint32_t mask;

	/* Drop the event if nobody has registered for it */
	if (emlxs_event_check(port, evt) == 0) {
		goto failed;
	}

	/* Create event entry */
	if (!(entry = (emlxs_event_entry_t *)kmem_alloc(
	    sizeof (emlxs_event_entry_t), KM_NOSLEEP))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
		    "%s: Unable to allocate event entry.", evt->label);

		goto failed;
	}

	/* Initialize */
	bzero(entry, sizeof (emlxs_event_entry_t));

	entry->evt = evt;
	entry->port = (void *)port;
	entry->bp = bp;
	entry->size = size;

	mutex_enter(&eventq->lock);

	/* Set the event timer */
	entry->timestamp = hba->timer_tics;
	if (evt->timeout) {
		entry->timer = entry->timestamp + evt->timeout;
	}

	/* Eventq id starts with 1 */
	if (eventq->next_id == 0) {
		eventq->next_id = 1;
	}

	/* Set the event id */
	entry->id = eventq->next_id++;

	/* Set last event table */
	/* (record this id as the newest for every bit set in the mask) */
	mask = evt->mask;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			eventq->last_id[i] = entry->id;
		}
		mask >>= 1;
	}

	/* Put event on bottom of queue */
	entry->next = NULL;
	if (eventq->count == 0) {
		entry->prev = NULL;
		eventq->first = entry;
		eventq->last = entry;
	} else {
		entry->prev = eventq->last;
		entry->prev->next = entry;
		eventq->last = entry;
	}
	eventq->count++;

	/* LINK and RSCN events also feed the HBA API event counters */
	if ((entry->evt->mask == EVT_LINK) ||
	    (entry->evt->mask == EVT_RSCN)) {
		hba->hba_event.new++;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_queued_msg,
	    "%s[%d]: bp=%p size=%d cnt=%d", entry->evt->label,
	    entry->id, bp, size, eventq->count);

	/* Broadcast the event */
	cv_broadcast(&eventq->lock_cv);

	mutex_exit(&eventq->lock);

	return;

failed:

	/* Event was not queued; release the caller's buffer */
	if (bp && size) {
		kmem_free(bp, size);
	}

	return;

} /* emlxs_event() */
305 
306 
/*
 * emlxs_timer_check_events() - Periodic timer hook to expire events.
 *
 * Runs only when the CFG_TIMEOUT_ENABLE parameter is set and the
 * event_timer deadline has been reached.  Destroys every queued event
 * whose timer is set and has expired (entry->timer <= timer_tics),
 * then schedules the next check one EMLXS_EVENT_PERIOD ahead.
 */
extern void
emlxs_timer_check_events(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	emlxs_event_entry_t *next;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	if ((hba->event_timer > hba->timer_tics)) {
		return;
	}

	/* count is tested without the lock as a quick empty check; */
	/* the lock is taken before the list is actually walked */
	if (eventq->count) {
		mutex_enter(&eventq->lock);

		entry = eventq->first;
		while (entry) {
			/* Skip entries with no timer or not yet expired */
			if ((!entry->timer) ||
			    (entry->timer > hba->timer_tics)) {
				entry = entry->next;
				continue;
			}

			/* Event timed out, destroy it */
			next = entry->next;
			emlxs_event_destroy(hba, entry);
			entry = next;
		}

		mutex_exit(&eventq->lock);
	}

	/* Set next event timer check */
	hba->event_timer = hba->timer_tics + EMLXS_EVENT_PERIOD;

	return;

} /* emlxs_timer_check_events() */
349 
350 
351 extern void
emlxs_log_rscn_event(emlxs_port_t * port,uint8_t * payload,uint32_t size)352 emlxs_log_rscn_event(emlxs_port_t *port, uint8_t *payload, uint32_t size)
353 {
354 	uint8_t *bp;
355 	uint32_t *ptr;
356 
357 	/* Check if the event is being requested */
358 	if (emlxs_event_check(port, &emlxs_rscn_event) == 0) {
359 		return;
360 	}
361 
362 	if (size > MAX_RSCN_PAYLOAD) {
363 		size = MAX_RSCN_PAYLOAD;
364 	}
365 
366 	size += sizeof (uint32_t);
367 
368 	/* Save a copy of the payload for the event log */
369 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
370 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
371 		    "%s: Unable to allocate buffer.", emlxs_rscn_event.label);
372 
373 		return;
374 	}
375 
376 	/*
377 	 * Buffer Format:
378 	 *	word[0] = DID of the RSCN
379 	 *	word[1] = RSCN Payload
380 	 */
381 	ptr = (uint32_t *)bp;
382 	*ptr++ = port->did;
383 	bcopy(payload, (char *)ptr, (size - sizeof (uint32_t)));
384 
385 	emlxs_event(port, &emlxs_rscn_event, bp, size);
386 
387 	return;
388 
389 } /* emlxs_log_rscn_event() */
390 
391 
392 extern void
emlxs_log_vportrscn_event(emlxs_port_t * port,uint8_t * payload,uint32_t size)393 emlxs_log_vportrscn_event(emlxs_port_t *port, uint8_t *payload, uint32_t size)
394 {
395 	uint8_t *bp;
396 	uint8_t *ptr;
397 
398 	/* Check if the event is being requested */
399 	if (emlxs_event_check(port, &emlxs_vportrscn_event) == 0) {
400 		return;
401 	}
402 
403 	if (size > MAX_RSCN_PAYLOAD) {
404 		size = MAX_RSCN_PAYLOAD;
405 	}
406 
407 	size += sizeof (NAME_TYPE);
408 
409 	/* Save a copy of the payload for the event log */
410 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
411 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
412 		    "%s: Unable to allocate buffer.",
413 		    emlxs_vportrscn_event.label);
414 
415 		return;
416 	}
417 
418 	/*
419 	 * Buffer Format:
420 	 *	word[0 - 4] = WWPN of the RSCN
421 	 *	word[5] = RSCN Payload
422 	 */
423 	ptr = bp;
424 	bcopy(&port->wwpn, ptr, sizeof (NAME_TYPE));
425 	ptr += sizeof (NAME_TYPE);
426 	bcopy(payload, ptr, (size - sizeof (NAME_TYPE)));
427 
428 	emlxs_event(port, &emlxs_vportrscn_event, bp, size);
429 
430 	return;
431 
432 } /* emlxs_log_vportrscn_event() */
433 
434 
/*
 * emlxs_flush_ct_event() - Destroy a queued CT event matching 'rxid'.
 *
 * Scans the event queue for a CT event belonging to this port whose
 * first buffer word equals rxid.  The entry is flagged
 * EMLXS_DFC_EVENT_DONE before destruction so emlxs_ct_event_destroy()
 * will not try to abort the CT exchange.  Returns 1 if an entry was
 * found and destroyed, otherwise 0.
 */
extern uint32_t
emlxs_flush_ct_event(emlxs_port_t *port, uint32_t rxid)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t *ptr;
	uint32_t found = 0;

	mutex_enter(&eventq->lock);

	for (entry = eventq->first; entry != NULL; entry = entry->next) {
		/* Only CT events from this port are candidates */
		if ((entry->port != port) ||
		    (entry->evt != &emlxs_ct_event)) {
			continue;
		}

		/* word[0] of a CT event buffer holds the RXID */
		ptr = (uint32_t *)entry->bp;
		if (rxid == *ptr) {
			/* This will prevent a CT exchange abort */
			/* in emlxs_ct_event_destroy() */
			entry->flag |= EMLXS_DFC_EVENT_DONE;

			emlxs_event_destroy(hba, entry);
			found = 1;
			break;
		}
	}

	mutex_exit(&eventq->lock);

	return (found);

} /* emlxs_flush_ct_event() */
469 
470 
471 extern uint32_t
emlxs_log_ct_event(emlxs_port_t * port,uint8_t * payload,uint32_t size,uint32_t rxid)472 emlxs_log_ct_event(emlxs_port_t *port, uint8_t *payload, uint32_t size,
473     uint32_t rxid)
474 {
475 	uint8_t *bp;
476 	uint32_t *ptr;
477 
478 	/* Check if the event is being requested */
479 	if (emlxs_event_check(port, &emlxs_ct_event) == 0) {
480 		return (1);
481 	}
482 
483 	if (size > MAX_CT_PAYLOAD) {
484 		size = MAX_CT_PAYLOAD;
485 	}
486 
487 	size += sizeof (uint32_t);
488 
489 	/* Save a copy of the payload for the event log */
490 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
491 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
492 		    "%s: Unable to allocate buffer.", emlxs_ct_event.label);
493 
494 		return (1);
495 	}
496 
497 	/*
498 	 * Buffer Format:
499 	 *	word[0] = RXID tag for outgoing reply to this CT request
500 	 *	word[1] = CT Payload
501 	 */
502 	ptr = (uint32_t *)bp;
503 	*ptr++ = rxid;
504 	bcopy(payload, (char *)ptr, (size - sizeof (uint32_t)));
505 
506 	emlxs_event(port, &emlxs_ct_event, bp, size);
507 
508 	return (0);
509 
510 } /* emlxs_log_ct_event() */
511 
512 
/*
 * emlxs_ct_event_destroy() - Destroy callback for CT events.
 *
 * If the CT request event was never retrieved by an application
 * (EMLXS_DFC_EVENT_DONE not set), spawn a thread to abort the
 * pending CT exchange identified by the RXID stored in the first
 * word of the event buffer.
 */
extern void
emlxs_ct_event_destroy(emlxs_event_entry_t *entry)
{
	emlxs_port_t *port = (emlxs_port_t *)entry->port;
	emlxs_hba_t *hba = HBA;
	uint32_t rxid;

	if (!(entry->flag & EMLXS_DFC_EVENT_DONE)) {

		/* word[0] of a CT event buffer holds the RXID */
		rxid = *(uint32_t *)entry->bp;

		/* Abort exchange */
		emlxs_thread_spawn(hba, emlxs_abort_ct_exchange,
		    entry->port, (void *)(unsigned long)rxid);
	}

	return;

} /* emlxs_ct_event_destroy() */
532 
533 
/*
 * emlxs_log_link_event() - Queue a link state change event.
 *
 * Buffer layout (consumed by emlxs_get_dfc_eventinfo()):
 *	byte[0] = link speed (HBA_PORTSPEED_*)
 *	byte[1] = LIP type
 *	byte[2] = reserved (0)
 *	byte[3] = reserved (0)
 *	byte[4+] = dfc_linkinfo_t data
 */
extern void
emlxs_log_link_event(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint8_t *bp;
	dfc_linkinfo_t *linkinfo;
	uint8_t *byte;
	uint8_t *linkspeed;
	uint8_t *liptype;
	uint8_t *resv1;
	uint8_t *resv2;
	uint32_t size;

	/* Check if the event is being requested */
	if (emlxs_event_check(port, &emlxs_link_event) == 0) {
		return;
	}

	size = sizeof (dfc_linkinfo_t) + sizeof (uint32_t);

	/* Save a copy of the buffer for the event log */
	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
		    "%s: Unable to allocate buffer.", emlxs_link_event.label);

		return;
	}

	/*
	 * Buffer Format:
	 *	word[0] = Linkspeed:8
	 *	word[0] = LIP_type:8
	 *	word[0] = resv1:8
	 *	word[0] = resv2:8
	 *	word[1] = dfc_linkinfo_t data
	 */
	byte = (uint8_t *)bp;
	linkspeed = &byte[0];
	liptype = &byte[1];
	resv1 = &byte[2];
	resv2 = &byte[3];
	linkinfo = (dfc_linkinfo_t *)&byte[4];

	*resv1 = 0;
	*resv2 = 0;

	if (hba->state <= FC_LINK_DOWN) {
		/* Link is down: no speed or LIP type to report */
		*linkspeed = 0;
		*liptype = 0;
	} else {
		/* Set linkspeed */
		if (hba->linkspeed == LA_2GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_2GBIT;
		} else if (hba->linkspeed == LA_4GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_4GBIT;
		} else if (hba->linkspeed == LA_8GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_8GBIT;
		} else if (hba->linkspeed == LA_10GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_10GBIT;
		} else if (hba->linkspeed == LA_16GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_16GBIT;
		} else if (hba->linkspeed == LA_32GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_32GBIT;
		} else {
			/* Unrecognized speed values default to 1Gb */
			*linkspeed = HBA_PORTSPEED_1GBIT;
		}

		/* Set LIP type */
		*liptype = port->lip_type;
	}

	bzero(linkinfo, sizeof (dfc_linkinfo_t));

	linkinfo->a_linkEventTag = hba->link_event_tag;
	linkinfo->a_linkUp = HBASTATS.LinkUp;
	linkinfo->a_linkDown = HBASTATS.LinkDown;
	linkinfo->a_linkMulti = HBASTATS.LinkMultiEvent;

	/* Map the adapter state to a link state */
	if (hba->state <= FC_LINK_DOWN) {
		linkinfo->a_linkState = LNK_DOWN;
		linkinfo->a_DID = port->prev_did;
	} else if (hba->state < FC_READY) {
		linkinfo->a_linkState = LNK_DISCOVERY;
	} else {
		linkinfo->a_linkState = LNK_READY;
	}

	if (linkinfo->a_linkState != LNK_DOWN) {
		if (hba->topology == TOPOLOGY_LOOP) {
			if (hba->flag & FC_FABRIC_ATTACHED) {
				linkinfo->a_topology = LNK_PUBLIC_LOOP;
			} else {
				linkinfo->a_topology = LNK_LOOP;
			}

			linkinfo->a_alpa = port->did & 0xff;
			linkinfo->a_DID = linkinfo->a_alpa;
			linkinfo->a_alpaCnt = port->alpa_map[0];

			/* The ALPA map copy is capped at 127 entries */
			if (linkinfo->a_alpaCnt > 127) {
				linkinfo->a_alpaCnt = 127;
			}

			bcopy((void *)&port->alpa_map[1], linkinfo->a_alpaMap,
			    linkinfo->a_alpaCnt);
		} else {
			if (port->node_count == 1) {
				linkinfo->a_topology = LNK_PT2PT;
			} else {
				linkinfo->a_topology = LNK_FABRIC;
			}

			linkinfo->a_DID = port->did;
		}
	}

	bcopy(&hba->wwpn, linkinfo->a_wwpName, 8);
	bcopy(&hba->wwnn, linkinfo->a_wwnName, 8);

	emlxs_event(port, &emlxs_link_event, bp, size);

	return;

} /* emlxs_log_link_event() */
658 
659 
660 extern void
emlxs_log_dump_event(emlxs_port_t * port,uint8_t * buffer,uint32_t size)661 emlxs_log_dump_event(emlxs_port_t *port, uint8_t *buffer, uint32_t size)
662 {
663 	emlxs_hba_t *hba = HBA;
664 	uint8_t *bp;
665 
666 	/* Check if the event is being requested */
667 	if (emlxs_event_check(port, &emlxs_dump_event) == 0) {
668 #ifdef DUMP_SUPPORT
669 		/* Schedule a dump thread */
670 		emlxs_dump(hba, EMLXS_DRV_DUMP, 0, 0);
671 #endif /* DUMP_SUPPORT */
672 		return;
673 	}
674 
675 	if (buffer && size) {
676 		/* Save a copy of the buffer for the event log */
677 		if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
678 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
679 			    "%s: Unable to allocate buffer.",
680 			    emlxs_dump_event.label);
681 
682 			return;
683 		}
684 
685 		bcopy(buffer, bp, size);
686 	} else {
687 		bp = NULL;
688 		size = 0;
689 	}
690 
691 	emlxs_event(port, &emlxs_dump_event, bp, size);
692 
693 	return;
694 
695 } /* emlxs_log_dump_event() */
696 
697 
698 extern void
emlxs_log_temp_event(emlxs_port_t * port,uint32_t type,uint32_t temp)699 emlxs_log_temp_event(emlxs_port_t *port, uint32_t type, uint32_t temp)
700 {
701 	emlxs_hba_t *hba = HBA;
702 	uint32_t *bp;
703 	uint32_t size;
704 
705 	/* Check if the event is being requested */
706 	if (emlxs_event_check(port, &emlxs_temp_event) == 0) {
707 #ifdef DUMP_SUPPORT
708 		/* Schedule a dump thread */
709 		emlxs_dump(hba, EMLXS_TEMP_DUMP, type, temp);
710 #endif /* DUMP_SUPPORT */
711 		return;
712 	}
713 
714 	size = 2 * sizeof (uint32_t);
715 
716 	if (!(bp = (uint32_t *)kmem_alloc(size, KM_NOSLEEP))) {
717 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
718 		    "%s: Unable to allocate buffer.", emlxs_temp_event.label);
719 
720 		return;
721 	}
722 
723 	bp[0] = type;
724 	bp[1] = temp;
725 
726 	emlxs_event(port, &emlxs_temp_event, bp, size);
727 
728 	return;
729 
730 } /* emlxs_log_temp_event() */
731 
732 
733 
734 extern void
emlxs_log_fcoe_event(emlxs_port_t * port,menlo_init_rsp_t * init_rsp)735 emlxs_log_fcoe_event(emlxs_port_t *port, menlo_init_rsp_t *init_rsp)
736 {
737 	emlxs_hba_t *hba = HBA;
738 	uint8_t *bp;
739 	uint32_t size;
740 
741 	/* Check if the event is being requested */
742 	if (emlxs_event_check(port, &emlxs_fcoe_event) == 0) {
743 		return;
744 	}
745 
746 	/* Check if this is a FCOE adapter */
747 	if (hba->model_info.device_id != PCI_DEVICE_ID_HORNET) {
748 		return;
749 	}
750 
751 	size = sizeof (menlo_init_rsp_t);
752 
753 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
754 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
755 		    "%s: Unable to allocate buffer.", emlxs_fcoe_event.label);
756 
757 		return;
758 	}
759 
760 	bcopy((uint8_t *)init_rsp, bp, size);
761 
762 	emlxs_event(port, &emlxs_fcoe_event, bp, size);
763 
764 	return;
765 
766 } /* emlxs_log_fcoe_event() */
767 
768 
769 extern void
emlxs_log_async_event(emlxs_port_t * port,IOCB * iocb)770 emlxs_log_async_event(emlxs_port_t *port, IOCB *iocb)
771 {
772 	uint8_t *bp;
773 	uint32_t size;
774 
775 	if (emlxs_event_check(port, &emlxs_async_event) == 0) {
776 		return;
777 	}
778 
779 	/* ASYNC_STATUS_CN response size */
780 	size = 64;
781 
782 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
783 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
784 		    "%s: Unable to allocate buffer.", emlxs_async_event.label);
785 
786 		return;
787 	}
788 
789 	bcopy((uint8_t *)iocb, bp, size);
790 
791 	emlxs_event(port, &emlxs_async_event, bp, size);
792 
793 	return;
794 
795 } /* emlxs_log_async_event() */
796 
797 
/*
 * emlxs_get_dfc_eventinfo() - Harvest new LINK and RSCN events into an
 * HBA_EVENTINFO array (HBA API event retrieval).
 *
 * On entry *eventcount holds the capacity of 'eventinfo'; on return it
 * holds the number of entries filled in.  *missed returns (and resets)
 * the count of LINK/RSCN events destroyed before retrieval.
 * hba_event.last_id records the newest event id already reported, so
 * only newer entries are returned on the next call.  Returns 0 on
 * success or DFC_ARG_NULL if any output pointer is NULL.
 */
extern uint32_t
emlxs_get_dfc_eventinfo(emlxs_port_t *port, HBA_EVENTINFO *eventinfo,
    uint32_t *eventcount, uint32_t *missed)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t max_events;
	dfc_linkinfo_t *linkinfo;
	uint32_t *word;
	uint8_t *byte;
	uint8_t linkspeed;
	uint8_t liptype;
	fc_affected_id_t *aid;
	uint32_t events;
	uint8_t format;

	if (!eventinfo || !eventcount || !missed) {
		return (DFC_ARG_NULL);
	}

	max_events = *eventcount;
	*eventcount = 0;
	*missed = 0;

	mutex_enter(&eventq->lock);

	/* Account for missed events */
	if (hba->hba_event.new > hba->hba_event.missed) {
		hba->hba_event.new -= hba->hba_event.missed;
	} else {
		hba->hba_event.new = 0;
	}

	*missed = hba->hba_event.missed;
	hba->hba_event.missed = 0;

	if (!hba->hba_event.new) {
		/* Nothing new; fast-forward last_id to the newest id */
		hba->hba_event.last_id = eventq->next_id - 1;
		mutex_exit(&eventq->lock);
		return (0);
	}

	/* A new event has occurred since last acquisition */

	events = 0;
	entry = eventq->first;
	while (entry && (events < max_events)) {

		/* Skip old events */
		if (entry->id <= hba->hba_event.last_id) {
			entry = entry->next;
			continue;
		}

		/* Process this entry */
		switch (entry->evt->mask) {
		case EVT_LINK:
			/* Buffer layout set up by emlxs_log_link_event() */
			byte = (uint8_t *)entry->bp;
			linkspeed = byte[0];
			liptype = byte[1];
			linkinfo = (dfc_linkinfo_t *)&byte[4];

			if (linkinfo->a_linkState == LNK_DOWN) {
				eventinfo->EventCode =
				    HBA_EVENT_LINK_DOWN;
				eventinfo->Event.Link_EventInfo.
				    PortFcId = linkinfo->a_DID;
				eventinfo->Event.Link_EventInfo.
				    Reserved[0] = 0;
				eventinfo->Event.Link_EventInfo.
				    Reserved[1] = 0;
				eventinfo->Event.Link_EventInfo.
				    Reserved[2] = 0;
			} else {
				eventinfo->EventCode =
				    HBA_EVENT_LINK_UP;
				eventinfo->Event.Link_EventInfo.
				    PortFcId = linkinfo->a_DID;

				/* Reserved[0] carries a topology class: */
				/* 2 for loop topologies, 1 otherwise */
				if ((linkinfo->a_topology ==
				    LNK_PUBLIC_LOOP) ||
				    (linkinfo->a_topology ==
				    LNK_LOOP)) {
					eventinfo->Event.
					    Link_EventInfo.
					    Reserved[0] = 2;
				} else {
					eventinfo->Event.
					    Link_EventInfo.
					    Reserved[0] = 1;
				}

				/* Reserved[1]/[2] carry LIP type and speed */
				eventinfo->Event.Link_EventInfo.
				    Reserved[1] = liptype;
				eventinfo->Event.Link_EventInfo.
				    Reserved[2] = linkspeed;
			}

			eventinfo++;
			events++;
			hba->hba_event.new--;
			break;

		case EVT_RSCN:
			word = (uint32_t *)entry->bp;
			eventinfo->EventCode = HBA_EVENT_RSCN;
			eventinfo->Event.RSCN_EventInfo.PortFcId =
			    word[0] & 0xFFFFFF;
			/* word[1] is the RSCN payload command */

			aid = (fc_affected_id_t *)&word[2];
			format = aid->aff_format;

			/* Mask the affected D_ID by the address format */
			switch (format) {
			case 0:	/* Port */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ffffff;
				break;

			case 1:	/* Area */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ffff00;
				break;

			case 2:	/* Domain */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ff0000;
				break;

			case 3:	/* Network */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage = 0;
				break;
			}

			eventinfo->Event.RSCN_EventInfo.Reserved[0] =
			    0;
			eventinfo->Event.RSCN_EventInfo.Reserved[1] =
			    0;

			eventinfo++;
			events++;
			hba->hba_event.new--;
			break;
		}

		hba->hba_event.last_id = entry->id;
		entry = entry->next;
	}

	/* Return number of events acquired */
	*eventcount = events;

	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_get_dfc_eventinfo() */
960 
961 
/*
 * emlxs_get_dfc_event() - Retrieve the next event of a given type for
 * a DFC (application) consumer.
 *
 * dfc_event->event selects a single event mask bit and
 * dfc_event->last_id holds the id of the last event the caller has
 * already seen.  If no newer event exists and 'sleep' is set, block on
 * the queue CV until a new event is logged, the event is unregistered,
 * or the wait is interrupted by a signal.  When the caller supplies a
 * data buffer (dataout/size), the next matching event's context buffer
 * is copied out (truncated to the smaller of the two sizes) and the
 * entry is flagged EMLXS_DFC_EVENT_DONE.  dfc_event->last_id and
 * dfc_event->size are updated in place.
 */
void
emlxs_get_dfc_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event,
    uint32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t found;
	uint32_t mask;
	uint32_t i;
	uint32_t size = 0;
	uint32_t rc;

	if (dfc_event->dataout && dfc_event->size) {
		size = dfc_event->size;
	}
	dfc_event->size = 0;

	/* Calculate the event index */
	/* (position of the lowest set bit in the requested mask) */
	mask = dfc_event->event;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			break;
		}

		mask >>= 1;
	}

	if (i == 32) {
		/* No event bit set; nothing to wait for */
		return;
	}

	mutex_enter(&eventq->lock);

wait_for_event:

	/* Check if no new event has occurred */
	if (dfc_event->last_id == eventq->last_id[i]) {
		if (!sleep) {
			mutex_exit(&eventq->lock);
			return;
		}

		/* While event is still active and */
		/* no new event has been logged */
		while ((dfc_event->event & hba->event_mask) &&
		    (dfc_event->last_id == eventq->last_id[i])) {

			rc = cv_wait_sig(&eventq->lock_cv, &eventq->lock);

			/* Check if thread was killed by kernel */
			if (rc == 0) {
				dfc_event->pid = 0;
				dfc_event->event = 0;
				mutex_exit(&eventq->lock);
				return;
			}
		}

		/* If the event is no longer registered then */
		/* return immediately */
		if (!(dfc_event->event & hba->event_mask)) {
			mutex_exit(&eventq->lock);
			return;
		}
	}

	/* !!! An event has occurred since last_id !!! */

	/* Check if event data is not being requested */
	if (!size) {
		/* If so, then just return the last event id */
		dfc_event->last_id = eventq->last_id[i];

		mutex_exit(&eventq->lock);
		return;
	}

	/* !!! The requester wants the next event buffer !!! */

	found = 0;
	entry = eventq->first;
	while (entry) {
		if ((entry->id > dfc_event->last_id) &&
		    (entry->evt->mask == dfc_event->event)) {
			found = 1;
			break;
		}

		entry = entry->next;
	}

	if (!found) {
		/* Update last_id to the last known event */
		dfc_event->last_id = eventq->last_id[i];

		/* Try waiting again if we can */
		goto wait_for_event;
	}

	/* !!! Next event found !!! */

	/* Copy the context buffer to the buffer provided */
	if (entry->bp && entry->size) {
		if (entry->size < size) {
			size = entry->size;
		}

		bcopy((void *)entry->bp, dfc_event->dataout, size);

		/* Event has been retrieved by DFCLIB */
		entry->flag |= EMLXS_DFC_EVENT_DONE;

		dfc_event->size = size;
	}

	dfc_event->last_id = entry->id;

	mutex_exit(&eventq->lock);

	return;

} /* emlxs_get_dfc_event() */
1085 
1086 
/*
 * emlxs_kill_dfc_event() - Cancel a pending DFC event request.
 *
 * Clears the requester's pid and event mask under the queue lock and
 * broadcasts on the queue CV so any thread sleeping in
 * emlxs_get_dfc_event() for this request wakes up and returns.
 * Always returns 0.
 */
uint32_t
emlxs_kill_dfc_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;

	mutex_enter(&eventq->lock);
	dfc_event->pid = 0;
	dfc_event->event = 0;
	cv_broadcast(&eventq->lock_cv);
	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_kill_dfc_event() */
1102 
1103 
1104 #ifdef SAN_DIAG_SUPPORT
1105 extern void
emlxs_log_sd_basic_els_event(emlxs_port_t * port,uint32_t subcat,HBA_WWN * portname,HBA_WWN * nodename)1106 emlxs_log_sd_basic_els_event(emlxs_port_t *port, uint32_t subcat,
1107     HBA_WWN *portname, HBA_WWN *nodename)
1108 {
1109 	struct sd_plogi_rcv_v0	*bp;
1110 	uint32_t		size;
1111 
1112 	/* Check if the event is being requested */
1113 	if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1114 		return;
1115 	}
1116 
1117 	size = sizeof (struct sd_plogi_rcv_v0);
1118 
1119 	if (!(bp = (struct sd_plogi_rcv_v0 *)kmem_alloc(size, KM_NOSLEEP))) {
1120 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1121 		    "%s: Unable to allocate buffer.", emlxs_sd_els_event.label);
1122 
1123 		return;
1124 	}
1125 
1126 	/*
1127 	 * we are using version field to store subtype, libdfc
1128 	 * will fix this up before returning data to app.
1129 	 */
1130 	bp->sd_plogir_version = subcat;
1131 	bcopy((uint8_t *)portname, (uint8_t *)&bp->sd_plogir_portname,
1132 	    sizeof (HBA_WWN));
1133 	bcopy((uint8_t *)nodename, (uint8_t *)&bp->sd_plogir_nodename,
1134 	    sizeof (HBA_WWN));
1135 
1136 	emlxs_event(port, &emlxs_sd_els_event, bp, size);
1137 
1138 	return;
1139 
1140 } /* emlxs_log_sd_basic_els_event() */
1141 
1142 
1143 extern void
emlxs_log_sd_prlo_event(emlxs_port_t * port,HBA_WWN * remoteport)1144 emlxs_log_sd_prlo_event(emlxs_port_t *port, HBA_WWN *remoteport)
1145 {
1146 	struct sd_prlo_rcv_v0	*bp;
1147 	uint32_t		size;
1148 
1149 	/* Check if the event is being requested */
1150 	if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1151 		return;
1152 	}
1153 
1154 	size = sizeof (struct sd_prlo_rcv_v0);
1155 
1156 	if (!(bp = (struct sd_prlo_rcv_v0 *)kmem_alloc(size,
1157 	    KM_NOSLEEP))) {
1158 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1159 		    "%s PRLO: Unable to allocate buffer.",
1160 		    emlxs_sd_els_event.label);
1161 
1162 		return;
1163 	}
1164 
1165 	/*
1166 	 * we are using version field to store subtype, libdfc
1167 	 * will fix this up before returning data to app.
1168 	 */
1169 	bp->sd_prlor_version = SD_ELS_SUBCATEGORY_PRLO_RCV;
1170 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_prlor_remoteport,
1171 	    sizeof (HBA_WWN));
1172 
1173 	emlxs_event(port, &emlxs_sd_els_event, bp, size);
1174 
1175 	return;
1176 
1177 } /* emlxs_log_sd_prlo_event() */
1178 
1179 
1180 extern void
emlxs_log_sd_lsrjt_event(emlxs_port_t * port,HBA_WWN * remoteport,uint32_t orig_cmd,uint32_t reason,uint32_t reason_expl)1181 emlxs_log_sd_lsrjt_event(emlxs_port_t *port, HBA_WWN *remoteport,
1182     uint32_t orig_cmd, uint32_t reason, uint32_t reason_expl)
1183 {
1184 	struct sd_lsrjt_rcv_v0	*bp;
1185 	uint32_t		size;
1186 
1187 	/* Check if the event is being requested */
1188 	if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1189 		return;
1190 	}
1191 
1192 	size = sizeof (struct sd_lsrjt_rcv_v0);
1193 
1194 	if (!(bp = (struct sd_lsrjt_rcv_v0 *)kmem_alloc(size,
1195 	    KM_NOSLEEP))) {
1196 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1197 		    "%s LSRJT: Unable to allocate buffer.",
1198 		    emlxs_sd_els_event.label);
1199 
1200 		return;
1201 	}
1202 
1203 	/*
1204 	 * we are using version field to store subtype, libdfc
1205 	 * will fix this up before returning data to app.
1206 	 */
1207 	bp->sd_lsrjtr_version = SD_ELS_SUBCATEGORY_LSRJT_RCV;
1208 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_lsrjtr_remoteport,
1209 	    sizeof (HBA_WWN));
1210 	bp->sd_lsrjtr_original_cmd = orig_cmd;
1211 	bp->sd_lsrjtr_reasoncode = reason;
1212 	bp->sd_lsrjtr_reasoncodeexpl = reason_expl;
1213 
1214 	emlxs_event(port, &emlxs_sd_els_event, bp, size);
1215 
1216 	return;
1217 
1218 } /* emlxs_log_sd_lsrjt_event() */
1219 
1220 
1221 extern void
emlxs_log_sd_fc_bsy_event(emlxs_port_t * port,HBA_WWN * remoteport)1222 emlxs_log_sd_fc_bsy_event(emlxs_port_t *port, HBA_WWN *remoteport)
1223 {
1224 	struct sd_pbsy_rcv_v0	*bp;
1225 	uint32_t		size;
1226 
1227 	/* Check if the event is being requested */
1228 	if (emlxs_event_check(port, &emlxs_sd_fabric_event) == 0) {
1229 		return;
1230 	}
1231 
1232 	size = sizeof (struct sd_pbsy_rcv_v0);
1233 
1234 	if (!(bp = (struct sd_pbsy_rcv_v0 *)kmem_alloc(size,
1235 	    KM_NOSLEEP))) {
1236 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1237 		    "%s BSY: Unable to allocate buffer.",
1238 		    emlxs_sd_fabric_event.label);
1239 
1240 		return;
1241 	}
1242 
1243 	/*
1244 	 * we are using version field to store subtype, libdfc
1245 	 * will fix this up before returning data to app.
1246 	 */
1247 	if (remoteport == NULL)
1248 		bp->sd_pbsyr_evt_version = SD_FABRIC_SUBCATEGORY_FABRIC_BUSY;
1249 	else
1250 	{
1251 		bp->sd_pbsyr_evt_version = SD_FABRIC_SUBCATEGORY_PORT_BUSY;
1252 		bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_pbsyr_rport,
1253 		    sizeof (HBA_WWN));
1254 	}
1255 
1256 	emlxs_event(port, &emlxs_sd_fabric_event, bp, size);
1257 
1258 	return;
1259 
1260 } /* emlxs_log_sd_fc_bsy_event() */
1261 
1262 
1263 extern void
emlxs_log_sd_fc_rdchk_event(emlxs_port_t * port,HBA_WWN * remoteport,uint32_t lun,uint32_t opcode,uint32_t fcp_param)1264 emlxs_log_sd_fc_rdchk_event(emlxs_port_t *port, HBA_WWN *remoteport,
1265     uint32_t lun, uint32_t opcode, uint32_t fcp_param)
1266 {
1267 	struct sd_fcprdchkerr_v0	*bp;
1268 	uint32_t			size;
1269 
1270 	/* Check if the event is being requested */
1271 	if (emlxs_event_check(port, &emlxs_sd_fabric_event) == 0) {
1272 		return;
1273 	}
1274 
1275 	size = sizeof (struct sd_fcprdchkerr_v0);
1276 
1277 	if (!(bp = (struct sd_fcprdchkerr_v0 *)kmem_alloc(size,
1278 	    KM_NOSLEEP))) {
1279 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1280 		    "%s RDCHK: Unable to allocate buffer.",
1281 		    emlxs_sd_fabric_event.label);
1282 
1283 		return;
1284 	}
1285 
1286 	/*
1287 	 * we are using version field to store subtype, libdfc
1288 	 * will fix this up before returning data to app.
1289 	 */
1290 	bp->sd_fcprdchkerr_version = SD_FABRIC_SUBCATEGORY_FCPRDCHKERR;
1291 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_fcprdchkerr_rport,
1292 	    sizeof (HBA_WWN));
1293 	bp->sd_fcprdchkerr_lun = lun;
1294 	bp->sd_fcprdchkerr_opcode = opcode;
1295 	bp->sd_fcprdchkerr_fcpiparam = fcp_param;
1296 
1297 	emlxs_event(port, &emlxs_sd_fabric_event, bp, size);
1298 
1299 	return;
1300 
1301 } /* emlxs_log_sd_rdchk_event() */
1302 
1303 
1304 extern void
emlxs_log_sd_scsi_event(emlxs_port_t * port,uint32_t type,HBA_WWN * remoteport,int32_t lun)1305 emlxs_log_sd_scsi_event(emlxs_port_t *port, uint32_t type,
1306     HBA_WWN *remoteport, int32_t lun)
1307 {
1308 	struct sd_scsi_generic_v0	*bp;
1309 	uint32_t			size;
1310 
1311 	/* Check if the event is being requested */
1312 	if (emlxs_event_check(port, &emlxs_sd_scsi_event) == 0) {
1313 		return;
1314 	}
1315 
1316 	size = sizeof (struct sd_scsi_generic_v0);
1317 
1318 	if (!(bp = (struct sd_scsi_generic_v0 *)kmem_alloc(size,
1319 	    KM_NOSLEEP))) {
1320 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1321 		    "%s: Unable to allocate buffer.",
1322 		    emlxs_sd_scsi_event.label);
1323 
1324 		return;
1325 	}
1326 
1327 	/*
1328 	 * we are using version field to store subtype, libdfc
1329 	 * will fix this up before returning data to app.
1330 	 */
1331 	bp->sd_scsi_generic_version = type;
1332 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_scsi_generic_rport,
1333 	    sizeof (HBA_WWN));
1334 	bp->sd_scsi_generic_lun = lun;
1335 
1336 	emlxs_event(port, &emlxs_sd_scsi_event, bp, size);
1337 
1338 	return;
1339 
1340 } /* emlxs_log_sd_scsi_event() */
1341 
1342 
1343 extern void
emlxs_log_sd_scsi_check_event(emlxs_port_t * port,HBA_WWN * remoteport,uint32_t lun,uint32_t cmdcode,uint32_t sensekey,uint32_t asc,uint32_t ascq)1344 emlxs_log_sd_scsi_check_event(emlxs_port_t *port, HBA_WWN *remoteport,
1345     uint32_t lun, uint32_t cmdcode, uint32_t sensekey,
1346     uint32_t asc, uint32_t ascq)
1347 {
1348 	struct sd_scsi_checkcond_v0	*bp;
1349 	uint32_t			size;
1350 
1351 	/* Check if the event is being requested */
1352 	if (emlxs_event_check(port, &emlxs_sd_scsi_event) == 0) {
1353 		return;
1354 	}
1355 
1356 	size = sizeof (struct sd_scsi_checkcond_v0);
1357 
1358 	if (!(bp = (struct sd_scsi_checkcond_v0 *)kmem_alloc(size,
1359 	    KM_NOSLEEP))) {
1360 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1361 		    "%s CHECK: Unable to allocate buffer.",
1362 		    emlxs_sd_scsi_event.label);
1363 
1364 		return;
1365 	}
1366 
1367 	/*
1368 	 * we are using version field to store subtype, libdfc
1369 	 * will fix this up before returning data to app.
1370 	 */
1371 	bp->sd_scsi_checkcond_version = SD_SCSI_SUBCATEGORY_CHECKCONDITION;
1372 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_scsi_checkcond_rport,
1373 	    sizeof (HBA_WWN));
1374 	bp->sd_scsi_checkcond_lun = lun;
1375 	bp->sd_scsi_checkcond_cmdcode = cmdcode;
1376 	bp->sd_scsi_checkcond_sensekey = sensekey;
1377 	bp->sd_scsi_checkcond_asc = asc;
1378 	bp->sd_scsi_checkcond_ascq = ascq;
1379 
1380 	emlxs_event(port, &emlxs_sd_scsi_event, bp, size);
1381 
1382 	return;
1383 
1384 } /* emlxs_log_sd_scsi_check_event() */
1385 
1386 
/*
 * Retrieve the next SAN Diag event for the single event bit set in
 * dfc_event->event, optionally copying its payload into
 * dfc_event->dataout (when dataout and size are both nonzero).
 * If 'sleep' is nonzero and no new event has been logged since
 * dfc_event->last_id, block on the event queue's condition variable
 * until one arrives, the registration is dropped, or a signal is
 * received.  On success dfc_event->last_id and (when a payload was
 * copied) dfc_event->size are updated.
 */
void
emlxs_get_sd_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event,
    uint32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t found;
	uint32_t mask;
	uint32_t i;
	uint32_t size = 0;
	uint32_t rc;

	/* Only copy payload data if the caller provided a buffer */
	if (dfc_event->dataout && dfc_event->size) {
		size = dfc_event->size;
	}
	dfc_event->size = 0;

	/*
	 * Calculate the event index: position of the lowest set bit in
	 * the event mask, used to index eventq->last_id[].
	 */
	mask = dfc_event->event;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			break;
		}

		mask >>= 1;
	}

	/* No bit set in the event mask; nothing to wait for */
	if (i == 32) {
		return;
	}

	mutex_enter(&eventq->lock);

wait_for_event:

	/* Check if no new event has occurred */
	if (dfc_event->last_id == eventq->last_id[i]) {
		if (!sleep) {
			mutex_exit(&eventq->lock);
			return;
		}

		/* While event is active and no new event has been logged */
		while ((dfc_event->event & port->sd_event_mask) &&
		    (dfc_event->last_id == eventq->last_id[i])) {
			rc = cv_wait_sig(&eventq->lock_cv, &eventq->lock);

			/*
			 * Check if thread was killed by kernel
			 * (cv_wait_sig returned 0, i.e. interrupted
			 * by a signal); clear the registration.
			 */
			if (rc == 0) {
				dfc_event->pid = 0;
				dfc_event->event = 0;
				mutex_exit(&eventq->lock);
				return;
			}
		}

		/* If the event is no longer registered then return */
		if (!(dfc_event->event & port->sd_event_mask)) {
			mutex_exit(&eventq->lock);
			return;
		}
	}

	/* !!! An event has occurred since last_id !!! */

	/* Check if event data is not being requested */
	if (!size) {
		/* If so, then just return the last event id */
		dfc_event->last_id = eventq->last_id[i];

		mutex_exit(&eventq->lock);
		return;
	}

	/* !!! The requester wants the next event buffer !!! */

	/*
	 * Scan the queue for the first entry newer than last_id that
	 * belongs to this port and matches the requested event mask.
	 */
	found = 0;
	entry = eventq->first;
	while (entry) {
		if ((entry->id > dfc_event->last_id) &&
		    (entry->port == (void *)port) &&
		    (entry->evt->mask == dfc_event->event)) {
			found = 1;
			break;
		}

		entry = entry->next;
	}

	if (!found) {
		/* Update last_id to the last known event */
		dfc_event->last_id = eventq->last_id[i];

		/* Try waiting again if we can */
		goto wait_for_event;
	}

	/* !!! Next event found !!! */

	/* Copy the context buffer to the buffer provided */
	if (entry->bp && entry->size) {
		/* Truncate to the smaller of entry size and caller size */
		if (entry->size < size) {
			size = entry->size;
		}

		bcopy((void *)entry->bp, dfc_event->dataout, size);

		/* Event has been retrieved by SANDIAG */
		entry->flag |= EMLXS_SD_EVENT_DONE;

		dfc_event->size = size;
	}

	dfc_event->last_id = entry->id;

	mutex_exit(&eventq->lock);

	return;

} /* emlxs_get_sd_event */
1508 #endif /* SAN_DIAG_SUPPORT */
1509