1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <emlxs.h>
28
29 /* Timer period in seconds */
30 #define EMLXS_TIMER_PERIOD 1 /* secs */
31 #define EMLXS_PKT_PERIOD 5 /* secs */
32 #define EMLXS_UB_PERIOD 60 /* secs */
33
34 EMLXS_MSG_DEF(EMLXS_CLOCK_C);
35
36
37 static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
38
39 #ifdef DHCHAP_SUPPORT
40 static void emlxs_timer_check_dhchap(emlxs_port_t *port);
41 #endif /* DHCHAP_SUPPORT */
42
43 static void emlxs_timer_check_pools(emlxs_hba_t *hba);
44 static void emlxs_timer(void *arg);
45 static void emlxs_timer_check_fw_update(emlxs_hba_t *hba);
46 static void emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
47 static uint32_t emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
48 static void emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
49 static void emlxs_timer_check_linkup(emlxs_hba_t *hba);
50 static void emlxs_timer_check_discovery(emlxs_port_t *port);
51 static void emlxs_timer_check_clean_address(emlxs_port_t *port);
52 static void emlxs_timer_check_ub(emlxs_port_t *port);
53 static void emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag);
54 static uint32_t emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
55 Q *abortq, uint8_t *flag);
56
57 #ifdef TX_WATCHDOG
58 static void emlxs_tx_watchdog(emlxs_hba_t *hba);
59 #endif /* TX_WATCHDOG */
60
61 extern clock_t
emlxs_timeout(emlxs_hba_t * hba,uint32_t timeout)62 emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
63 {
64 emlxs_config_t *cfg = &CFG;
65 clock_t time;
66
67 /* Set thread timeout */
68 if (cfg[CFG_TIMEOUT_ENABLE].current) {
69 (void) drv_getparm(LBOLT, &time);
70 time += (timeout * drv_usectohz(1000000));
71 } else {
72 time = -1;
73 }
74
75 return (time);
76
77 } /* emlxs_timeout() */
78
79
/*
 * System timeout(9F) callback.  Runs the periodic driver checks and
 * rearms itself every EMLXS_TIMER_PERIOD seconds until a kill request
 * (EMLXS_TIMER_KILL) is seen, at which point it sets EMLXS_TIMER_ENDED
 * so emlxs_timer_stop() can stop waiting.
 */
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
	emlxs_port_t *port = &PPORT;	/* used by EMLXS_CONTEXT below */

	/* Timer was already cancelled; nothing to do */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		/* Acknowledge the kill: clear the id and mark ENDED */
		hba->timer_id = 0;
		hba->timer_tics = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	hba->timer_tics = DRV_TIME;

	/* Check io_active count (Safety net) */
	if (hba->io_active & 0x80000000) {
		/* Counter went negative; treat as corrupt and clear it */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
		    "Timer: io_active=0x%x. Reset to zero.", hba->io_active);
		hba->io_active = 0;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Poll for error attention outside the timer lock */
	EMLXS_SLI_POLL_ERRATT(hba);

	/* Perform standard checks */
	emlxs_timer_checks(hba);

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba,
		    (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
	} else {
		/* Kill arrived while we were busy; acknowledge it now */
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer() */
145
146
/*
 * Run all periodic checks for this HBA and every bound port.
 * Called once per tick from emlxs_timer().  The <flag> array collects
 * per-channel service requests that are acted on at the end.
 */
extern void
emlxs_timer_checks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t flag[MAX_CHANNEL];	/* per-channel service-request flags */
	uint32_t i;
	uint32_t rc;

	/* Exit if we are still initializing */
	if (hba->state < FC_LINK_DOWN) {
		return;
	}

	bzero((void *)flag, sizeof (flag));

	/* Check SLI level timeouts */
	EMLXS_SLI_TIMER(hba);

	/* Check event queue */
	emlxs_timer_check_events(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

	/* Check fw update timer */
	emlxs_timer_check_fw_update(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif /* IDLE_TIMER */

	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);

	/* Check for packet timeouts */
	rc = emlxs_timer_check_pkts(hba, flag);

	if (rc) {
		/* Link or adapter is being reset; skip remaining checks */
		return;
	}

	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Check the ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, flag);

		/* Check for clean address bit delay timeout */
		emlxs_timer_check_clean_address(port);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif /* DHCHAP_SUPPORT */

	}

	/* Check memory pools */
	emlxs_timer_check_pools(hba);

	/* Check for IO channel service timeouts */
	/* Always do this last */
	emlxs_timer_check_channels(hba, flag);

	return;

} /* emlxs_timer_checks() */
229
230
231 extern void
emlxs_timer_start(emlxs_hba_t * hba)232 emlxs_timer_start(emlxs_hba_t *hba)
233 {
234 if (hba->timer_id) {
235 return;
236 }
237
238 /* Restart the timer */
239 mutex_enter(&EMLXS_TIMER_LOCK);
240 if (!hba->timer_id) {
241 hba->timer_flags = 0;
242 hba->timer_id =
243 timeout(emlxs_timer, (void *)hba, drv_usectohz(1000000));
244 }
245 mutex_exit(&EMLXS_TIMER_LOCK);
246
247 } /* emlxs_timer_start() */
248
249
/*
 * Request timer shutdown and busy-wait (polling every 500ms) until
 * the emlxs_timer() callback acknowledges the kill by clearing
 * hba->timer_id.
 */
extern void
emlxs_timer_stop(emlxs_hba_t *hba)
{
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);
	hba->timer_flags |= EMLXS_TIMER_KILL;

	/* Drop the lock each iteration so the callback can take it */
	/* and acknowledge the kill */
	while (hba->timer_id) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		delay(drv_usectohz(500000));
		mutex_enter(&EMLXS_TIMER_LOCK);
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer_stop() */
270
271
272 static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t * hba,uint8_t * flag)273 emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
274 {
275 emlxs_port_t *port = &PPORT;
276 emlxs_config_t *cfg = &CFG;
277 Q tmo;
278 int32_t channelno;
279 CHANNEL *cp;
280 NODELIST *nlp;
281 IOCBQ *prev;
282 IOCBQ *next;
283 IOCB *iocb;
284 IOCBQ *iocbq;
285 emlxs_buf_t *sbp;
286 fc_packet_t *pkt;
287 Q abort;
288 uint32_t iotag;
289 uint32_t rc;
290
291 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
292 return (0);
293 }
294
295 if (hba->pkt_timer > hba->timer_tics) {
296 return (0);
297 }
298
299 hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;
300
301
302 bzero((void *)&tmo, sizeof (Q));
303
304 /*
305 * We must hold the locks here because we never know when an iocb
306 * will be removed out from under us
307 */
308
309 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
310
311 for (channelno = 0; channelno < hba->chan_count; channelno++) {
312 cp = &hba->chan[channelno];
313
314 /* Scan the tx queues for each active node on the channel */
315
316 /* Get the first node */
317 nlp = (NODELIST *)cp->nodeq.q_first;
318
319 while (nlp) {
320 /* Scan the node's priority tx queue */
321 prev = NULL;
322 iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
323
324 while (iocbq) {
325 next = (IOCBQ *)iocbq->next;
326 iocb = &iocbq->iocb;
327 sbp = (emlxs_buf_t *)iocbq->sbp;
328
329 /* Check if iocb has timed out */
330 if (sbp && hba->timer_tics >= sbp->ticks) {
331 /* iocb timed out, now deque it */
332 if (next == NULL) {
333 nlp->nlp_ptx[channelno].q_last =
334 (uint8_t *)prev;
335 }
336
337 if (prev == NULL) {
338 nlp->nlp_ptx[channelno].
339 q_first = (uint8_t *)next;
340 } else {
341 prev->next = next;
342 }
343
344 iocbq->next = NULL;
345 nlp->nlp_ptx[channelno].q_cnt--;
346
347 /* Add this iocb to our local */
348 /* timout queue */
349
350 /*
351 * This way we don't hold the TX_CHANNEL
352 * lock too long
353 */
354
355 if (tmo.q_first) {
356 ((IOCBQ *)tmo.q_last)->next =
357 iocbq;
358 tmo.q_last =
359 (uint8_t *)iocbq;
360 tmo.q_cnt++;
361 } else {
362 tmo.q_first =
363 (uint8_t *)iocbq;
364 tmo.q_last =
365 (uint8_t *)iocbq;
366 tmo.q_cnt = 1;
367 }
368 iocbq->next = NULL;
369
370 } else {
371 prev = iocbq;
372 }
373
374 iocbq = next;
375
376 } /* while (iocbq) */
377
378
379 /* Scan the node's tx queue */
380 prev = NULL;
381 iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
382
383 while (iocbq) {
384 next = (IOCBQ *)iocbq->next;
385 iocb = &iocbq->iocb;
386 sbp = (emlxs_buf_t *)iocbq->sbp;
387
388 /* Check if iocb has timed out */
389 if (sbp && hba->timer_tics >= sbp->ticks) {
390 /* iocb timed out, now deque it */
391 if (next == NULL) {
392 nlp->nlp_tx[channelno].q_last =
393 (uint8_t *)prev;
394 }
395
396 if (prev == NULL) {
397 nlp->nlp_tx[channelno].q_first =
398 (uint8_t *)next;
399 } else {
400 prev->next = next;
401 }
402
403 iocbq->next = NULL;
404 nlp->nlp_tx[channelno].q_cnt--;
405
406 /* Add this iocb to our local */
407 /* timout queue */
408
409 /*
410 * This way we don't hold the TX_CHANNEL
411 * lock too long
412 */
413
414 if (tmo.q_first) {
415 ((IOCBQ *)tmo.q_last)->next =
416 iocbq;
417 tmo.q_last =
418 (uint8_t *)iocbq;
419 tmo.q_cnt++;
420 } else {
421 tmo.q_first =
422 (uint8_t *)iocbq;
423 tmo.q_last =
424 (uint8_t *)iocbq;
425 tmo.q_cnt = 1;
426 }
427 iocbq->next = NULL;
428
429 } else {
430 prev = iocbq;
431 }
432
433 iocbq = next;
434
435 } /* while (iocbq) */
436
437 if (nlp == (NODELIST *)cp->nodeq.q_last) {
438 nlp = NULL;
439 } else {
440 nlp = nlp->nlp_next[channelno];
441 }
442
443 } /* while (nlp) */
444
445 } /* end of for */
446
447 /* Now cleanup the iocb's */
448 iocbq = (IOCBQ *)tmo.q_first;
449 while (iocbq) {
450 /* Free the IoTag and the bmp */
451 iocb = &iocbq->iocb;
452 channelno = ((CHANNEL *)iocbq->channel)->channelno;
453 sbp = iocbq->sbp;
454 if (sbp && (sbp != STALE_PACKET)) {
455 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
456 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
457 } else {
458 (void) emlxs_unregister_pkt(
459 (CHANNEL *)iocbq->channel,
460 iocb->ULPIOTAG, 0);
461 }
462
463 mutex_enter(&sbp->mtx);
464 sbp->pkt_flags |= PACKET_IN_TIMEOUT;
465 mutex_exit(&sbp->mtx);
466 }
467
468 iocbq = (IOCBQ *)iocbq->next;
469
470 } /* end of while */
471
472 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
473
474 /* Now complete the transmit timeouts outside the locks */
475 iocbq = (IOCBQ *)tmo.q_first;
476 while (iocbq) {
477 /* Save the next iocbq for now */
478 next = (IOCBQ *)iocbq->next;
479
480 /* Unlink this iocbq */
481 iocbq->next = NULL;
482
483 /* Get the pkt */
484 sbp = (emlxs_buf_t *)iocbq->sbp;
485
486 if (sbp) {
487 /* Warning: Some FCT sbp's don't have */
488 /* fc_packet objects */
489 pkt = PRIV2PKT(sbp);
490
491 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
492 "TXQ abort: sbp=%p iotag=%d tmo=%d", sbp,
493 sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);
494
495 if (hba->state >= FC_LINK_UP) {
496 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
497 IOERR_ABORT_TIMEOUT, 1);
498 } else {
499 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
500 IOERR_LINK_DOWN, 1);
501 }
502
503 }
504
505 iocbq = next;
506
507 } /* end of while */
508
509
510
511 /* Now check the chip */
512 bzero((void *)&abort, sizeof (Q));
513
514 /* Check the HBA for outstanding IOs */
515 rc = 0;
516 mutex_enter(&EMLXS_FCTAB_LOCK);
517 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
518 sbp = hba->fc_table[iotag];
519
520 if (!sbp || sbp == STALE_PACKET) {
521 continue;
522 }
523 /* if channel not set we can not send a abort iocbq */
524 if (sbp->channel == NULL) {
525 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
526 "timer_check_pkts: Invalid IO found. iotag=%d, "
527 "no channel set", iotag);
528 continue;
529 }
530
531 /* Check if IO is valid */
532 if (!(sbp->pkt_flags & PACKET_VALID) ||
533 (sbp->pkt_flags & (PACKET_ULP_OWNED|
534 PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
535 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
536 "timer_check_pkts: Invalid IO found. iotag=%d",
537 iotag);
538
539 hba->fc_table[iotag] = STALE_PACKET;
540 hba->io_count--;
541 continue;
542 }
543
544 if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
545 (hba->timer_tics >= sbp->ticks)) {
546 rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
547 sbp, &abort, flag);
548
549 if (rc) {
550 break;
551 }
552 }
553 }
554 mutex_exit(&EMLXS_FCTAB_LOCK);
555
556 /* Now put the iocb's on the tx queue */
557 iocbq = (IOCBQ *)abort.q_first;
558 while (iocbq) {
559 /* Save the next iocbq for now */
560 next = (IOCBQ *)iocbq->next;
561
562 /* Unlink this iocbq */
563 iocbq->next = NULL;
564
565 /* Send this iocbq */
566 emlxs_tx_put(iocbq, 1);
567
568 iocbq = next;
569 }
570
571 /* Now trigger IO channel service to send these abort iocbq */
572 for (channelno = 0; channelno < hba->chan_count; channelno++) {
573 if (!flag[channelno]) {
574 continue;
575 }
576 cp = &hba->chan[channelno];
577
578 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
579 }
580
581 if (rc == 1) {
582 mutex_enter(&EMLXS_PORT_LOCK);
583 /* If a link reset or port reset is already requested, exit */
584 if (!(hba->reset_request & (FC_LINK_RESET | FC_PORT_RESET))) {
585 hba->reset_request |= FC_LINK_RESET;
586 mutex_exit(&EMLXS_PORT_LOCK);
587 /* Spawn a thread to reset the link */
588 emlxs_thread_spawn(hba, emlxs_reset_link_thread, NULL,
589 NULL);
590 goto exit;
591 }
592 mutex_exit(&EMLXS_PORT_LOCK);
593 } else if (rc == 2) {
594 mutex_enter(&EMLXS_PORT_LOCK);
595 /* If a port reset is already requested, exit */
596 if (!(hba->reset_request & FC_PORT_RESET)) {
597 hba->reset_request |= FC_PORT_RESET;
598 mutex_exit(&EMLXS_PORT_LOCK);
599 /* Spawn a thread to reset the adapter */
600 emlxs_thread_spawn(hba, emlxs_restart_thread, NULL,
601 NULL);
602 goto exit;
603 }
604 mutex_exit(&EMLXS_PORT_LOCK);
605 }
606
607 exit:
608 return (rc);
609
610 } /* emlxs_timer_check_pkts() */
611
612
613 static void
emlxs_timer_check_channels(emlxs_hba_t * hba,uint8_t * flag)614 emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag)
615 {
616 emlxs_port_t *port = &PPORT;
617 emlxs_config_t *cfg = &CFG;
618 int32_t channelno;
619 CHANNEL *cp;
620 uint32_t logit;
621
622 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
623 return;
624 }
625
626 for (channelno = 0; channelno < hba->chan_count; channelno++) {
627 cp = &hba->chan[channelno];
628
629 logit = 0;
630
631 /* Check for channel timeout now */
632 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
633 if (cp->timeout && (hba->timer_tics >= cp->timeout)) {
634 /* Check if there is work to do on channel and */
635 /* the link is still up */
636 if (cp->nodeq.q_first) {
637 flag[channelno] = 1;
638 cp->timeout = hba->timer_tics + 10;
639
640 if (hba->state >= FC_LINK_UP) {
641 logit = 1;
642 }
643 } else {
644 cp->timeout = 0;
645 }
646 }
647 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
648
649 if (logit) {
650 EMLXS_MSGF(EMLXS_CONTEXT,
651 &emlxs_chan_watchdog_msg,
652 "IO Channel %d cnt=%d,%d",
653 channelno,
654 hba->channel_tx_count,
655 hba->io_count);
656 }
657
658 /*
659 * If IO channel flag is set, request iocb servicing
660 * here to send any iocb's that may still be queued
661 */
662 if (flag[channelno]) {
663 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
664 }
665 }
666
667 return;
668
669 } /* emlxs_timer_check_channels() */
670
671
/*
 * Scan every node on this port for expired per-channel gate timers and
 * pending forced RSCNs.
 *
 * node_rwlock cannot be held across emlxs_node_timeout(), so when an
 * expired timer is found the scan is abandoned, the lock released, the
 * timeout serviced, and the whole scan restarted from the top.
 */
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	int32_t channelno;

	for (;;) {
		/* Check node gate flag for expiration */
		found = 0;

		/*
		 * We need to lock, scan, and unlock because we can't hold the
		 * lock while we call node_open
		 */
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
#ifdef NODE_THROTTLE_SUPPORT
				/* Check io_active count (Safety net) */
				if (nlp->io_active & 0x80000000) {
					/* Went negative; reset to zero */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_debug_msg,
					    "timer_check_nodes: did=%06x "
					    "io_active=0x%x. Reset to zero.",
					    nlp->nlp_DID, nlp->io_active);

					nlp->io_active = 0;
				}
#endif /* NODE_THROTTLE_SUPPORT */

				for (channelno = 0;
				    channelno < hba->chan_count;
				    channelno++) {
					/* Check if the node timer is active */
					/* and if timer has expired */
					if (nlp->nlp_tics[channelno] &&
					    (hba->timer_tics >=
					    nlp->nlp_tics[channelno])) {
						/* If so, set the flag and */
						/* break out */
						found = 1;
						flag[channelno] = 1;
						break;
					}
				}

				if (nlp->nlp_force_rscn &&
				    (hba->timer_tics >= nlp->nlp_force_rscn)) {
					nlp->nlp_force_rscn = 0;
					/*
					 * Generate an RSCN to
					 * wakeup ULP
					 */
					(void) emlxs_generate_rscn(port,
					    nlp->nlp_DID);
				}

				if (found) {
					break;
				}

				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}

		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}

		/* nlp/channelno identify the expired timer found above */
		emlxs_node_timeout(port, nlp, channelno);
	}

} /* emlxs_timer_check_nodes() */
755
756
757 static void
emlxs_timer_check_loopback(emlxs_hba_t * hba)758 emlxs_timer_check_loopback(emlxs_hba_t *hba)
759 {
760 emlxs_port_t *port = &PPORT;
761 emlxs_config_t *cfg = &CFG;
762 int32_t reset = 0;
763
764 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
765 return;
766 }
767
768 /* Check the loopback timer for expiration */
769 mutex_enter(&EMLXS_PORT_LOCK);
770
771 if (!hba->loopback_tics || (hba->timer_tics < hba->loopback_tics)) {
772 mutex_exit(&EMLXS_PORT_LOCK);
773 return;
774 }
775
776 hba->loopback_tics = 0;
777
778 if (hba->flag & FC_LOOPBACK_MODE) {
779 reset = 1;
780 }
781
782 mutex_exit(&EMLXS_PORT_LOCK);
783
784 if (reset) {
785 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
786 "LOOPBACK_MODE: Expired. Resetting...");
787 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
788 }
789
790 return;
791
792 } /* emlxs_timer_check_loopback() */
793
794
795 static void
emlxs_timer_check_linkup(emlxs_hba_t * hba)796 emlxs_timer_check_linkup(emlxs_hba_t *hba)
797 {
798 emlxs_port_t *port = &PPORT;
799 uint32_t linkup;
800
801 /* Check if all mbox commands from previous activity are processed */
802 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
803 mutex_enter(&EMLXS_MBOX_LOCK);
804 if (hba->mbox_queue.q_first) {
805 mutex_exit(&EMLXS_MBOX_LOCK);
806 return;
807 }
808 mutex_exit(&EMLXS_MBOX_LOCK);
809 }
810
811 /* Check the linkup timer for expiration */
812 mutex_enter(&EMLXS_PORT_LOCK);
813 linkup = 0;
814 if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
815 hba->linkup_timer = 0;
816
817 /* Make sure link is still ready */
818 if (hba->state >= FC_LINK_UP) {
819 linkup = 1;
820 }
821 }
822 mutex_exit(&EMLXS_PORT_LOCK);
823
824 /* Make the linkup callback */
825 if (linkup) {
826 emlxs_port_online(port);
827 }
828 return;
829
830 } /* emlxs_timer_check_linkup() */
831
832
833 static void
emlxs_timer_check_heartbeat(emlxs_hba_t * hba)834 emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
835 {
836 emlxs_port_t *port = &PPORT;
837 MAILBOXQ *mbq;
838 emlxs_config_t *cfg = &CFG;
839 int rc;
840
841 if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
842 return;
843 }
844
845 if (hba->timer_tics < hba->heartbeat_timer) {
846 return;
847 }
848
849 hba->heartbeat_timer = hba->timer_tics + 5;
850
851 /* Return if adapter interrupts have occurred */
852 if (hba->heartbeat_flag) {
853 hba->heartbeat_flag = 0;
854 return;
855 }
856 /* No adapter interrupts have occured for 5 seconds now */
857
858 /* Return if mailbox is busy */
859 /* This means the mailbox timer routine is watching for problems */
860 if (hba->mbox_timer) {
861 return;
862 }
863
864 /* Return if heartbeat is still outstanding */
865 if (hba->heartbeat_active) {
866 return;
867 }
868
869 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == 0) {
870 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
871 "Unable to allocate heartbeat mailbox.");
872 return;
873 }
874
875 emlxs_mb_heartbeat(hba, mbq);
876 hba->heartbeat_active = 1;
877
878 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
879 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
880 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
881 }
882
883 return;
884
885 } /* emlxs_timer_check_heartbeat() */
886
887
888 static void
emlxs_timer_check_fw_update(emlxs_hba_t * hba)889 emlxs_timer_check_fw_update(emlxs_hba_t *hba)
890 {
891 emlxs_port_t *port = &PPORT;
892 char msgbuf[128];
893
894 if (!(hba->fw_flag & FW_UPDATE_NEEDED)) {
895 hba->fw_timer = 0;
896 return;
897 }
898
899 if (hba->timer_tics < hba->fw_timer) {
900 return;
901 }
902
903 if (port->mode == MODE_TARGET) {
904 (void) strncpy(msgbuf,
905 "To trigger an update, a manual HBA or link reset "
906 "using emlxadm is required.",
907 (sizeof (msgbuf)-1));
908 } else {
909 (void) strncpy(msgbuf,
910 "To trigger an update, a manual HBA or link reset "
911 "using luxadm, fcadm, or emlxadm is required.",
912 (sizeof (msgbuf)-1));
913 }
914
915 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fw_update_msg, msgbuf);
916
917 /* Force message to console */
918 cmn_err(CE_WARN,
919 "^%s%d: Firmware update required.\n\t(%s)\n",
920 DRIVER_NAME, hba->ddiinst, msgbuf);
921
922 /* Set timer for 24 hours */
923 hba->fw_timer = hba->timer_tics + (60 * 60 * 24);
924
925 return;
926
927 } /* emlxs_timer_check_fw_update() */
928
929
/*
 * Check the discovery timer.  On expiration while the link is up,
 * flush (unregister) any FCP2 nodes that are still closed, then — on
 * pre-SLI4 adapters — issue a CLEAR_LA mailbox command to finish
 * link-up processing.
 */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;
	int rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[hba->channel_fcp] &
				    NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* No closed fcp2 nodes remain; stop flushing */
		if (!found) {
			break;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		/* Drop EMLXS_PORT_LOCK across the unregister call, */
		/* then loop to look for more closed fcp2 nodes */
		mutex_exit(&EMLXS_PORT_LOCK);

		(void) EMLXS_SLI_UNREG_NODE(port, nlp, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((hba->sli_mode < EMLXS_HBA_SLI4_MODE) && (send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or a FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
		} else {
			/* Change state and clear discovery timer */
			EMLXS_STATE_CHANGE_LOCKED(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, mbox);

			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				/* Not queued; release the mailbox */
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
			}
		}
	}

	return;

} /* emlxs_timer_check_discovery() */
1026
1027
1028 static void
emlxs_timer_check_clean_address(emlxs_port_t * port)1029 emlxs_timer_check_clean_address(emlxs_port_t *port)
1030 {
1031 emlxs_hba_t *hba = HBA;
1032 emlxs_buf_t *sbp;
1033
1034 if (port->clean_address_timer &&
1035 (hba->timer_tics < port->clean_address_timer)) {
1036 return;
1037 }
1038 port->clean_address_timer = 0;
1039
1040 sbp = port->clean_address_sbp;
1041 if (!sbp) {
1042 return;
1043 }
1044 port->clean_address_sbp = 0;
1045
1046 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_completion_msg,
1047 "Clean Address timeout: sid=%x prev=%x RATOV %d",
1048 port->did, port->prev_did, hba->fc_ratov);
1049
1050 if (EMLXS_SLI_REG_DID(port, FABRIC_DID, &port->fabric_sparam,
1051 sbp, NULL, NULL) == 0) {
1052 /* Deferred completion of this pkt until */
1053 /* login is complete */
1054 return;
1055 }
1056
1057 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
1058 IOERR_NO_RESOURCES, 1);
1059
1060 return;
1061
1062 } /* emlxs_timer_check_clean_address() */
1063
1064 extern void
emlxs_timer_cancel_clean_address(emlxs_port_t * port)1065 emlxs_timer_cancel_clean_address(emlxs_port_t *port)
1066 {
1067 emlxs_hba_t *hba = HBA;
1068 emlxs_buf_t *sbp;
1069
1070 port->clean_address_timer = 0;
1071 sbp = port->clean_address_sbp;
1072 if (!sbp) {
1073 return;
1074 }
1075 port->clean_address_sbp = 0;
1076
1077 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_completion_msg,
1078 "Clean Address cancel: sid=%x prev=%x RATOV %d",
1079 port->did, port->prev_did, hba->fc_ratov);
1080
1081 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
1082 IOERR_LINK_DOWN, 1);
1083
1084 } /* emlxs_timer_cancel_clean_address() */
1085
1086 static void
emlxs_timer_check_ub(emlxs_port_t * port)1087 emlxs_timer_check_ub(emlxs_port_t *port)
1088 {
1089 emlxs_hba_t *hba = HBA;
1090 emlxs_unsol_buf_t *ulistp;
1091 fc_unsol_buf_t *ubp;
1092 emlxs_ub_priv_t *ub_priv;
1093 uint32_t i;
1094
1095 if (port->ub_timer > hba->timer_tics) {
1096 return;
1097 }
1098
1099 port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;
1100
1101 /* Check the unsolicited buffers */
1102 mutex_enter(&EMLXS_UB_LOCK);
1103
1104 ulistp = port->ub_pool;
1105 while (ulistp) {
1106 /* Check buffers in this pool */
1107 for (i = 0; i < ulistp->pool_nentries; i++) {
1108 ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
1109 ub_priv = ubp->ub_fca_private;
1110
1111 if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
1112 continue;
1113 }
1114
1115 /* If buffer has timed out, print message and */
1116 /* increase timeout */
1117 if ((ub_priv->time + ub_priv->timeout) <=
1118 hba->timer_tics) {
1119 ub_priv->flags |= EMLXS_UB_TIMEOUT;
1120
1121 EMLXS_MSGF(EMLXS_CONTEXT,
1122 &emlxs_sfs_debug_msg,
1123 "Stale UB buffer detected (%d mins): "
1124 "buffer=%p (%x,%x,%x,%x)",
1125 (ub_priv->timeout / 60), ubp,
1126 ubp->ub_frame.type, ubp->ub_frame.s_id,
1127 ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);
1128
1129 /* Increase timeout period */
1130
1131 /* If timeout was 5 mins or less, */
1132 /* increase it to 10 mins */
1133 if (ub_priv->timeout <= (5 * 60)) {
1134 ub_priv->timeout = (10 * 60);
1135 }
1136 /* If timeout was 10 mins or less, */
1137 /* increase it to 30 mins */
1138 else if (ub_priv->timeout <= (10 * 60)) {
1139 ub_priv->timeout = (30 * 60);
1140 }
1141 /* Otherwise double it. */
1142 else {
1143 ub_priv->timeout *= 2;
1144 }
1145 }
1146 }
1147
1148 ulistp = ulistp->pool_next;
1149 }
1150
1151 mutex_exit(&EMLXS_UB_LOCK);
1152
1153 return;
1154
1155 } /* emlxs_timer_check_ub() */
1156
1157
/*
 * Handle a packet that timed out while outstanding on the chip.
 * Escalates on each successive timeout of the same packet:
 *	attempt 0: abort (link up) or close the exchange
 *	attempt 1: close the exchange
 *	attempt 2: request a link reset (return 1)
 *	attempt 3+: request an adapter reset (return 2)
 * Any abort/close IOCB built here is appended to <abortq> and the
 * packet's channel is marked in <flag> for servicing by the caller.
 *
 * EMLXS_FCTAB_LOCK must be held to call this.
 */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	CHANNEL *cp = (CHANNEL *)sbp->channel;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ: 1:Aborting. sbp=%p iotag=%d tmo=%d "
			    "flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

			/* The adapter will make 2 attempts to send ABTS */
			/* with 2*ratov timeout each time */
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
		} else {
			/* Link down: close the exchange instead */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ: 1:Closing. sbp=%p iotag=%d tmo=%d "
			    "flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, cp);

			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 1:
		/* First attempt didn't complete in time; close the XRI */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%d", sbp, sbp->iotag);

		iocbq =
		    emlxs_create_close_xri_cn(port, sbp->node, sbp->iotag,
		    cp);

		sbp->ticks = hba->timer_tics + 30;

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 2:
		/* Close didn't complete either; escalate to link reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%d", sbp,
		    sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:
		/* Last resort: request a full adapter reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%d",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	/* Queue the abort/close iocb for transmission by the caller */
	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	return (rc);

} /* emlxs_pkt_chip_timeout() */
1273
1274
1275 static void
emlxs_timer_check_pools(emlxs_hba_t * hba)1276 emlxs_timer_check_pools(emlxs_hba_t *hba)
1277 {
1278 uint32_t i;
1279 MEMSEG *seg;
1280 emlxs_config_t *cfg = &CFG;
1281
1282 if (cfg[CFG_MEM_DYNAMIC].current == 0) {
1283 return;
1284 }
1285
1286 if (hba->mem_timer > hba->timer_tics) {
1287 return;
1288 }
1289
1290 hba->mem_timer = hba->timer_tics + cfg[CFG_MEM_DYNAMIC].current;
1291
1292 seg = hba->memseg;
1293 for (i = 0; i < FC_MAX_SEG; i++, seg++) {
1294 if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
1295 emlxs_mem_pool_clean(hba, seg);
1296 }
1297 }
1298
1299 #ifdef SFCT_SUPPORT
1300 {
1301 uint32_t j;
1302 emlxs_port_t *port;
1303
1304 for (i = 0; i < MAX_VPORTS; i++) {
1305 port = &VPORT(i);
1306
1307 if (!(port->flag & EMLXS_PORT_BOUND) ||
1308 !(port->flag & EMLXS_TGT_ENABLED) ||
1309 !port->fct_memseg) {
1310 continue;
1311 }
1312
1313 seg = port->fct_memseg;
1314 for (j = 0; j < port->fct_memseg_cnt; j++, seg++) {
1315 if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
1316 emlxs_mem_pool_clean(hba, seg);
1317 }
1318 }
1319 }
1320 }
1321 #endif /* SFCT_SUPPORT */
1322
1323 return;
1324
1325 } /* emlxs_timer_check_pools() */
1326
1327
1328 #ifdef TX_WATCHDOG
1329
1330 static void
emlxs_tx_watchdog(emlxs_hba_t * hba)1331 emlxs_tx_watchdog(emlxs_hba_t *hba)
1332 {
1333 emlxs_port_t *port = &PPORT;
1334 NODELIST *nlp;
1335 uint32_t channelno;
1336 CHANNEL *cp;
1337 IOCBQ *next;
1338 IOCBQ *iocbq;
1339 IOCB *iocb;
1340 uint32_t found;
1341 MATCHMAP *bmp;
1342 Q abort;
1343 uint32_t iotag;
1344 emlxs_buf_t *sbp;
1345 fc_packet_t *pkt = NULL;
1346 uint32_t cmd;
1347 uint32_t did;
1348
1349 bzero((void *)&abort, sizeof (Q));
1350
1351 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1352
1353 mutex_enter(&EMLXS_FCTAB_LOCK);
1354 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
1355 sbp = hba->fc_table[iotag];
1356 if (sbp && (sbp != STALE_PACKET) &&
1357 (sbp->pkt_flags & PACKET_IN_TXQ)) {
1358 nlp = sbp->node;
1359 iocbq = &sbp->iocbq;
1360
1361 channelno = (CHANNEL *)(sbp->channel)->channelno;
1362 if (iocbq->flag & IOCB_PRIORITY) {
1363 iocbq =
1364 (IOCBQ *)nlp->nlp_ptx[channelno].
1365 q_first;
1366 } else {
1367 iocbq =
1368 (IOCBQ *)nlp->nlp_tx[channelno].
1369 q_first;
1370 }
1371
1372 /* Find a matching entry */
1373 found = 0;
1374 while (iocbq) {
1375 if (iocbq == &sbp->iocbq) {
1376 found = 1;
1377 break;
1378 }
1379
1380 iocbq = (IOCBQ *)iocbq->next;
1381 }
1382
1383 if (!found) {
1384 if (!(sbp->pkt_flags & PACKET_STALE)) {
1385 mutex_enter(&sbp->mtx);
1386 sbp->pkt_flags |=
1387 PACKET_STALE;
1388 mutex_exit(&sbp->mtx);
1389 } else {
1390 if (abort.q_first == 0) {
1391 abort.q_first =
1392 &sbp->iocbq;
1393 } else {
1394 ((IOCBQ *)abort.
1395 q_last)->next =
1396 &sbp->iocbq;
1397 }
1398
1399 abort.q_last = &sbp->iocbq;
1400 abort.q_cnt++;
1401 }
1402
1403 } else {
1404 if ((sbp->pkt_flags & PACKET_STALE)) {
1405 mutex_enter(&sbp->mtx);
1406 sbp->pkt_flags &=
1407 ~PACKET_STALE;
1408 mutex_exit(&sbp->mtx);
1409 }
1410 }
1411 }
1412 }
1413 mutex_exit(&EMLXS_FCTAB_LOCK);
1414
1415 iocbq = (IOCBQ *)abort.q_first;
1416 while (iocbq) {
1417 next = (IOCBQ *)iocbq->next;
1418 iocbq->next = NULL;
1419 sbp = (emlxs_buf_t *)iocbq->sbp;
1420
1421 pkt = PRIV2PKT(sbp);
1422 if (pkt) {
1423 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
1424 cmd = *((uint32_t *)pkt->pkt_cmd);
1425 cmd = LE_SWAP32(cmd);
1426 }
1427
1428
1429 emlxs_tx_put(iocbq, 0);
1430
1431 iocbq = next;
1432
1433 } /* end of while */
1434
1435 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1436
1437 return;
1438
1439 } /* emlxs_tx_watchdog() */
1440
1441 #endif /* TX_WATCHDOG */
1442
1443
1444 #ifdef DHCHAP_SUPPORT
1445
/*
 * Check DHCHAP authentication-response and reauthentication timers.
 *
 * NOTE(review): only the FIRST node of each hash bucket is examined;
 * nodes chained via nlp_list_next are never checked, unlike the other
 * node-table scans in this file (see emlxs_timer_check_nodes()).
 * Also, no port->node_rwlock is taken here.  Both look suspicious —
 * confirm whether they are intentional before changing, since the
 * timeout handlers may tear down nodes and a naive chain walk could
 * dereference freed memory.
 */
static void
emlxs_timer_check_dhchap(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint32_t i;
	NODELIST *ndlp = NULL;

	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		ndlp = port->node_table[i];

		if (!ndlp) {
			continue;
		}

		/* Check authentication response timeout */
		if (ndlp->node_dhc.nlp_authrsp_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_authrsp_tmo)) {
			/* Trigger authresp timeout handler */
			(void) emlxs_dhc_authrsp_timeout(port, ndlp, NULL);
		}

		/* Check reauthentication timeout */
		if (ndlp->node_dhc.nlp_reauth_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_reauth_tmo)) {
			/* Trigger reauth timeout handler */
			emlxs_dhc_reauth_timeout(port, NULL, ndlp);
		}
	}
	return;

} /* emlxs_timer_check_dhchap */
1477
1478 #endif /* DHCHAP_SUPPORT */
1479