1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <emlxs.h>
28
29 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
30 EMLXS_MSG_DEF(EMLXS_FCP_C);
31
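/*
 * Note: this macro is not self-contained; it expands PADDR() against an
 * index variable "i" that must exist in the caller's scope (see the BDE
 * cleanup loop in emlxs_post_buffer() below).
 */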
32 #define EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
33 PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow))
34
35 static void emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
36 Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
37
38 #define SCSI3_PERSISTENT_RESERVE_IN 0x5e
39 #define SCSI_INQUIRY 0x12
40 #define SCSI_RX_DIAG 0x1C
41
42
43 /*
44 * emlxs_handle_fcp_event
45 *
46 * Description: Process an FCP Rsp Ring completion
47 *
48 */
49 /* ARGSUSED */
50 extern void
51 emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
52 {
53 emlxs_port_t *port = &PPORT;
54 emlxs_config_t *cfg = &CFG;
55 IOCB *cmd;
56 emlxs_buf_t *sbp;
57 fc_packet_t *pkt = NULL;
58 #ifdef SAN_DIAG_SUPPORT
59 NODELIST *ndlp;
60 #endif
61 uint32_t iostat;
62 uint8_t localstat;
63 fcp_rsp_t *rsp;
64 uint32_t rsp_data_resid;
65 uint32_t check_underrun;
66 uint8_t asc;
67 uint8_t ascq;
68 uint8_t scsi_status;
69 uint8_t sense;
70 uint32_t did;
71 uint32_t fix_it;
72 uint8_t *scsi_cmd;
73 uint8_t scsi_opcode;
74 uint16_t scsi_dl;
75 uint32_t data_rx;
76 uint32_t length;
77
78 cmd = &iocbq->iocb;
79
80 /* Initialize the status */
81 iostat = cmd->ULPSTATUS;
82 localstat = 0;
83 scsi_status = 0;
84 asc = 0;
85 ascq = 0;
86 sense = 0;
87 check_underrun = 0;
88 fix_it = 0;
89
90 HBASTATS.FcpEvent++;
91
92 sbp = (emlxs_buf_t *)iocbq->sbp;
93
94 if (!sbp) {
95 /* completion with missing xmit command */
96 HBASTATS.FcpStray++;
97
98 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
99 "cmd=%x iotag=%d", cmd->ULPCOMMAND, cmd->ULPIOTAG);
100
101 return;
102 }
103
104 HBASTATS.FcpCompleted++;
105
106 #ifdef SAN_DIAG_SUPPORT
107 emlxs_update_sd_bucket(sbp);
108 #endif /* SAN_DIAG_SUPPORT */
109
110 pkt = PRIV2PKT(sbp);
111
112 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
113 scsi_cmd = (uint8_t *)pkt->pkt_cmd;
114 scsi_opcode = scsi_cmd[12];
115 data_rx = 0;
116
117 /* Sync data in data buffer only on FC_PKT_FCP_READ */
118 if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
119 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
120 DDI_DMA_SYNC_FORKERNEL);
121
122 #ifdef TEST_SUPPORT
123 if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
124 (pkt->pkt_datalen >= 512)) {
125 hba->underrun_counter--;
126 iostat = IOSTAT_FCP_RSP_ERROR;
127
128 /* Report 512 bytes missing by adapter */
129 cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
130
131 /* Corrupt 512 bytes of Data buffer */
132 bzero((uint8_t *)pkt->pkt_data, 512);
133
134 /* Set FCP response to STATUS_GOOD */
135 bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
136 }
137 #endif /* TEST_SUPPORT */
138 }
139
140 /* Process the pkt */
141 mutex_enter(&sbp->mtx);
142
143 /* Check for immediate return */
144 if ((iostat == IOSTAT_SUCCESS) &&
145 (pkt->pkt_comp) &&
146 !(sbp->pkt_flags &
147 (PACKET_ULP_OWNED | PACKET_COMPLETED |
148 PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
149 PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
150 PACKET_IN_ABORT | PACKET_POLLED))) {
151 HBASTATS.FcpGood++;
152
153 sbp->pkt_flags |=
154 (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
155 PACKET_COMPLETED | PACKET_ULP_OWNED);
156 mutex_exit(&sbp->mtx);
157
158 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
159 emlxs_unswap_pkt(sbp);
160 #endif /* EMLXS_MODREV2X */
161
162 #ifdef FMA_SUPPORT
163 emlxs_check_dma(hba, sbp);
164 #endif /* FMA_SUPPORT */
165
166 cp->ulpCmplCmd++;
167 (*pkt->pkt_comp) (pkt);
168
169 #ifdef FMA_SUPPORT
170 if (hba->flag & FC_DMA_CHECK_ERROR) {
171 emlxs_thread_spawn(hba, emlxs_restart_thread,
172 NULL, NULL);
173 }
174 #endif /* FMA_SUPPORT */
175
176 return;
177 }
178
179 /*
180 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
181 * is reported.
182 */
183
184 /* Skip if no response error was reported or no response buffer exists */
185 if ((iostat != IOSTAT_FCP_RSP_ERROR) || (pkt->pkt_rsplen == 0)) {
186 goto done;
187 }
188
189 EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
190 DDI_DMA_SYNC_FORKERNEL);
191
192 /* Get the response buffer pointer */
193 rsp = (fcp_rsp_t *)pkt->pkt_resp;
194
195 /* Validate the response payload */
196 if (!rsp->fcp_u.fcp_status.resid_under &&
197 !rsp->fcp_u.fcp_status.resid_over) {
198 rsp->fcp_resid = 0;
199 }
200
201 if (!rsp->fcp_u.fcp_status.rsp_len_set) {
202 rsp->fcp_response_len = 0;
203 }
204
205 if (!rsp->fcp_u.fcp_status.sense_len_set) {
206 rsp->fcp_sense_len = 0;
207 }
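/*
 * FCP_RSP payload layout (offsets illustrative, per the FCP spec):
 *
 *	+------------------+ offset 0
 *	| fcp_rsp_t header | fixed status/flag/resid fields
 *	+------------------+ sizeof (fcp_rsp_t)
 *	| response data    | fcp_response_len bytes (if rsp_len_set)
 *	+------------------+
 *	| sense data       | fcp_sense_len bytes (if sense_len_set)
 *	+------------------+
 *
 * The sum of all three regions must fit within pkt_rsplen; otherwise
 * the response is rejected below as IOSTAT_RSP_INVALID.
 */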
208
209 length = sizeof (fcp_rsp_t) + LE_SWAP32(rsp->fcp_response_len) +
210 LE_SWAP32(rsp->fcp_sense_len);
211
212 if (length > pkt->pkt_rsplen) {
213 iostat = IOSTAT_RSP_INVALID;
214 pkt->pkt_data_resid = pkt->pkt_datalen;
215 goto done;
216 }
217
218 /* Set the valid response flag */
219 sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
220
221 scsi_status = rsp->fcp_u.fcp_status.scsi_status;
222
223 #ifdef SAN_DIAG_SUPPORT
224 ndlp = (NODELIST *)iocbq->node;
225 if (scsi_status == SCSI_STAT_QUE_FULL) {
226 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
227 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
228 } else if (scsi_status == SCSI_STAT_BUSY) {
229 emlxs_log_sd_scsi_event(port,
230 SD_SCSI_SUBCATEGORY_DEVBSY,
231 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
232 }
233 #endif
234
235 /*
236 * Convert a task abort to a check condition with no data
237 * transferred. We saw data corruption when Solaris received
238 * a Task Abort from a tape.
239 */
240
241 if (scsi_status == SCSI_STAT_TASK_ABORT) {
242 EMLXS_MSGF(EMLXS_CONTEXT,
243 &emlxs_fcp_completion_error_msg,
244 "Task Abort. "
245 "Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
246 did, sbp, scsi_opcode, pkt->pkt_datalen);
247
248 rsp->fcp_u.fcp_status.scsi_status =
249 SCSI_STAT_CHECK_COND;
250 rsp->fcp_u.fcp_status.rsp_len_set = 0;
251 rsp->fcp_u.fcp_status.sense_len_set = 0;
252 rsp->fcp_u.fcp_status.resid_over = 0;
253
254 if (pkt->pkt_datalen) {
255 rsp->fcp_u.fcp_status.resid_under = 1;
256 rsp->fcp_resid =
257 LE_SWAP32(pkt->pkt_datalen);
258 } else {
259 rsp->fcp_u.fcp_status.resid_under = 0;
260 rsp->fcp_resid = 0;
261 }
262
263 scsi_status = SCSI_STAT_CHECK_COND;
264 }
265
266 /*
267 * We only need to check underrun if data could
268 * have been sent
269 */
270
271 /* Always check underrun if status is good */
272 if (scsi_status == SCSI_STAT_GOOD) {
273 check_underrun = 1;
274 }
275 /* Check the sense codes if this is a check condition */
276 else if (scsi_status == SCSI_STAT_CHECK_COND) {
277 check_underrun = 1;
278
279 /* Check if sense data was provided */
280 if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
281 sense = *((uint8_t *)rsp + 32 + 2);
282 asc = *((uint8_t *)rsp + 32 + 12);
283 ascq = *((uint8_t *)rsp + 32 + 13);
284 }
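/*
 * The hardcoded 32 above is the assumed offset of the sense data
 * within the FCP_RSP payload; bytes 2, 12 and 13 of fixed-format
 * SCSI sense data hold the sense key, ASC and ASCQ, respectively.
 */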
285
286 #ifdef SAN_DIAG_SUPPORT
287 emlxs_log_sd_scsi_check_event(port,
288 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
289 scsi_opcode, sense, asc, ascq);
290 #endif
291 }
292 /* Status is not good and this is not a check condition */
293 /* No data should have been sent */
294 else {
295 check_underrun = 0;
296 }
297
298 /* Initialize the resids */
299 pkt->pkt_resp_resid = 0;
300 pkt->pkt_data_resid = 0;
301
302 /* Check if no data was to be transferred */
303 if (pkt->pkt_datalen == 0) {
304 goto done;
305 }
306
307 /* Get the residual underrun count reported by the SCSI reply */
308 rsp_data_resid = (rsp->fcp_u.fcp_status.resid_under) ?
309 LE_SWAP32(rsp->fcp_resid) : 0;
310
311 /* Set pkt_data_resid to the residual reported in the SCSI response */
312 pkt->pkt_data_resid = rsp_data_resid;
313
314 /* Adjust the pkt_data_resid field if needed */
315 if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
316 /*
317 * Get the residual underrun count reported by
318 * our adapter
319 */
320 pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
321
322 #ifdef SAN_DIAG_SUPPORT
323 if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
324 emlxs_log_sd_fc_rdchk_event(port,
325 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
326 scsi_opcode, pkt->pkt_data_resid);
327 }
328 #endif
329
330 /* Get the actual amount of data transferred */
331 data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
332
333 /*
334 * If the residual being reported by the adapter is
335 * greater than the residual being reported in the
336 * reply, then we have a true underrun.
337 */
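/*
 * Worked example (hypothetical values): a 4096-byte read completes
 * with fcpi_parm = 1024 (the adapter received only 3072 bytes) while
 * the FCP_RSP reports a residual of 0.  The adapter residual (1024)
 * exceeds the response residual (0), so this is a true underrun;
 * unless one of the patches below repairs the response, the IO
 * completes as IOSTAT_DATA_UNDERRUN with pkt_data_resid set to
 * pkt_datalen.  Symmetrically, a response residual larger than the
 * adapter residual is treated as IOSTAT_DATA_OVERRUN further below.
 */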
338 if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
339 switch (scsi_opcode) {
340 case SCSI_INQUIRY:
341 scsi_dl = scsi_cmd[16];
342 break;
343
344 case SCSI_RX_DIAG:
345 scsi_dl =
346 (scsi_cmd[15] * 0x100) +
347 scsi_cmd[16];
348 break;
349
350 default:
351 scsi_dl = pkt->pkt_datalen;
352 }
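/*
 * scsi_cmd points to the FCP_CMND payload, so CDB byte n is
 * scsi_cmd[12 + n].  scsi_cmd[16] is therefore the INQUIRY
 * allocation length (CDB byte 4), and scsi_cmd[15]/scsi_cmd[16]
 * form the 16-bit RECEIVE DIAGNOSTIC RESULTS allocation length.
 */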
353
354 #ifdef FCP_UNDERRUN_PATCH1
355 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
356 /*
357 * If status is not good and no data was
358 * actually transferred, then we must fix
359 * the issue
360 */
361 if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
362 fix_it = 1;
363
364 EMLXS_MSGF(EMLXS_CONTEXT,
365 &emlxs_fcp_completion_error_msg,
366 "Underrun(1). Fixed. "
367 "did=0x%06x sbp=%p cmd=%02x "
368 "dl=%d,%d rx=%d rsp=%d",
369 did, sbp, scsi_opcode,
370 pkt->pkt_datalen, scsi_dl,
371 (pkt->pkt_datalen -
372 pkt->pkt_data_resid),
373 rsp_data_resid);
374
375 }
376 }
377 #endif /* FCP_UNDERRUN_PATCH1 */
378
379
380 #ifdef FCP_UNDERRUN_PATCH2
381 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
382 if (scsi_status == SCSI_STAT_GOOD) {
383 emlxs_msg_t *msg;
384
385 msg = &emlxs_fcp_completion_error_msg;
386 /*
387  * If status is good, this is an inquiry
388  * request, and the amount of data
389  * requested <= the data received, then
390  * we must fix the issue.
391  */
395
396 if ((scsi_opcode == SCSI_INQUIRY) &&
397 (pkt->pkt_datalen >= data_rx) &&
398 (scsi_dl <= data_rx)) {
399 fix_it = 1;
400
401 EMLXS_MSGF(EMLXS_CONTEXT, msg,
402 "Underrun(2). Fixed. "
403 "did=0x%06x sbp=%p "
404 "cmd=%02x dl=%d,%d "
405 "rx=%d rsp=%d",
406 did, sbp, scsi_opcode,
407 pkt->pkt_datalen, scsi_dl,
408 data_rx, rsp_data_resid);
409
410 }
411
412 /*
413 * If status is good and this is an
414 * inquiry request and the amount of
415 * data requested >= 128 bytes, but
416 * only 128 bytes were received,
417 * then we must fix the issue.
418 */
419 else if ((scsi_opcode == SCSI_INQUIRY) &&
420 (pkt->pkt_datalen >= 128) &&
421 (scsi_dl >= 128) && (data_rx == 128)) {
422 fix_it = 1;
423
424 EMLXS_MSGF(EMLXS_CONTEXT, msg,
425 "Underrun(3). Fixed. "
426 "did=0x%06x sbp=%p "
427 "cmd=%02x dl=%d,%d "
428 "rx=%d rsp=%d",
429 did, sbp, scsi_opcode,
430 pkt->pkt_datalen, scsi_dl,
431 data_rx, rsp_data_resid);
432
433 }
434 }
435 }
436 #endif /* FCP_UNDERRUN_PATCH2 */
437
438 /*
439 * Check if SCSI response payload should be
440 * fixed or if a DATA_UNDERRUN should be
441 * reported
442 */
443 if (fix_it) {
444 /*
445 * Fix the SCSI response payload itself
446 */
447 rsp->fcp_u.fcp_status.resid_under = 1;
448 rsp->fcp_resid =
449 LE_SWAP32(pkt->pkt_data_resid);
450 } else {
451 /*
452 * Change the status from
453 * IOSTAT_FCP_RSP_ERROR to
454 * IOSTAT_DATA_UNDERRUN
455 */
456 iostat = IOSTAT_DATA_UNDERRUN;
457 pkt->pkt_data_resid =
458 pkt->pkt_datalen;
459 }
460 }
461
462 /*
463 * If the residual being reported by the adapter is
464 * less than the residual being reported in the reply,
465 * then we have a true overrun. Since we don't know
466 * where the extra data came from or went to then we
467 * cannot trust anything we received
468 */
469 else if (rsp_data_resid > pkt->pkt_data_resid) {
470 /*
471 * Change the status from
472 * IOSTAT_FCP_RSP_ERROR to
473 * IOSTAT_DATA_OVERRUN
474 */
475 iostat = IOSTAT_DATA_OVERRUN;
476 pkt->pkt_data_resid = pkt->pkt_datalen;
477 }
478
479 } else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
480 (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
481 /*
482 * Get the residual underrun count reported by
483 * our adapter
484 */
485 pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
486
487 #ifdef SAN_DIAG_SUPPORT
488 if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
489 emlxs_log_sd_fc_rdchk_event(port,
490 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
491 scsi_opcode, pkt->pkt_data_resid);
492 }
493 #endif /* SAN_DIAG_SUPPORT */
494
495 /* Get the actual amount of data transferred */
496 data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
497
498 /*
499 * If the residual being reported by the adapter is
500 * greater than the residual being reported in the
501 * reply, then we have a true underrun.
502 */
503 if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
504
505 scsi_dl = pkt->pkt_datalen;
506
507 #ifdef FCP_UNDERRUN_PATCH1
508 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
509 /*
510 * If status is not good and no data was
511 * actually transferred, then we must fix
512 * the issue
513 */
514 if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
515 fix_it = 1;
516
517 EMLXS_MSGF(EMLXS_CONTEXT,
518 &emlxs_fcp_completion_error_msg,
519 "Underrun(1). Fixed. "
520 "did=0x%06x sbp=%p cmd=%02x "
521 "dl=%d,%d rx=%d rsp=%d",
522 did, sbp, scsi_opcode,
523 pkt->pkt_datalen, scsi_dl,
524 (pkt->pkt_datalen -
525 pkt->pkt_data_resid),
526 rsp_data_resid);
527
528 }
529 }
530 #endif /* FCP_UNDERRUN_PATCH1 */
531
532 /*
533 * Check if SCSI response payload should be
534 * fixed or if a DATA_UNDERRUN should be
535 * reported
536 */
537 if (fix_it) {
538 /*
539 * Fix the SCSI response payload itself
540 */
541 rsp->fcp_u.fcp_status.resid_under = 1;
542 rsp->fcp_resid =
543 LE_SWAP32(pkt->pkt_data_resid);
544 } else {
545 /*
546 * Change the status from
547 * IOSTAT_FCP_RSP_ERROR to
548 * IOSTAT_DATA_UNDERRUN
549 */
550 iostat = IOSTAT_DATA_UNDERRUN;
551 pkt->pkt_data_resid =
552 pkt->pkt_datalen;
553 }
554 }
555
556 /*
557 * If the residual being reported by the adapter is
558 * less than the residual being reported in the reply,
559 * then we have a true overrun. Since we don't know
560 * where the extra data came from or went to then we
561 * cannot trust anything we received
562 */
563 else if (rsp_data_resid > pkt->pkt_data_resid) {
564 /*
565 * Change the status from
566 * IOSTAT_FCP_RSP_ERROR to
567 * IOSTAT_DATA_OVERRUN
568 */
569 iostat = IOSTAT_DATA_OVERRUN;
570 pkt->pkt_data_resid = pkt->pkt_datalen;
571 }
572 }
573
574 done:
575
576 /* Print completion message */
577 switch (iostat) {
578 case IOSTAT_SUCCESS:
579 /* Build SCSI GOOD status */
580 if (pkt->pkt_rsplen) {
581 bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
582 }
583 break;
584
585 case IOSTAT_FCP_RSP_ERROR:
586 break;
587
588 case IOSTAT_REMOTE_STOP:
589 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
590 "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
591 scsi_opcode);
592 break;
593
594 case IOSTAT_LOCAL_REJECT:
595 localstat = cmd->un.grsp.perr.statLocalError;
596
597 switch (localstat) {
598 case IOERR_SEQUENCE_TIMEOUT:
599 EMLXS_MSGF(EMLXS_CONTEXT,
600 &emlxs_fcp_completion_error_msg,
601 "Local reject. "
602 "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
603 emlxs_error_xlate(localstat), did, sbp,
604 scsi_opcode, pkt->pkt_timeout);
605 break;
606
607 default:
608 EMLXS_MSGF(EMLXS_CONTEXT,
609 &emlxs_fcp_completion_error_msg,
610 "Local reject. %s 0x%06x %p %02x (%x)(%x)",
611 emlxs_error_xlate(localstat), did, sbp,
612 scsi_opcode, (uint16_t)cmd->ULPIOTAG,
613 (uint16_t)cmd->ULPCONTEXT);
614 }
615
616 break;
617
618 case IOSTAT_NPORT_RJT:
619 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
620 "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
621 scsi_opcode);
622 break;
623
624 case IOSTAT_FABRIC_RJT:
625 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
626 "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
627 scsi_opcode);
628 break;
629
630 case IOSTAT_NPORT_BSY:
631 #ifdef SAN_DIAG_SUPPORT
632 ndlp = (NODELIST *)iocbq->node;
633 emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
634 #endif
635
636 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
637 "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
638 scsi_opcode);
639 break;
640
641 case IOSTAT_FABRIC_BSY:
642 #ifdef SAN_DIAG_SUPPORT
643 ndlp = (NODELIST *)iocbq->node;
644 emlxs_log_sd_fc_bsy_event(port, NULL);
645 #endif
646
647 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
648 "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
649 scsi_opcode);
650 break;
651
652 case IOSTAT_INTERMED_RSP:
653 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
654 "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
655 sbp, scsi_opcode);
656 break;
657
658 case IOSTAT_LS_RJT:
659 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
660 "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
661 scsi_opcode);
662 break;
663
664 case IOSTAT_DATA_UNDERRUN:
665 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
666 "Underrun. did=0x%06x sbp=%p cmd=%02x "
667 "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
668 did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
669 rsp_data_resid, scsi_status, sense, asc, ascq);
670 break;
671
672 case IOSTAT_DATA_OVERRUN:
673 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
674 "Overrun. did=0x%06x sbp=%p cmd=%02x "
675 "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
676 did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
677 rsp_data_resid, scsi_status, sense, asc, ascq);
678 break;
679
680 case IOSTAT_RSP_INVALID:
681 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
682 "Rsp Invalid. did=0x%06x sbp=%p cmd=%02x dl=%d rl=%d"
683 "(%d, %d, %d)",
684 did, sbp, scsi_opcode, pkt->pkt_datalen, pkt->pkt_rsplen,
685 LE_SWAP32(rsp->fcp_resid),
686 LE_SWAP32(rsp->fcp_sense_len),
687 LE_SWAP32(rsp->fcp_response_len));
688 break;
689
690 default:
691 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
692 "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
693 iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
694 scsi_opcode);
695 break;
696 }
697
698 if (iostat == IOSTAT_SUCCESS) {
699 HBASTATS.FcpGood++;
700 } else {
701 HBASTATS.FcpError++;
702 }
703
704 mutex_exit(&sbp->mtx);
705
706 emlxs_pkt_complete(sbp, iostat, localstat, 0);
707
708 return;
709
710 } /* emlxs_handle_fcp_event() */
711
712
713 /*
714 * emlxs_post_buffer
715 *
716 * This routine posts "cnt" buffers to the
717 * ring with the QUE_RING_BUF64_CN command,
718 * which allows up to 2 buffers per command.
719 * Returns the number of buffers NOT posted.
720 */
721 /* SLI3 */
722 extern int
723 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
724 {
725 emlxs_port_t *port = &PPORT;
726 IOCB *icmd;
727 IOCBQ *iocbq;
728 MATCHMAP *mp;
729 uint16_t tag;
730 uint32_t maxqbuf;
731 int32_t i;
732 int32_t j;
733 uint32_t seg;
734 uint32_t size;
735
736 mp = 0;
737 maxqbuf = 2;
738 tag = (uint16_t)cnt;
739 cnt += rp->fc_missbufcnt;
740
741 if (rp->ringno == hba->channel_els) {
742 seg = MEM_BUF;
743 size = MEM_ELSBUF_SIZE;
744 } else if (rp->ringno == hba->channel_ip) {
745 seg = MEM_IPBUF;
746 size = MEM_IPBUF_SIZE;
747 } else if (rp->ringno == hba->channel_ct) {
748 seg = MEM_CTBUF;
749 size = MEM_CTBUF_SIZE;
750 }
751 #ifdef SFCT_SUPPORT
752 else if (rp->ringno == hba->CHANNEL_FCT) {
753 seg = MEM_FCTBUF;
754 size = MEM_FCTBUF_SIZE;
755 }
756 #endif /* SFCT_SUPPORT */
757 else {
758 return (0);
759 }
760
761 /*
762 * While there are buffers to post
763 */
764 while (cnt) {
765 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
766 rp->fc_missbufcnt = cnt;
767 return (cnt);
768 }
769
770 iocbq->channel = (void *)&hba->chan[rp->ringno];
771 iocbq->port = (void *)port;
772 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
773
774 icmd = &iocbq->iocb;
775
776 /*
777 * Max buffers can be posted per command
778 */
779 for (i = 0; i < maxqbuf; i++) {
780 if (cnt <= 0)
781 break;
782
783 /* fill in BDEs for command */
784 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg))
785 == 0) {
786 icmd->ULPBDECOUNT = i;
787 for (j = 0; j < i; j++) {
788 mp = EMLXS_GET_VADDR(hba, rp, icmd);
789 if (mp) {
790 emlxs_mem_put(hba, seg,
791 (void *)mp);
792 }
793 }
794
795 rp->fc_missbufcnt = cnt + i;
796
797 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
798
799 return (cnt + i);
800 }
801
802 /*
803 * map that page and save the address pair for lookup
804 * later
805 */
806 emlxs_mem_map_vaddr(hba,
807 rp,
808 mp,
809 (uint32_t *)&icmd->un.cont64[i].addrHigh,
810 (uint32_t *)&icmd->un.cont64[i].addrLow);
811
812 icmd->un.cont64[i].tus.f.bdeSize = size;
813 icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;
814
815 /*
816 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
817 * "UB Post: ring=%d addr=%08x%08x size=%d",
818 * rp->ringno, icmd->un.cont64[i].addrHigh,
819 * icmd->un.cont64[i].addrLow, size);
820 */
821
822 cnt--;
823 }
824
825 icmd->ULPIOTAG = tag;
826 icmd->ULPBDECOUNT = i;
827 icmd->ULPLE = 1;
828 icmd->ULPOWNER = OWN_CHIP;
829 /* Used as a delimiter between commands */
830 iocbq->bp = (void *)mp;
831
832 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
833 }
834
835 rp->fc_missbufcnt = 0;
836
837 return (0);
838
839 } /* emlxs_post_buffer() */
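/*
 * Illustrative caller (hypothetical): an unsolicited-buffer handler
 * that has consumed "n" buffers from a ring would typically repost
 * them like this:
 *
 *	if (emlxs_post_buffer(hba, rp, n) != 0) {
 *		// Shortfall is remembered in rp->fc_missbufcnt and
 *		// is automatically retried on a later call.
 *	}
 */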
840
841
842 static void
843 emlxs_fcp_tag_nodes(emlxs_port_t *port)
844 {
845 NODELIST *nlp;
846 int i;
847
848 /* We will process all nodes with this tag later */
849 rw_enter(&port->node_rwlock, RW_READER);
850 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
851 nlp = port->node_table[i];
852 while (nlp != NULL) {
853 nlp->nlp_tag = 1;
854 nlp = nlp->nlp_list_next;
855 }
856 }
857 rw_exit(&port->node_rwlock);
858 }
859
860
861 static NODELIST *
862 emlxs_find_tagged_node(emlxs_port_t *port)
863 {
864 NODELIST *nlp;
865 NODELIST *tagged;
866 int i;
867
868 /* Find first node */
869 rw_enter(&port->node_rwlock, RW_READER);
870 tagged = 0;
871 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
872 nlp = port->node_table[i];
873 while (nlp != NULL) {
874 if (!nlp->nlp_tag) {
875 nlp = nlp->nlp_list_next;
876 continue;
877 }
878 nlp->nlp_tag = 0;
879
880 if (nlp->nlp_Rpi == FABRIC_RPI) {
881 nlp = nlp->nlp_list_next;
882 continue;
883 }
884 tagged = nlp;
885 break;
886 }
887 if (tagged) {
888 break;
889 }
890 }
891 rw_exit(&port->node_rwlock);
892 return (tagged);
893 }
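/*
 * Usage pattern (sketch): emlxs_fcp_tag_nodes() and
 * emlxs_find_tagged_node() together form a safe iterator over the
 * node table.  Tag every node once, then drain the tags one node at
 * a time so node_rwlock is never held across the per-node work:
 *
 *	emlxs_fcp_tag_nodes(port);
 *	while ((nlp = emlxs_find_tagged_node(port)) != NULL) {
 *		(void) emlxs_rpi_pause_notify(port, nlp->rpip);
 *	}
 */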
894
895
896 extern int
897 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
898 {
899 emlxs_hba_t *hba = HBA;
900 emlxs_config_t *cfg;
901 NODELIST *nlp;
902 fc_affected_id_t *aid;
903 uint32_t mask;
904 uint32_t aff_d_id;
905 uint32_t linkdown;
906 uint32_t vlinkdown;
907 uint32_t action;
908 int i;
909 uint32_t unreg_vpi;
910 uint32_t update;
911 uint32_t adisc_support;
912 uint32_t clear_all;
913 uint8_t format;
914
915 /* Target mode only uses this routine for linkdowns */
916 if ((port->mode == MODE_TARGET) && (scope != 0xffffffff) &&
917 (scope != 0xfeffffff) && (scope != 0xfdffffff)) {
918 return (0);
919 }
920
921 cfg = &CFG;
922 aid = (fc_affected_id_t *)&scope;
923 linkdown = 0;
924 vlinkdown = 0;
925 unreg_vpi = 0;
926 update = 0;
927 clear_all = 0;
928
929 if (!(port->flag & EMLXS_PORT_BOUND)) {
930 return (0);
931 }
932
933 format = aid->aff_format;
934
935 switch (format) {
936 case 0: /* Port */
937 mask = 0x00ffffff;
938 break;
939
940 case 1: /* Area */
941 mask = 0x00ffff00;
942 break;
943
944 case 2: /* Domain */
945 mask = 0x00ff0000;
946 break;
947
948 case 3: /* Network */
949 mask = 0x00000000;
950 break;
951
952 #ifdef DHCHAP_SUPPORT
953 case 0xfe: /* Virtual link down */
954 mask = 0x00000000;
955 vlinkdown = 1;
956 break;
957 #endif /* DHCHAP_SUPPORT */
958
959 case 0xff: /* link is down */
960 mask = 0x00000000;
961 linkdown = 1;
962 break;
963
964 case 0xfd: /* New fabric */
965 default:
966 mask = 0x00000000;
967 linkdown = 1;
968 clear_all = 1;
969 break;
970 }
971
972 aff_d_id = aid->aff_d_id & mask;
973
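/*
 * Example: an RSCN affecting domain 0x01 arrives with an
 * affected-ID format of 2, so mask becomes 0x00ff0000 and every
 * node whose D_ID matches 0x01xxxx is selected below.  The special
 * scopes 0xffffffff (link down), 0xfeffffff (virtual link down)
 * and 0xfdffffff (new fabric) use a zero mask and select all nodes.
 */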
974
975 /*
976 * If link is down then this is a hard shutdown and flush
977 * If link not down then this is a soft shutdown and flush
978 * (e.g. RSCN)
979 */
980 if (linkdown) {
981 mutex_enter(&EMLXS_PORT_LOCK);
982
983 port->flag &= EMLXS_PORT_LINKDOWN_MASK;
984
985 if (port->ulp_statec != FC_STATE_OFFLINE) {
986 port->ulp_statec = FC_STATE_OFFLINE;
987
988 port->prev_did = port->did;
989 port->did = 0;
990 port->rdid = 0;
991
992 bcopy(&port->fabric_sparam, &port->prev_fabric_sparam,
993 sizeof (SERV_PARM));
994 bzero(&port->fabric_sparam, sizeof (SERV_PARM));
995
996 update = 1;
997 }
998
999 mutex_exit(&EMLXS_PORT_LOCK);
1000
1001 emlxs_timer_cancel_clean_address(port);
1002
1003 /* Tell ULP about it */
1004 if (update) {
1005 if (port->flag & EMLXS_PORT_BOUND) {
1006 if (port->vpi == 0) {
1007 EMLXS_MSGF(EMLXS_CONTEXT,
1008 &emlxs_link_down_msg, NULL);
1009 }
1010
1011 if (port->mode == MODE_INITIATOR) {
1012 emlxs_fca_link_down(port);
1013 }
1014 #ifdef SFCT_SUPPORT
1015 else if (port->mode == MODE_TARGET) {
1016 emlxs_fct_link_down(port);
1017 }
1018 #endif /* SFCT_SUPPORT */
1019
1020 } else {
1021 if (port->vpi == 0) {
1022 EMLXS_MSGF(EMLXS_CONTEXT,
1023 &emlxs_link_down_msg, "*");
1024 }
1025 }
1026
1027
1028 }
1029
1030 unreg_vpi = 1;
1031
1032 #ifdef DHCHAP_SUPPORT
1033 /* Stop authentication with all nodes */
1034 emlxs_dhc_auth_stop(port, NULL);
1035 #endif /* DHCHAP_SUPPORT */
1036
1037 /* Flush the base node */
1038 (void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1039 (void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1040
1041 /* Flush any pending ub buffers */
1042 emlxs_ub_flush(port);
1043 }
1044 #ifdef DHCHAP_SUPPORT
1045 /* virtual link down */
1046 else if (vlinkdown) {
1047 mutex_enter(&EMLXS_PORT_LOCK);
1048
1049 if (port->ulp_statec != FC_STATE_OFFLINE) {
1050 port->ulp_statec = FC_STATE_OFFLINE;
1051 update = 1;
1052 }
1053
1054 mutex_exit(&EMLXS_PORT_LOCK);
1055
1056 emlxs_timer_cancel_clean_address(port);
1057
1058 /* Tell ULP about it */
1059 if (update) {
1060 if (port->flag & EMLXS_PORT_BOUND) {
1061 if (port->vpi == 0) {
1062 EMLXS_MSGF(EMLXS_CONTEXT,
1063 &emlxs_link_down_msg,
1064 "Switch authentication failed.");
1065 }
1066
1067 if (port->mode == MODE_INITIATOR) {
1068 emlxs_fca_link_down(port);
1069 }
1070 #ifdef SFCT_SUPPORT
1071 else if (port->mode == MODE_TARGET) {
1072 emlxs_fct_link_down(port);
1073 }
1074 #endif /* SFCT_SUPPORT */
1075 } else {
1076 if (port->vpi == 0) {
1077 EMLXS_MSGF(EMLXS_CONTEXT,
1078 &emlxs_link_down_msg,
1079 "Switch authentication failed. *");
1080 }
1081 }
1082
1083
1084 }
1085
1086 /* Flush the base node */
1087 (void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1088 (void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1089 }
1090 #endif /* DHCHAP_SUPPORT */
1091 else {
1092 emlxs_timer_cancel_clean_address(port);
1093 }
1094
1095 if (port->mode == MODE_TARGET) {
1096 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1097 /* Set the node tags */
1098 emlxs_fcp_tag_nodes(port);
1099 unreg_vpi = 0;
1100 while ((nlp = emlxs_find_tagged_node(port))) {
1101 (void) emlxs_rpi_pause_notify(port,
1102 nlp->rpip);
1103 /*
1104 * In port_online we need to resume
1105 * these RPIs before we can use them.
1106 */
1107 }
1108 }
1109 goto done;
1110 }
1111
1112 /* Set the node tags */
1113 emlxs_fcp_tag_nodes(port);
1114
1115 if (!clear_all && (hba->flag & FC_ONLINE_MODE)) {
1116 adisc_support = cfg[CFG_ADISC_SUPPORT].current;
1117 } else {
1118 adisc_support = 0;
1119 }
1120
1121 /* Check ADISC support level */
1122 switch (adisc_support) {
1123 case 0: /* No support - Flush all IO to all matching nodes */
1124
1125 for (;;) {
1126 /*
1127 * We need to hold the locks this way because
1128 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1129 * same locks. Also, when we release the lock the list
1130 * can change out from under us.
1131 */
1132
1133 /* Find first node */
1134 rw_enter(&port->node_rwlock, RW_READER);
1135 action = 0;
1136 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1137 nlp = port->node_table[i];
1138 while (nlp != NULL) {
1139 if (!nlp->nlp_tag) {
1140 nlp = nlp->nlp_list_next;
1141 continue;
1142 }
1143 nlp->nlp_tag = 0;
1144
1145 /*
1146 * Check for any device that matches
1147 * our mask
1148 */
1149 if ((nlp->nlp_DID & mask) == aff_d_id) {
1150 if (linkdown) {
1151 action = 1;
1152 break;
1153 } else { /* Must be an RSCN */
1154
1155 action = 2;
1156 break;
1157 }
1158 }
1159 nlp = nlp->nlp_list_next;
1160 }
1161
1162 if (action) {
1163 break;
1164 }
1165 }
1166 rw_exit(&port->node_rwlock);
1167
1168
1169 /* Check if nothing was found */
1170 if (action == 0) {
1171 break;
1172 } else if (action == 1) {
1173 (void) EMLXS_SLI_UNREG_NODE(port, nlp,
1174 NULL, NULL, NULL);
1175 } else if (action == 2) {
1176 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1177
1178 #ifdef DHCHAP_SUPPORT
1179 emlxs_dhc_auth_stop(port, nlp);
1180 #endif /* DHCHAP_SUPPORT */
1181
1182 /*
1183 * Close the node for any further normal IO
1184 * A PLOGI will reopen the node
1185 */
1186 emlxs_node_close(port, nlp,
1187 hba->channel_fcp, 60);
1188 emlxs_node_close(port, nlp,
1189 hba->channel_ip, 60);
1190
1191 /* Flush tx queue */
1192 (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1193
1194 /* Flush chip queue */
1195 (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1196 }
1197
1198 }
1199
1200 break;
1201
1202 case 1: /* Partial support - Flush IO for non-FCP2 matching nodes */
1203
1204 for (;;) {
1205
1206 /*
1207 * We need to hold the locks this way because
1208 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1209 * same locks. Also, when we release the lock the list
1210 * can change out from under us.
1211 */
1212 rw_enter(&port->node_rwlock, RW_READER);
1213 action = 0;
1214 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1215 nlp = port->node_table[i];
1216 while (nlp != NULL) {
1217 if (!nlp->nlp_tag) {
1218 nlp = nlp->nlp_list_next;
1219 continue;
1220 }
1221 nlp->nlp_tag = 0;
1222
1223 /*
1224 * Check for special FCP2 target device
1225 * that matches our mask
1226 */
1227 if ((nlp->nlp_fcp_info &
1228 NLP_FCP_TGT_DEVICE) &&
1229 (nlp->nlp_fcp_info &
1230 NLP_FCP_2_DEVICE) &&
1231 (nlp->nlp_DID & mask) ==
1232 aff_d_id) {
1233 action = 3;
1234 break;
1235 }
1236
1237 /*
1238 * Check for any other device that
1239 * matches our mask
1240 */
1241 else if ((nlp->nlp_DID & mask) ==
1242 aff_d_id) {
1243 if (linkdown) {
1244 action = 1;
1245 break;
1246 } else { /* Must be an RSCN */
1247
1248 action = 2;
1249 break;
1250 }
1251 }
1252
1253 nlp = nlp->nlp_list_next;
1254 }
1255
1256 if (action) {
1257 break;
1258 }
1259 }
1260 rw_exit(&port->node_rwlock);
1261
1262 /* Check if nothing was found */
1263 if (action == 0) {
1264 break;
1265 } else if (action == 1) {
1266 (void) EMLXS_SLI_UNREG_NODE(port, nlp,
1267 NULL, NULL, NULL);
1268 } else if (action == 2) {
1269 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1270
1271 #ifdef DHCHAP_SUPPORT
1272 emlxs_dhc_auth_stop(port, nlp);
1273 #endif /* DHCHAP_SUPPORT */
1274
1275 /*
1276 * Close the node for any further normal IO
1277 * A PLOGI will reopen the node
1278 */
1279 emlxs_node_close(port, nlp,
1280 hba->channel_fcp, 60);
1281 emlxs_node_close(port, nlp,
1282 hba->channel_ip, 60);
1283
1284 /* Flush tx queue */
1285 (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1286
1287 /* Flush chip queue */
1288 (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1289
1290 } else if (action == 3) { /* FCP2 devices */
1291 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1292
1293 unreg_vpi = 0;
1294
1295 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1296 (void) emlxs_rpi_pause_notify(port,
1297 nlp->rpip);
1298 }
1299
1300 #ifdef DHCHAP_SUPPORT
1301 emlxs_dhc_auth_stop(port, nlp);
1302 #endif /* DHCHAP_SUPPORT */
1303
1304 /*
1305 * Close the node for any further normal IO
1306 * An ADISC or a PLOGI will reopen the node
1307 */
1308 emlxs_node_close(port, nlp,
1309 hba->channel_fcp, -1);
1310 emlxs_node_close(port, nlp, hba->channel_ip,
1311 ((linkdown) ? 0 : 60));
1312
1313 /* Flush tx queues except for FCP ring */
1314 (void) emlxs_tx_node_flush(port, nlp,
1315 &hba->chan[hba->channel_ct], 0, 0);
1316 (void) emlxs_tx_node_flush(port, nlp,
1317 &hba->chan[hba->channel_els], 0, 0);
1318 (void) emlxs_tx_node_flush(port, nlp,
1319 &hba->chan[hba->channel_ip], 0, 0);
1320
1321 /* Flush chip queues except for FCP ring */
1322 (void) emlxs_chipq_node_flush(port,
1323 &hba->chan[hba->channel_ct], nlp, 0);
1324 (void) emlxs_chipq_node_flush(port,
1325 &hba->chan[hba->channel_els], nlp, 0);
1326 (void) emlxs_chipq_node_flush(port,
1327 &hba->chan[hba->channel_ip], nlp, 0);
1328 }
1329 }
1330 break;
1331
1332 case 2: /* Full support - Hold FCP IO to FCP target matching nodes */
1333
1334 if (!linkdown && !vlinkdown) {
1335 break;
1336 }
1337
1338 for (;;) {
1339 /*
1340 * We need to hold the locks this way because
1341 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1342 * same locks. Also, when we release the lock the list
1343 * can change out from under us.
1344 */
1345 rw_enter(&port->node_rwlock, RW_READER);
1346 action = 0;
1347 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1348 nlp = port->node_table[i];
1349 while (nlp != NULL) {
1350 if (!nlp->nlp_tag) {
1351 nlp = nlp->nlp_list_next;
1352 continue;
1353 }
1354 nlp->nlp_tag = 0;
1355
1356 /*
1357 * Check for FCP target device that
1358 * matches our mask
1359 */
1360 if ((nlp->nlp_fcp_info &
1361 NLP_FCP_TGT_DEVICE) &&
1362 (nlp->nlp_DID & mask) ==
1363 aff_d_id) {
1364 action = 3;
1365 break;
1366 }
1367
1368 /*
1369 * Check for any other device that
1370 * matches our mask
1371 */
1372 else if ((nlp->nlp_DID & mask) ==
1373 aff_d_id) {
1374 if (linkdown) {
1375 action = 1;
1376 break;
1377 } else { /* Must be an RSCN */
1378
1379 action = 2;
1380 break;
1381 }
1382 }
1383
1384 nlp = nlp->nlp_list_next;
1385 }
1386 if (action) {
1387 break;
1388 }
1389 }
1390 rw_exit(&port->node_rwlock);
1391
1392 /* Check if nothing was found */
1393 if (action == 0) {
1394 break;
1395 } else if (action == 1) {
1396 (void) EMLXS_SLI_UNREG_NODE(port, nlp,
1397 NULL, NULL, NULL);
1398 } else if (action == 2) {
1399 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1400
1401 /*
1402 * Close the node for any further normal IO
1403 * A PLOGI will reopen the node
1404 */
1405 emlxs_node_close(port, nlp,
1406 hba->channel_fcp, 60);
1407 emlxs_node_close(port, nlp,
1408 hba->channel_ip, 60);
1409
1410 /* Flush tx queue */
1411 (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1412
1413 /* Flush chip queue */
1414 (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1415
1416 } else if (action == 3) { /* FCP2 devices */
1417 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1418
1419 unreg_vpi = 0;
1420
1421 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1422 (void) emlxs_rpi_pause_notify(port,
1423 nlp->rpip);
1424 }
1425
1426 /*
1427 * Close the node for any further normal IO
1428 * An ADISC or a PLOGI will reopen the node
1429 */
1430 emlxs_node_close(port, nlp,
1431 hba->channel_fcp, -1);
1432 emlxs_node_close(port, nlp, hba->channel_ip,
1433 ((linkdown) ? 0 : 60));
1434
1435 /* Flush tx queues except for FCP ring */
1436 (void) emlxs_tx_node_flush(port, nlp,
1437 &hba->chan[hba->channel_ct], 0, 0);
1438 (void) emlxs_tx_node_flush(port, nlp,
1439 &hba->chan[hba->channel_els], 0, 0);
1440 (void) emlxs_tx_node_flush(port, nlp,
1441 &hba->chan[hba->channel_ip], 0, 0);
1442
1443 /* Flush chip queues except for FCP ring */
1444 (void) emlxs_chipq_node_flush(port,
1445 &hba->chan[hba->channel_ct], nlp, 0);
1446 (void) emlxs_chipq_node_flush(port,
1447 &hba->chan[hba->channel_els], nlp, 0);
1448 (void) emlxs_chipq_node_flush(port,
1449 &hba->chan[hba->channel_ip], nlp, 0);
1450 }
1451 }
1452
1453 break;
1454
1455 } /* switch() */
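/*
 * Summary of the ADISC support levels handled above:
 *	0 - none:    flush IO and unregister every matching node
 *	1 - partial: preserve FCP2 target nodes (action 3), flush
 *	             everything else
 *	2 - full:    on a (virtual) link down, hold FCP IO to all
 *	             FCP target nodes
 * The action 3 cases also clear unreg_vpi so the VPI remains
 * registered across the event.
 */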
1456
1457 done:
1458
1459 if (unreg_vpi) {
1460 (void) emlxs_mb_unreg_vpi(port);
1461 }
1462
1463 return (0);
1464
1465 } /* emlxs_port_offline() */
1466
1467
1468 extern void
1469 emlxs_port_online(emlxs_port_t *vport)
1470 {
1471 emlxs_hba_t *hba = vport->hba;
1472 emlxs_port_t *port = &PPORT;
1473 NODELIST *nlp;
1474 uint32_t state;
1475 uint32_t update;
1476 uint32_t npiv_linkup;
1477 char topology[32];
1478 char linkspeed[32];
1479 char mode[32];
1480
1481 /*
1482 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1483 * "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1484 */
1485
1486 if ((vport->vpi > 0) &&
1487 (!(hba->flag & FC_NPIV_ENABLED) ||
1488 !(hba->flag & FC_NPIV_SUPPORTED))) {
1489 return;
1490 }
1491
1492 if (!(vport->flag & EMLXS_PORT_BOUND) ||
1493 !(vport->flag & EMLXS_PORT_ENABLED)) {
1494 return;
1495 }
1496
1497 /* Check for mode */
1498 if (port->mode == MODE_TARGET) {
1499 (void) strlcpy(mode, ", target", sizeof (mode));
1500
1501 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1502 /* Set the node tags */
1503 emlxs_fcp_tag_nodes(vport);
1504 while ((nlp = emlxs_find_tagged_node(vport))) {
1505 /* The RPI was paused in port_offline */
1506 (void) emlxs_rpi_resume_notify(vport,
1507 nlp->rpip, 0);
1508 }
1509 }
1510 } else if (port->mode == MODE_INITIATOR) {
1511 (void) strlcpy(mode, ", initiator", sizeof (mode));
1512 } else {
1513 (void) strlcpy(mode, "unknown", sizeof (mode));
1514 }
1515 mutex_enter(&EMLXS_PORT_LOCK);
1516
1517 /* Check for loop topology */
1518 if (hba->topology == TOPOLOGY_LOOP) {
1519 state = FC_STATE_LOOP;
1520 (void) strlcpy(topology, ", loop", sizeof (topology));
1521 } else {
1522 state = FC_STATE_ONLINE;
1523 (void) strlcpy(topology, ", fabric", sizeof (topology));
1524 }
1525
1526 /* Set the link speed */
1527 switch (hba->linkspeed) {
1528 case 0:
1529 (void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1530 state |= FC_STATE_1GBIT_SPEED;
1531 break;
1532
1533 case LA_1GHZ_LINK:
1534 (void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1535 state |= FC_STATE_1GBIT_SPEED;
1536 break;
1537 case LA_2GHZ_LINK:
1538 (void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1539 state |= FC_STATE_2GBIT_SPEED;
1540 break;
1541 case LA_4GHZ_LINK:
1542 (void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1543 state |= FC_STATE_4GBIT_SPEED;
1544 break;
1545 case LA_8GHZ_LINK:
1546 (void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1547 state |= FC_STATE_8GBIT_SPEED;
1548 break;
1549 case LA_10GHZ_LINK:
1550 (void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1551 state |= FC_STATE_10GBIT_SPEED;
1552 break;
1553 case LA_16GHZ_LINK:
1554 (void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1555 state |= FC_STATE_16GBIT_SPEED;
1556 break;
1557 default:
1558 (void) snprintf(linkspeed, sizeof (linkspeed), "unknown(0x%x)",
1559 hba->linkspeed);
1560 break;
1561 }
1562
1563 npiv_linkup = 0;
1564 update = 0;
1565
1566 if ((hba->state >= FC_LINK_UP) &&
1567 !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1568 update = 1;
1569 vport->ulp_statec = state;
1570
1571 if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1572 hba->flag |= FC_NPIV_LINKUP;
1573 npiv_linkup = 1;
1574 }
1575 }
1576
1577 mutex_exit(&EMLXS_PORT_LOCK);
1578
1579 if (update) {
1580 if (vport->flag & EMLXS_PORT_BOUND) {
1581 if (vport->vpi == 0) {
1582 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1583 "%s%s%s", linkspeed, topology, mode);
1584
1585 } else if (npiv_linkup) {
1586 EMLXS_MSGF(EMLXS_CONTEXT,
1587 &emlxs_npiv_link_up_msg, "%s%s%s",
1588 linkspeed, topology, mode);
1589 }
1590
1591 if (vport->mode == MODE_INITIATOR) {
1592 emlxs_fca_link_up(vport);
1593 }
1594 #ifdef SFCT_SUPPORT
1595 else if (vport->mode == MODE_TARGET) {
1596 emlxs_fct_link_up(vport);
1597 }
1598 #endif /* SFCT_SUPPORT */
1599 } else {
1600 if (vport->vpi == 0) {
1601 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1602 "%s%s%s *", linkspeed, topology, mode);
1603
1604 } else if (npiv_linkup) {
1605 EMLXS_MSGF(EMLXS_CONTEXT,
1606 &emlxs_npiv_link_up_msg, "%s%s%s *",
1607 linkspeed, topology, mode);
1608 }
1609 }
1610
1611 /* Check for waiting threads */
1612 if (vport->vpi == 0) {
1613 mutex_enter(&EMLXS_LINKUP_LOCK);
1614 if (hba->linkup_wait_flag == TRUE) {
1615 hba->linkup_wait_flag = FALSE;
1616 cv_broadcast(&EMLXS_LINKUP_CV);
1617 }
1618 mutex_exit(&EMLXS_LINKUP_LOCK);
1619 }
1620
1621 /* Flush any pending ub buffers */
1622 emlxs_ub_flush(vport);
1623 }
1624
1625 return;
1626
1627 } /* emlxs_port_online() */
1628
1629
1630 /* SLI3 */
1631 extern void
1632 emlxs_linkdown(emlxs_hba_t *hba)
1633 {
1634 emlxs_port_t *port = &PPORT;
1635 int i;
1636 uint32_t scope;
1637
1638 mutex_enter(&EMLXS_PORT_LOCK);
1639
1640 if (hba->state > FC_LINK_DOWN) {
1641 HBASTATS.LinkDown++;
1642 EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
1643 }
1644
1645 /* Set scope */
1646 scope = (hba->flag & FC_NEW_FABRIC) ? 0xFDFFFFFF : 0xFFFFFFFF;
1647
1648 /* Filter hba flags */
1649 hba->flag &= FC_LINKDOWN_MASK;
1650 hba->discovery_timer = 0;
1651 hba->linkup_timer = 0;
1652
1653 mutex_exit(&EMLXS_PORT_LOCK);
1654
1655 for (i = 0; i < MAX_VPORTS; i++) {
1656 port = &VPORT(i);
1657
1658 if (!(port->flag & EMLXS_PORT_BOUND)) {
1659 continue;
1660 }
1661
1662 (void) emlxs_port_offline(port, scope);
1663
1664 }
1665
1666 emlxs_log_link_event(port);
1667
1668 return;
1669
1670 } /* emlxs_linkdown() */
1671
1672
1673 /* SLI3 */
1674 extern void
1675 emlxs_linkup(emlxs_hba_t *hba)
1676 {
1677 emlxs_port_t *port = &PPORT;
1678 emlxs_config_t *cfg = &CFG;
1679
1680 mutex_enter(&EMLXS_PORT_LOCK);
1681
1682 /* Check for any mode changes */
1683 emlxs_mode_set(hba);
1684
1685 HBASTATS.LinkUp++;
1686 EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);
1687
1688 #ifdef MENLO_SUPPORT
1689 if (hba->flag & FC_MENLO_MODE) {
1690 mutex_exit(&EMLXS_PORT_LOCK);
1691
1692 /*
1693 * Trigger linkup CV and don't start linkup & discovery
1694 * timers
1695 */
1696 mutex_enter(&EMLXS_LINKUP_LOCK);
1697 cv_broadcast(&EMLXS_LINKUP_CV);
1698 mutex_exit(&EMLXS_LINKUP_LOCK);
1699
1700 emlxs_log_link_event(port);
1701
1702 return;
1703 }
1704 #endif /* MENLO_SUPPORT */
1705
1706 /* Set the linkup & discovery timers */
1707 hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1708 hba->discovery_timer =
1709 hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1710 cfg[CFG_DISC_TIMEOUT].current;
1711
1712 mutex_exit(&EMLXS_PORT_LOCK);
1713
1714 emlxs_log_link_event(port);
1715
1716 return;
1717
1718 } /* emlxs_linkup() */
1719
1720
1721 /*
1722 * emlxs_reset_link
1723 *
1724 * Description:
1725 * Called to reset the link with an init_link
1726 *
1727 * Returns:
1728 *	0 on success, 1 on failure
1729 */
1730 extern int
1731 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
1732 {
1733 emlxs_port_t *port = &PPORT;
1734 emlxs_config_t *cfg;
1735 MAILBOXQ *mbq = NULL;
1736 MAILBOX *mb = NULL;
1737 int rval = 0;
1738 int tmo;
1739 int rc;
1740
1741 /*
1742 * Get a buffer to use for the mailbox command
1743 */
1744 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
1745 == NULL) {
1746 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1747 "Unable to allocate mailbox buffer.");
1748 rval = 1;
1749 goto reset_link_fail;
1750 }
1751
1752 if (linkup) {
1753 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1754 "Resetting link...");
1755 } else {
1756 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1757 "Disabling link...");
1758 }
1759
1760 mb = (MAILBOX *)mbq;
1761
1762 /* Bring link down first */
1763 emlxs_mb_down_link(hba, mbq);
1764
1765 #define MBXERR_LINK_DOWN 0x33
1766
1767 if (wait) {
1768 wait = MBX_WAIT;
1769 } else {
1770 wait = MBX_NOWAIT;
1771 }
1772 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1773 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
1774 (rc != MBXERR_LINK_DOWN)) {
1775 rval = 1;
1776 goto reset_link_fail;
1777 }
1778
1779 tmo = 120;
1780 do {
1781 delay(drv_usectohz(500000));
1782 tmo--;
1783
1784 if (!tmo) {
1785 rval = 1;
1786
1787 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1788 "Linkdown timeout.");
1789
1790 goto reset_link_fail;
1791 }
1792 } while ((hba->state >= FC_LINK_UP) && (hba->state != FC_ERROR));
1793
1794 if (linkup) {
1795 /*
1796 * Setup and issue mailbox INITIALIZE LINK command
1797 */
1798
1799 if (wait == MBX_NOWAIT) {
1800 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
1801 == NULL) {
1802 EMLXS_MSGF(EMLXS_CONTEXT,
1803 &emlxs_link_reset_failed_msg,
1804 "Unable to allocate mailbox buffer.");
1805 rval = 1;
1806 goto reset_link_fail;
1807 }
1808 mb = (MAILBOX *)mbq;
1809 } else {
1810 /* Reuse mbq from previous mbox */
1811 mb = (MAILBOX *)mbq;
1812 }
1813 cfg = &CFG;
1814
1815 emlxs_mb_init_link(hba, mbq,
1816 cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1817
1818 mb->un.varInitLnk.lipsr_AL_PA = 0;
1819
1820 /* Clear the loopback mode */
1821 mutex_enter(&EMLXS_PORT_LOCK);
1822 hba->flag &= ~FC_LOOPBACK_MODE;
1823 hba->loopback_tics = 0;
1824 mutex_exit(&EMLXS_PORT_LOCK);
1825
1826 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1827 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1828 rval = 1;
1829 goto reset_link_fail;
1830 }
1831
1832 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1833 }
1834
1835 reset_link_fail:
1836
1837 if ((wait == MBX_WAIT) && mbq) {
1838 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1839 }
1840
1841 return (rval);
1842 } /* emlxs_reset_link() */
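/*
 * Note on mailbox buffer ownership in emlxs_reset_link(): with
 * MBX_WAIT the caller retains ownership of mbq, so it is freed at
 * reset_link_fail; with MBX_NOWAIT the SLI layer completes the
 * command asynchronously and takes ownership, which is why the
 * NOWAIT path allocates a fresh mbq for the init_link command
 * instead of reusing the first one.
 */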
1843
1844
1845 extern int
1846 emlxs_online(emlxs_hba_t *hba)
1847 {
1848 emlxs_port_t *port = &PPORT;
1849 int32_t rval = 0;
1850 uint32_t i = 0;
1851
1852 /* Make sure adapter is offline or exit trying (30 seconds) */
1853 while (i++ < 30) {
1854 /* Check if adapter is already going online */
1855 if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1856 return (0);
1857 }
1858
1859 mutex_enter(&EMLXS_PORT_LOCK);
1860
1861 /* Check again */
1862 if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1863 mutex_exit(&EMLXS_PORT_LOCK);
1864 return (0);
1865 }
1866
1867 /* Check if adapter is offline */
1868 if (hba->flag & FC_OFFLINE_MODE) {
1869 /* Mark it going online */
1870 hba->flag &= ~FC_OFFLINE_MODE;
1871 hba->flag |= FC_ONLINING_MODE;
1872
1873 /* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1874 mutex_exit(&EMLXS_PORT_LOCK);
1875 break;
1876 }
1877
1878 mutex_exit(&EMLXS_PORT_LOCK);
1879
1880 BUSYWAIT_MS(1000);
1881 }
1882
1883 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1884 "Going online...");
1885
1886 if (rval = EMLXS_SLI_ONLINE(hba)) {
1887 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
1888 rval);
1889 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1890
1891 /* Set FC_OFFLINE_MODE */
1892 mutex_enter(&EMLXS_PORT_LOCK);
1893 hba->flag |= FC_OFFLINE_MODE;
1894 hba->flag &= ~FC_ONLINING_MODE;
1895 mutex_exit(&EMLXS_PORT_LOCK);
1896
1897 return (rval);
1898 }
1899
1900 /* Start the timer */
1901 emlxs_timer_start(hba);
1902
1903 /* Set FC_ONLINE_MODE */
1904 mutex_enter(&EMLXS_PORT_LOCK);
1905 hba->flag |= FC_ONLINE_MODE;
1906 hba->flag &= ~FC_ONLINING_MODE;
1907 mutex_exit(&EMLXS_PORT_LOCK);
1908
1909 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1910
1911 #ifdef SFCT_SUPPORT
1912 if (port->flag & EMLXS_TGT_ENABLED) {
1913 (void) emlxs_fct_port_initialize(port);
1914 }
1915 #endif /* SFCT_SUPPORT */
1916
1917 return (rval);
1918
1919 } /* emlxs_online() */
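/*
 * State machine note: FC_ONLINE_MODE and FC_OFFLINE_MODE are the
 * stable states, FC_ONLINING_MODE and FC_OFFLINING_MODE the
 * transitions.  emlxs_online() and emlxs_offline() each spin (up to
 * 30 seconds) until the adapter leaves the opposite transition state
 * before claiming it under EMLXS_PORT_LOCK.
 */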
1920
1921
1922 extern int
1923 emlxs_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1924 {
1925 emlxs_port_t *port = &PPORT;
1926 uint32_t i = 0;
1927 int rval = 1;
1928
1929 /* Make sure adapter is online or exit trying (30 seconds) */
1930 while (i++ < 30) {
1931 /* Check if adapter is already going offline */
1932 if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1933 return (0);
1934 }
1935
1936 mutex_enter(&EMLXS_PORT_LOCK);
1937
1938 /* Check again */
1939 if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1940 mutex_exit(&EMLXS_PORT_LOCK);
1941 return (0);
1942 }
1943
1944 /* Check if adapter is online */
1945 if (hba->flag & FC_ONLINE_MODE) {
1946 /* Mark it going offline */
1947 hba->flag &= ~FC_ONLINE_MODE;
1948 hba->flag |= FC_OFFLINING_MODE;
1949
1950 /* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1951 mutex_exit(&EMLXS_PORT_LOCK);
1952 break;
1953 }
1954
1955 mutex_exit(&EMLXS_PORT_LOCK);
1956
1957 BUSYWAIT_MS(1000);
1958 }
1959
1960 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1961 "Going offline...");
1962
1963 /* Declare link down */
1964 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1965 (void) emlxs_fcf_shutdown_notify(port, 1);
1966 } else {
1967 emlxs_linkdown(hba);
1968 }
1969
1970 #ifdef SFCT_SUPPORT
1971 if (port->flag & EMLXS_TGT_ENABLED) {
1972 (void) emlxs_fct_port_shutdown(port);
1973 }
1974 #endif /* SFCT_SUPPORT */
1975
1976 /* Check if adapter was shutdown */
1977 if (hba->flag & FC_HARDWARE_ERROR) {
1978 /*
1979 * Force mailbox cleanup
1980 * This will wake any sleeping or polling threads
1981 */
1982 emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1983 }
1984
1985 /* Pause here for the IO to settle */
1986 delay(drv_usectohz(1000000)); /* 1 sec */
1987
1988 /* Unregister all nodes */
1989 emlxs_ffcleanup(hba);
1990
1991 if (hba->bus_type == SBUS_FC) {
1992 WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
1993 #ifdef FMA_SUPPORT
1994 /* Access handle validation */
1995 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
1996 #endif /* FMA_SUPPORT */
1997 }
1998
1999 /* Stop the timer */
2000 emlxs_timer_stop(hba);
2001
2002 /* For safety flush every iotag list */
2003 if (emlxs_iotag_flush(hba)) {
2004 /* Pause here for the IO to flush */
2005 delay(drv_usectohz(1000));
2006 }
2007
2008 /* Wait for poll command request to settle */
2009 while (hba->io_poll_count > 0) {
2010 delay(drv_usectohz(2000000)); /* 2 sec */
2011 }
2012
2013 /* Shutdown the adapter interface */
2014 EMLXS_SLI_OFFLINE(hba, reset_requested);
2015
2016 mutex_enter(&EMLXS_PORT_LOCK);
2017 hba->flag |= FC_OFFLINE_MODE;
2018 hba->flag &= ~FC_OFFLINING_MODE;
2019 mutex_exit(&EMLXS_PORT_LOCK);
2020
2021 rval = 0;
2022
2023 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
2024
2025 done:
2026
2027 return (rval);
2028
2029 } /* emlxs_offline() */
2030
2031
2032
2033 extern int
2034 emlxs_power_down(emlxs_hba_t *hba)
2035 {
2036 #ifdef FMA_SUPPORT
2037 emlxs_port_t *port = &PPORT;
2038 #endif /* FMA_SUPPORT */
2039 int32_t rval = 0;
2040
2041 if ((rval = emlxs_offline(hba, 0))) {
2042 return (rval);
2043 }
2044 EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);
2045
2046
2047 #ifdef FMA_SUPPORT
2048 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2049 != DDI_FM_OK) {
2050 EMLXS_MSGF(EMLXS_CONTEXT,
2051 &emlxs_invalid_access_handle_msg, NULL);
2052 return (1);
2053 }
2054 #endif /* FMA_SUPPORT */
2055
2056 return (0);
2057
2058 } /* End emlxs_power_down */
2059
2060
2061 extern int
2062 emlxs_power_up(emlxs_hba_t *hba)
2063 {
2064 #ifdef FMA_SUPPORT
2065 emlxs_port_t *port = &PPORT;
2066 #endif /* FMA_SUPPORT */
2067 int32_t rval = 0;
2068
2069
2070 #ifdef FMA_SUPPORT
2071 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2072 != DDI_FM_OK) {
2073 EMLXS_MSGF(EMLXS_CONTEXT,
2074 &emlxs_invalid_access_handle_msg, NULL);
2075 return (1);
2076 }
2077 #endif /* FMA_SUPPORT */
2078
2079 /* Bring adapter online */
2080 if ((rval = emlxs_online(hba))) {
2081 if (hba->pci_cap_offset[PCI_CAP_ID_PM]) {
2082 /* Put chip in D3 state */
2083 (void) ddi_put8(hba->pci_acc_handle,
2084 (uint8_t *)(hba->pci_addr +
2085 hba->pci_cap_offset[PCI_CAP_ID_PM] +
2086 PCI_PMCSR),
2087 (uint8_t)PCI_PMCSR_D3HOT);
2088 }
2089 return (rval);
2090 }
2091
2092 return (rval);
2093
2094 } /* emlxs_power_up() */
2095
2096
2097 /*
2098 *
2099 * NAME: emlxs_ffcleanup
2100 *
2101 * FUNCTION: Clean up all the Firefly resources used by configuring the adapter
2102 *
2103 * EXECUTION ENVIRONMENT: process only
2104 *
2105 * CALLED FROM: CFG_TERM
2106 *
2107 * INPUT: hba - pointer to the dev_ctl area.
2108 *
2109 * RETURNS: none
2110 */
2111 extern void
2112 emlxs_ffcleanup(emlxs_hba_t *hba)
2113 {
2114 emlxs_port_t *port = &PPORT;
2115 uint32_t i;
2116
2117 /* Disable all but the mailbox interrupt */
2118 EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);
2119
2120 /* Make sure all port nodes are destroyed */
2121 for (i = 0; i < MAX_VPORTS; i++) {
2122 port = &VPORT(i);
2123
2124 if (port->node_count) {
2125 (void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2126 }
2127 }
2128
2129 /* Clear all interrupt enable conditions */
2130 EMLXS_SLI_DISABLE_INTR(hba, 0);
2131
2132 return;
2133
2134 } /* emlxs_ffcleanup() */
2135
2136
2137 extern uint16_t
2138 emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
2139 {
2140 emlxs_hba_t *hba;
2141 emlxs_port_t *port;
2142 uint16_t iotag;
2143 uint32_t i;
2144
2145 hba = cp->hba;
2146
2147 mutex_enter(&EMLXS_FCTAB_LOCK);
2148
2149 if (sbp->iotag != 0) {
2150 port = &PPORT;
2151
2152 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2153 "Pkt already registered! channel=%d iotag=%d sbp=%p",
2154 sbp->channel, sbp->iotag, sbp);
2155 }
2156
2157 iotag = 0;
2158 for (i = 0; i < hba->max_iotag; i++) {
2159 if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
2160 hba->fc_iotag = 1;
2161 }
2162 iotag = hba->fc_iotag++;
2163
2164 if (hba->fc_table[iotag] == 0 ||
2165 hba->fc_table[iotag] == STALE_PACKET) {
2166 hba->io_count++;
2167 hba->fc_table[iotag] = sbp;
2168
2169 sbp->iotag = iotag;
2170 sbp->channel = cp;
2171
2172 break;
2173 }
2174 iotag = 0;
2175 }
2176
2177 mutex_exit(&EMLXS_FCTAB_LOCK);
2178
2179 /*
2180 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2181 * "register_pkt: channel=%d iotag=%d sbp=%p",
2182 * cp->channelno, iotag, sbp);
2183 */
2184
2185 return (iotag);
2186
2187 } /* emlxs_register_pkt() */
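/*
 * The iotag allocator above is a circular scan: hba->fc_iotag
 * remembers where the previous search stopped, so allocation is
 * cheap while fc_table is sparse and only degrades to a full
 * O(max_iotag) sweep when the table is nearly exhausted.  Iotag 0
 * is reserved to mean "unregistered".
 */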
2188
2189
2190
2191 extern emlxs_buf_t *
2192 emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
2193 {
2194 emlxs_hba_t *hba;
2195 emlxs_buf_t *sbp;
2196
2197 sbp = NULL;
2198 hba = cp->hba;
2199
2200 /* Check the iotag range */
2201 if ((iotag == 0) || (iotag >= hba->max_iotag)) {
2202 return (NULL);
2203 }
2204
2205 /* Remove the sbp from the table */
2206 mutex_enter(&EMLXS_FCTAB_LOCK);
2207 sbp = hba->fc_table[iotag];
2208
2209 if (!sbp || (sbp == STALE_PACKET)) {
2210 mutex_exit(&EMLXS_FCTAB_LOCK);
2211 return (sbp);
2212 }
2213
2214 hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
2215 hba->io_count--;
2216 sbp->iotag = 0;
2217
2218 mutex_exit(&EMLXS_FCTAB_LOCK);
2219
2220
2221 /* Clean up the sbp */
2222 mutex_enter(&sbp->mtx);
2223
2224 if (sbp->pkt_flags & PACKET_IN_TXQ) {
2225 sbp->pkt_flags &= ~PACKET_IN_TXQ;
2226 hba->channel_tx_count--;
2227 }
2228
2229 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
2230 sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
2231 }
2232
2233 if (sbp->bmp) {
2234 emlxs_mem_put(hba, MEM_BPL, (void *)sbp->bmp);
2235 sbp->bmp = 0;
2236 }
2237
2238 mutex_exit(&sbp->mtx);
2239
2240 return (sbp);
2241
2242 } /* emlxs_unregister_pkt() */
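/*
 * Lifecycle sketch (illustrative): an iotag pins an sbp in
 * hba->fc_table from submission until completion or abort:
 *
 *	iotag = emlxs_register_pkt(cp, sbp);	   // submit path
 *	...
 *	sbp = emlxs_unregister_pkt(cp, iotag, 0);  // completion path
 *
 * Passing forced=1 leaves STALE_PACKET in the slot so that a late
 * completion carrying the same iotag can be recognized as stale and
 * dropped rather than matched to a new IO.
 */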
2243
2244
2245
2246 /* Flush all IO's to all nodes for a given IO Channel */
2247 extern uint32_t
2248 emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
2249 {
2250 emlxs_port_t *port = &PPORT;
2251 emlxs_buf_t *sbp;
2252 IOCBQ *iocbq;
2253 IOCBQ *next;
2254 IOCB *iocb;
2255 uint32_t channelno;
2256 Q abort;
2257 NODELIST *ndlp;
2258 IOCB *icmd;
2259 MATCHMAP *mp;
2260 uint32_t i;
2261 uint8_t flag[MAX_CHANNEL];
2262
2263 channelno = cp->channelno;
2264 bzero((void *)&abort, sizeof (Q));
2265 bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));
2266
2267 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2268
2269 /* While a node needs servicing */
2270 while (cp->nodeq.q_first) {
2271 ndlp = (NODELIST *) cp->nodeq.q_first;
2272
2273 /* Check if priority queue is not empty */
2274 if (ndlp->nlp_ptx[channelno].q_first) {
2275 /* Transfer all iocb's to local queue */
2276 if (abort.q_first == 0) {
2277 abort.q_first =
2278 ndlp->nlp_ptx[channelno].q_first;
2279 } else {
2280 ((IOCBQ *)abort.q_last)->next =
2281 (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
2282 }
2283 flag[channelno] = 1;
2284
2285 abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2286 abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2287 }
2288
2289 /* Check if tx queue is not empty */
2290 if (ndlp->nlp_tx[channelno].q_first) {
2291 /* Transfer all iocb's to local queue */
2292 if (abort.q_first == 0) {
2293 abort.q_first = ndlp->nlp_tx[channelno].q_first;
2294 } else {
2295 ((IOCBQ *)abort.q_last)->next =
2296 (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2297 }
2298
2299 abort.q_last = ndlp->nlp_tx[channelno].q_last;
2300 abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2301 }
2302
2303 /* Clear the queue pointers */
2304 ndlp->nlp_ptx[channelno].q_first = NULL;
2305 ndlp->nlp_ptx[channelno].q_last = NULL;
2306 ndlp->nlp_ptx[channelno].q_cnt = 0;
2307
2308 ndlp->nlp_tx[channelno].q_first = NULL;
2309 ndlp->nlp_tx[channelno].q_last = NULL;
2310 ndlp->nlp_tx[channelno].q_cnt = 0;
2311
2312 /* Remove node from service queue */
2313
2314 /* If this is the last node on list */
2315 if (cp->nodeq.q_last == (void *)ndlp) {
2316 cp->nodeq.q_last = NULL;
2317 cp->nodeq.q_first = NULL;
2318 cp->nodeq.q_cnt = 0;
2319 } else {
2320 /* Remove node from head */
2321 cp->nodeq.q_first = ndlp->nlp_next[channelno];
2322 ((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2323 cp->nodeq.q_first;
2324 cp->nodeq.q_cnt--;
2325 }
2326
2327 /* Clear node */
2328 ndlp->nlp_next[channelno] = NULL;
2329 }
2330
2331 /* First cleanup the iocb's while still holding the lock */
2332 iocbq = (IOCBQ *) abort.q_first;
2333 while (iocbq) {
2334 /* Free the IoTag and the bmp */
2335 iocb = &iocbq->iocb;
2336
2337 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2338 sbp = iocbq->sbp;
2339 if (sbp) {
2340 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2341 }
2342 } else {
2343 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2344 iocb->ULPIOTAG, 0);
2345 }
2346
2347 if (sbp && (sbp != STALE_PACKET)) {
2348 mutex_enter(&sbp->mtx);
2349
2350 sbp->pkt_flags |= PACKET_IN_FLUSH;
2351 /*
2352 * If the fpkt is already set, then we will leave it
2353 * alone. This ensures that this pkt is only accounted
2354 * for on one fpkt->flush_count
2355 */
2356 if (!sbp->fpkt && fpkt) {
2357 mutex_enter(&fpkt->mtx);
2358 sbp->fpkt = fpkt;
2359 fpkt->flush_count++;
2360 mutex_exit(&fpkt->mtx);
2361 }
2362
2363 mutex_exit(&sbp->mtx);
2364 }
2365
2366 iocbq = (IOCBQ *)iocbq->next;
2367 } /* end of while */
2368
2369 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2370
2371 /* Now abort the iocb's */
2372 iocbq = (IOCBQ *)abort.q_first;
2373 while (iocbq) {
2374 /* Save the next iocbq for now */
2375 next = (IOCBQ *)iocbq->next;
2376
2377 /* Unlink this iocbq */
2378 iocbq->next = NULL;
2379
2380 /* Get the pkt */
2381 sbp = (emlxs_buf_t *)iocbq->sbp;
2382
2383 if (sbp) {
2384 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2385 "tx: sbp=%p node=%p", sbp, sbp->node);
2386
2387 if (hba->state >= FC_LINK_UP) {
2388 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2389 IOERR_ABORT_REQUESTED, 1);
2390 } else {
2391 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2392 IOERR_LINK_DOWN, 1);
2393 }
2394
2395 }
2396 /* Free the iocb and its associated buffers */
2397 else {
2398 icmd = &iocbq->iocb;
2399
2400 /* SLI3 */
2401 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2402 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2403 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2404 if ((hba->flag &
2405 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2406 /* HBA is detaching or offlining */
2407 if (icmd->ULPCOMMAND !=
2408 CMD_QUE_RING_LIST64_CN) {
2409 void *tmp;
2410 RING *rp;
2411
2412 rp = &hba->sli.sli3.
2413 ring[channelno];
2414 for (i = 0;
2415 i < icmd->ULPBDECOUNT;
2416 i++) {
2417 mp = EMLXS_GET_VADDR(
2418 hba, rp, icmd);
2419
2420 tmp = (void *)mp;
2421 if (mp) {
2422 emlxs_mem_put(
2423 hba, MEM_BUF, tmp);
2424 }
2425 }
2426 }
2427
2428 emlxs_mem_put(hba, MEM_IOCB,
2429 (void *)iocbq);
2430 } else {
2431 /* repost the unsolicited buffer */
2432 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
2433 iocbq);
2434 }
2435 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2436 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2437
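/* Resend the abort iocbq if any */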
2438 emlxs_tx_put(iocbq, 1);
2439 }
2440 }
2441
2442 iocbq = next;
2443
2444 } /* end of while */
2445
2446 /* Now trigger channel service */
2447 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2448 if (!flag[channelno]) {
2449 continue;
2450 }
2451
2452 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2453 }
2454
2455 return (abort.q_cnt);
2456
2457 } /* emlxs_tx_channel_flush() */
2458
2459
2460 /* Flush all IO's on all channels, or a given channel, for a given node */
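/* If 'shutdown' is set, the node is also marked inactive (no new IO) */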
2461 extern uint32_t
2462 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
2463 uint32_t shutdown, emlxs_buf_t *fpkt)
2464 {
2465 emlxs_hba_t *hba = HBA;
2466 emlxs_buf_t *sbp;
2467 uint32_t channelno;
2468 CHANNEL *cp;
2469 IOCB *icmd;
2470 IOCBQ *iocbq;
2471 NODELIST *prev;
2472 IOCBQ *next;
2473 IOCB *iocb;
2474 Q abort;
2475 uint32_t i;
2476 MATCHMAP *mp;
2477 uint8_t flag[MAX_CHANNEL];
2478
2479 bzero((void *)&abort, sizeof (Q));
bzero((void *)flag, sizeof (flag));
2480
2481 /* Flush all I/O's on tx queue to this target */
2482 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2483
2484 if (!ndlp->nlp_base && shutdown) {
2485 ndlp->nlp_active = 0;
2486 }
2487
2488 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2489 cp = &hba->chan[channelno];
2490
2491 if (chan && cp != chan) {
2492 continue;
2493 }
2494
2495 if (!ndlp->nlp_base || shutdown) {
2496 /* Check if priority queue is not empty */
2497 if (ndlp->nlp_ptx[channelno].q_first) {
2498 /* Transfer all iocb's to local queue */
2499 if (abort.q_first == 0) {
2500 abort.q_first =
2501 ndlp->nlp_ptx[channelno].q_first;
2502 } else {
2503 ((IOCBQ *)(abort.q_last))->next =
2504 (IOCBQ *)ndlp->nlp_ptx[channelno].
2505 q_first;
2506 }
2507
2508 flag[channelno] = 1;
2509
2510 abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2511 abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2512 }
2513 }
2514
2515 /* Check if tx queue is not empty */
2516 if (ndlp->nlp_tx[channelno].q_first) {
2517
2518 /* Transfer all iocb's to local queue */
2519 if (abort.q_first == 0) {
2520 abort.q_first = ndlp->nlp_tx[channelno].q_first;
2521 } else {
2522 ((IOCBQ *)abort.q_last)->next =
2523 (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2524 }
2525
2526 abort.q_last = ndlp->nlp_tx[channelno].q_last;
2527 abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2528 }
2529
2530 /* Clear the queue pointers */
2531 ndlp->nlp_ptx[channelno].q_first = NULL;
2532 ndlp->nlp_ptx[channelno].q_last = NULL;
2533 ndlp->nlp_ptx[channelno].q_cnt = 0;
2534
2535 ndlp->nlp_tx[channelno].q_first = NULL;
2536 ndlp->nlp_tx[channelno].q_last = NULL;
2537 ndlp->nlp_tx[channelno].q_cnt = 0;
2538
2539 /* If this node was on the channel queue, remove it */
2540 if (ndlp->nlp_next[channelno]) {
2541 /* If this is the only node on list */
2542 if (cp->nodeq.q_first == (void *)ndlp &&
2543 cp->nodeq.q_last == (void *)ndlp) {
2544 cp->nodeq.q_last = NULL;
2545 cp->nodeq.q_first = NULL;
2546 cp->nodeq.q_cnt = 0;
2547 } else if (cp->nodeq.q_first == (void *)ndlp) {
2548 cp->nodeq.q_first = ndlp->nlp_next[channelno];
2549 ((NODELIST *) cp->nodeq.q_last)->
2550 nlp_next[channelno] = cp->nodeq.q_first;
2551 cp->nodeq.q_cnt--;
2552 } else {
2553 /*
2554 * This is a little more difficult: find the
2555 * previous node in the circular channel queue
2556 */
2557 prev = ndlp;
2558 while (prev->nlp_next[channelno] != ndlp) {
2559 prev = prev->nlp_next[channelno];
2560 }
2561
2562 prev->nlp_next[channelno] =
2563 ndlp->nlp_next[channelno];
2564
2565 if (cp->nodeq.q_last == (void *)ndlp) {
2566 cp->nodeq.q_last = (void *)prev;
2567 }
2568 cp->nodeq.q_cnt--;
2569
2570 }
2571
2572 /* Clear node */
2573 ndlp->nlp_next[channelno] = NULL;
2574 }
2575
2576 }
2577
2578 /* First cleanup the iocb's while still holding the lock */
2579 iocbq = (IOCBQ *) abort.q_first;
2580 while (iocbq) {
2581 /* Free the IoTag and the bmp */
2582 iocb = &iocbq->iocb;
2583
2584 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2585 sbp = iocbq->sbp;
2586 if (sbp) {
2587 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2588 }
2589 } else {
2590 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2591 iocb->ULPIOTAG, 0);
2592 }
2593
2594 if (sbp && (sbp != STALE_PACKET)) {
2595 mutex_enter(&sbp->mtx);
2596 sbp->pkt_flags |= PACKET_IN_FLUSH;
2597 /*
2598 * If the fpkt is already set, then we will leave it
2599 * alone. This ensures that this pkt is only accounted
2600 * for on one fpkt->flush_count
2601 */
2602 if (!sbp->fpkt && fpkt) {
2603 mutex_enter(&fpkt->mtx);
2604 sbp->fpkt = fpkt;
2605 fpkt->flush_count++;
2606 mutex_exit(&fpkt->mtx);
2607 }
2608
2609 mutex_exit(&sbp->mtx);
2610 }
2611
2612 iocbq = (IOCBQ *) iocbq->next;
2613
2614 } /* end of while */
2615
2616 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2617
2618 /* Now abort the iocb's outside the locks */
2619 iocbq = (IOCBQ *)abort.q_first;
2620 while (iocbq) {
2621 /* Save the next iocbq for now */
2622 next = (IOCBQ *)iocbq->next;
2623
2624 /* Unlink this iocbq */
2625 iocbq->next = NULL;
2626
2627 /* Get the pkt */
2628 sbp = (emlxs_buf_t *)iocbq->sbp;
2629
2630 if (sbp) {
2631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2632 "tx: sbp=%p node=%p", sbp, sbp->node);
2633
2634 if (hba->state >= FC_LINK_UP) {
2635 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2636 IOERR_ABORT_REQUESTED, 1);
2637 } else {
2638 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2639 IOERR_LINK_DOWN, 1);
2640 }
2641
2642 }
2643 /* Free the iocb and its associated buffers */
2644 else {
2645 /* CMD_CLOSE_XRI_CN should also free the memory */
2646 icmd = &iocbq->iocb;
2647
2648 /* SLI3 */
2649 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2650 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2651 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2652 if ((hba->flag &
2653 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2654 /* HBA is detaching or offlining */
2655 if (icmd->ULPCOMMAND !=
2656 CMD_QUE_RING_LIST64_CN) {
2657 void *tmp;
2658 RING *rp;
2659 int ch;
2660
2661 ch = ((CHANNEL *)
2662 iocbq->channel)->channelno;
2663 rp = &hba->sli.sli3.ring[ch];
2664 for (i = 0;
2665 i < icmd->ULPBDECOUNT;
2666 i++) {
2667 mp = EMLXS_GET_VADDR(
2668 hba, rp, icmd);
2669
2670 tmp = (void *)mp;
2671 if (mp) {
2672 emlxs_mem_put(
2673 hba, MEM_BUF, tmp);
2674 }
2675 }
2676 }
2677
2678 emlxs_mem_put(hba, MEM_IOCB,
2679 (void *)iocbq);
2680 } else {
2681 /* repost the unsolicited buffer */
2682 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2683 (CHANNEL *)iocbq->channel, iocbq);
2684 }
2685 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2686 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2687 /*
2688 * Resend the abort iocbq if any
2689 */
2690 emlxs_tx_put(iocbq, 1);
2691 }
2692 }
2693
2694 iocbq = next;
2695
2696 } /* end of while */
2697
2698 /* Now trigger channel service */
2699 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2700 if (!flag[channelno]) {
2701 continue;
2702 }
2703
2704 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2705 }
2706
2707 return (abort.q_cnt);
2708
2709 } /* emlxs_tx_node_flush() */
2710
2711
2712 /* Check for IO's on all channels, or a given channel, for a given node */
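/* Returns the number of IOCBs still queued for the node */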
2713 extern uint32_t
2714 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
2715 {
2716 emlxs_hba_t *hba = HBA;
2717 uint32_t channelno;
2718 CHANNEL *cp;
2719 uint32_t count;
2720
2721 count = 0;
2722
2723 /* Flush all I/O's on tx queue to this target */
2724 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2725
2726 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2727 cp = &hba->chan[channelno];
2728
2729 if (chan && cp != chan) {
2730 continue;
2731 }
2732
2733 /* Check if priority queue is not empty */
2734 if (ndlp->nlp_ptx[channelno].q_first) {
2735 count += ndlp->nlp_ptx[channelno].q_cnt;
2736 }
2737
2738 /* Check if tx queue is not empty */
2739 if (ndlp->nlp_tx[channelno].q_first) {
2740 count += ndlp->nlp_tx[channelno].q_cnt;
2741 }
2742
2743 }
2744
2745 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2746
2747 return (count);
2748
2749 } /* emlxs_tx_node_check() */
2750
2751
2752
2753 /* Flush all IO's on any channel for a given node's lun */
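/* Returns the number of IOCBs flushed */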
2754 extern uint32_t
2755 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2756 emlxs_buf_t *fpkt)
2757 {
2758 emlxs_hba_t *hba = HBA;
2759 emlxs_buf_t *sbp;
2760 uint32_t channelno;
2761 IOCBQ *iocbq;
2762 IOCBQ *prev;
2763 IOCBQ *next;
2764 IOCB *iocb;
2765 IOCB *icmd;
2766 Q abort;
2767 uint32_t i;
2768 MATCHMAP *mp;
2769 uint8_t flag[MAX_CHANNEL];
2770
2771 if (lun == EMLXS_LUN_NONE) {
2772 return (0);
2773 }
2774
2775 bzero((void *)&abort, sizeof (Q));
bzero((void *)flag, sizeof (flag));
2776
2777 /* Flush I/O's on txQ to this target's lun */
2778 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2779
2780 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2781
2782 /* Scan the priority queue first */
2783 prev = NULL;
2784 iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;
2785
2786 while (iocbq) {
2787 next = (IOCBQ *)iocbq->next;
2788 iocb = &iocbq->iocb;
2789 sbp = (emlxs_buf_t *)iocbq->sbp;
2790
2791 /* Check if this IO is for our lun */
2792 if (sbp && (sbp->lun == lun)) {
2793 /* Remove iocb from the node's ptx queue */
2794 if (next == 0) {
2795 ndlp->nlp_ptx[channelno].q_last =
2796 (uint8_t *)prev;
2797 }
2798
2799 if (prev == 0) {
2800 ndlp->nlp_ptx[channelno].q_first =
2801 (uint8_t *)next;
2802 } else {
2803 prev->next = next;
2804 }
2805
2806 iocbq->next = NULL;
2807 ndlp->nlp_ptx[channelno].q_cnt--;
2808
2809 /*
2810 * Add this iocb to our local abort Q
2811 */
2812 if (abort.q_first) {
2813 ((IOCBQ *)abort.q_last)->next = iocbq;
2814 abort.q_last = (uint8_t *)iocbq;
2815 abort.q_cnt++;
2816 } else {
2817 abort.q_first = (uint8_t *)iocbq;
2818 abort.q_last = (uint8_t *)iocbq;
2819 abort.q_cnt = 1;
2820 }
2821 iocbq->next = NULL;
2822 flag[channelno] = 1;
2823
2824 } else {
2825 prev = iocbq;
2826 }
2827
2828 iocbq = next;
2829
2830 } /* while (iocbq) */
2831
2832
2833 /* Scan the regular queue */
2834 prev = NULL;
2835 iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2836
2837 while (iocbq) {
2838 next = (IOCBQ *)iocbq->next;
2839 iocb = &iocbq->iocb;
2840 sbp = (emlxs_buf_t *)iocbq->sbp;
2841
2842 /* Check if this IO is for our lun */
2843 if (sbp && (sbp->lun == lun)) {
2844 /* Remove iocb from the node's tx queue */
2845 if (next == 0) {
2846 ndlp->nlp_tx[channelno].q_last =
2847 (uint8_t *)prev;
2848 }
2849
2850 if (prev == 0) {
2851 ndlp->nlp_tx[channelno].q_first =
2852 (uint8_t *)next;
2853 } else {
2854 prev->next = next;
2855 }
2856
2857 iocbq->next = NULL;
2858 ndlp->nlp_tx[channelno].q_cnt--;
2859
2860 /*
2861 * Add this iocb to our local abort Q
2862 */
2863 if (abort.q_first) {
2864 ((IOCBQ *) abort.q_last)->next = iocbq;
2865 abort.q_last = (uint8_t *)iocbq;
2866 abort.q_cnt++;
2867 } else {
2868 abort.q_first = (uint8_t *)iocbq;
2869 abort.q_last = (uint8_t *)iocbq;
2870 abort.q_cnt = 1;
2871 }
2872 iocbq->next = NULL;
2873 } else {
2874 prev = iocbq;
2875 }
2876
2877 iocbq = next;
2878
2879 } /* while (iocbq) */
2880 } /* for loop */
2881
2882 /* First cleanup the iocb's while still holding the lock */
2883 iocbq = (IOCBQ *)abort.q_first;
2884 while (iocbq) {
2885 /* Free the IoTag and the bmp */
2886 iocb = &iocbq->iocb;
2887
2888 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2889 sbp = iocbq->sbp;
2890 if (sbp) {
2891 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2892 }
2893 } else {
2894 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2895 iocb->ULPIOTAG, 0);
2896 }
2897
2898 if (sbp && (sbp != STALE_PACKET)) {
2899 mutex_enter(&sbp->mtx);
2900 sbp->pkt_flags |= PACKET_IN_FLUSH;
2901 /*
2902 * If the fpkt is already set, then we will leave it
2903 * alone. This ensures that this pkt is only accounted
2904 * for on one fpkt->flush_count
2905 */
2906 if (!sbp->fpkt && fpkt) {
2907 mutex_enter(&fpkt->mtx);
2908 sbp->fpkt = fpkt;
2909 fpkt->flush_count++;
2910 mutex_exit(&fpkt->mtx);
2911 }
2912
2913 mutex_exit(&sbp->mtx);
2914 }
2915
2916 iocbq = (IOCBQ *) iocbq->next;
2917
2918 } /* end of while */
2919
2920 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2921
2922 /* Now abort the iocb's outside the locks */
2923 iocbq = (IOCBQ *)abort.q_first;
2924 while (iocbq) {
2925 /* Save the next iocbq for now */
2926 next = (IOCBQ *)iocbq->next;
2927
2928 /* Unlink this iocbq */
2929 iocbq->next = NULL;
2930
2931 /* Get the pkt */
2932 sbp = (emlxs_buf_t *)iocbq->sbp;
2933
2934 if (sbp) {
2935 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2936 "tx: sbp=%p node=%p", sbp, sbp->node);
2937
2938 if (hba->state >= FC_LINK_UP) {
2939 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2940 IOERR_ABORT_REQUESTED, 1);
2941 } else {
2942 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2943 IOERR_LINK_DOWN, 1);
2944 }
2945 }
2946
2947 /* Free the iocb and its associated buffers */
2948 else {
2949 /* Should never happen! */
2950 icmd = &iocbq->iocb;
2951
2952 /* SLI3 */
2953 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2954 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2955 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2956 if ((hba->flag &
2957 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2958 /* HBA is detaching or offlining */
2959 if (icmd->ULPCOMMAND !=
2960 CMD_QUE_RING_LIST64_CN) {
2961 void *tmp;
2962 RING *rp;
2963 int ch;
2964
2965 ch = ((CHANNEL *)
2966 iocbq->channel)->channelno;
2967 rp = &hba->sli.sli3.ring[ch];
2968 for (i = 0;
2969 i < icmd->ULPBDECOUNT;
2970 i++) {
2971 mp = EMLXS_GET_VADDR(
2972 hba, rp, icmd);
2973
2974 tmp = (void *)mp;
2975 if (mp) {
2976 emlxs_mem_put(
2977 hba, MEM_BUF, tmp);
2978 }
2979 }
2980 }
2981
2982 emlxs_mem_put(hba, MEM_IOCB,
2983 (void *)iocbq);
2984 } else {
2985 /* repost the unsolicited buffer */
2986 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2987 (CHANNEL *)iocbq->channel, iocbq);
2988 }
2989 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2990 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2991 /*
2992 * Resend the abort iocbq if any
2993 */
2994 emlxs_tx_put(iocbq, 1);
2995 }
2996 }
2997
2998 iocbq = next;
2999
3000 } /* end of while */
3001
3002 /* Now trigger channel service */
3003 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3004 if (!flag[channelno]) {
3005 continue;
3006 }
3007
3008 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3009 }
3010
3011 return (abort.q_cnt);
3012
3013 } /* emlxs_tx_lun_flush() */
3014
3015
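/*
 * emlxs_tx_put
 *
 * Description: Queue an IOCB on the node's per-channel tx queue (or
 * the priority queue for IOCB_PRIORITY requests) and link the node
 * onto the channel's circular service queue. If 'lock' is set,
 * EMLXS_TX_CHANNEL_LOCK is acquired here; otherwise the caller must
 * already hold it.
 */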
3016 extern void
3017 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
3018 {
3019 emlxs_hba_t *hba;
3020 emlxs_port_t *port;
3021 uint32_t channelno;
3022 NODELIST *nlp;
3023 CHANNEL *cp;
3024 emlxs_buf_t *sbp;
3025
3026 port = (emlxs_port_t *)iocbq->port;
3027 hba = HBA;
3028 cp = (CHANNEL *)iocbq->channel;
3029 nlp = (NODELIST *)iocbq->node;
3030 channelno = cp->channelno;
3031 sbp = (emlxs_buf_t *)iocbq->sbp;
3032
3033 if (nlp == NULL) {
3034 /* Set node to base node by default */
3035 nlp = &port->node_base;
3036
3037 iocbq->node = (void *)nlp;
3038
3039 if (sbp) {
3040 sbp->node = (void *)nlp;
3041 }
3042 }
3043
3044 if (lock) {
3045 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3046 }
3047
3048 if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
3049 if (sbp) {
3050 mutex_enter(&sbp->mtx);
3051 sbp->pkt_flags |= PACKET_IN_FLUSH;
3052 mutex_exit(&sbp->mtx);
3053
3054 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3055 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3056 } else {
3057 (void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
3058 }
3059
3060 if (lock) {
3061 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3062 }
3063
3064 if (hba->state >= FC_LINK_UP) {
3065 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3066 IOERR_ABORT_REQUESTED, 1);
3067 } else {
3068 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3069 IOERR_LINK_DOWN, 1);
3070 }
3071 return;
3072 } else {
3073 if (lock) {
3074 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3075 }
3076
3077 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
3078 }
3079
3080 return;
3081 }
3082
3083 if (sbp) {
3084
3085 mutex_enter(&sbp->mtx);
3086
3087 if (sbp->pkt_flags &
3088 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
3089 mutex_exit(&sbp->mtx);
3090 if (lock) {
3091 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3092 }
3093 return;
3094 }
3095
3096 sbp->pkt_flags |= PACKET_IN_TXQ;
3097 hba->channel_tx_count++;
3098
3099 mutex_exit(&sbp->mtx);
3100 }
3101
3102
3103 /* Check iocbq priority */
3104 /* Some IOCBs, like reset/close XRI, have high priority */
3105 if (iocbq->flag & IOCB_PRIORITY) {
3106 /* Add the iocb to the bottom of the node's ptx queue */
3107 if (nlp->nlp_ptx[channelno].q_first) {
3108 ((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
3109 nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3110 nlp->nlp_ptx[channelno].q_cnt++;
3111 } else {
3112 nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
3113 nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3114 nlp->nlp_ptx[channelno].q_cnt = 1;
3115 }
3116
3117 iocbq->next = NULL;
3118 } else { /* Normal priority */
3119
3120
3121 /* Add the iocb to the bottom of the node's tx queue */
3122 if (nlp->nlp_tx[channelno].q_first) {
3123 ((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
3124 nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3125 nlp->nlp_tx[channelno].q_cnt++;
3126 } else {
3127 nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
3128 nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3129 nlp->nlp_tx[channelno].q_cnt = 1;
3130 }
3131
3132 iocbq->next = NULL;
3133 }
3134
3135
3136 /*
3137 * Check if the node is not already on channel queue and
3138 * (is not closed or is a priority request)
3139 */
3140 if (!nlp->nlp_next[channelno] &&
3141 (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
3142 (iocbq->flag & IOCB_PRIORITY))) {
3143 /* If so, then add it to the channel queue */
3144 if (cp->nodeq.q_first) {
3145 ((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
3146 (uint8_t *)nlp;
3147 nlp->nlp_next[channelno] = cp->nodeq.q_first;
3148
3149 /*
3150 * If this is not the base node then add it
3151 * to the tail
3152 */
3153 if (!nlp->nlp_base) {
3154 cp->nodeq.q_last = (uint8_t *)nlp;
3155 } else { /* Otherwise, add it to the head */
3156
3157 /* The command node always gets priority */
3158 cp->nodeq.q_first = (uint8_t *)nlp;
3159 }
3160
3161 cp->nodeq.q_cnt++;
3162 } else {
3163 cp->nodeq.q_first = (uint8_t *)nlp;
3164 cp->nodeq.q_last = (uint8_t *)nlp;
3165 nlp->nlp_next[channelno] = nlp;
3166 cp->nodeq.q_cnt = 1;
3167 }
3168 }
3169
3170 HBASTATS.IocbTxPut[channelno]++;
3171
3172 /* Adjust the channel timeout timer */
3173 cp->timeout = hba->timer_tics + 5;
3174
3175 if (lock) {
3176 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3177 }
3178
3179 return;
3180
3181 } /* emlxs_tx_put() */
3182
3183
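/*
 * emlxs_tx_get
 *
 * Description: Dequeue the next IOCB for this channel, servicing
 * nodes round-robin. The node's priority queue is drained first;
 * the regular tx queue is used only while the node is not closed.
 * Returns NULL when no node needs servicing. If 'lock' is set,
 * EMLXS_TX_CHANNEL_LOCK is acquired here.
 */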
3184 extern IOCBQ *
3185 emlxs_tx_get(CHANNEL *cp, uint32_t lock)
3186 {
3187 emlxs_hba_t *hba;
3188 uint32_t channelno;
3189 IOCBQ *iocbq;
3190 NODELIST *nlp;
3191 emlxs_buf_t *sbp;
3192
3193 hba = cp->hba;
3194 channelno = cp->channelno;
3195
3196 if (lock) {
3197 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3198 }
3199
3200 begin:
3201
3202 iocbq = NULL;
3203
3204 /* Check if a node needs servicing */
3205 if (cp->nodeq.q_first) {
3206 nlp = (NODELIST *)cp->nodeq.q_first;
3207
3208 /* Get next iocb from node's priority queue */
3209
3210 if (nlp->nlp_ptx[channelno].q_first) {
3211 iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
3212
3213 /* Check if this is last entry */
3214 if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
3215 nlp->nlp_ptx[channelno].q_first = NULL;
3216 nlp->nlp_ptx[channelno].q_last = NULL;
3217 nlp->nlp_ptx[channelno].q_cnt = 0;
3218 } else {
3219 /* Remove iocb from head */
3220 nlp->nlp_ptx[channelno].q_first =
3221 (void *)iocbq->next;
3222 nlp->nlp_ptx[channelno].q_cnt--;
3223 }
3224
3225 iocbq->next = NULL;
3226 }
3227
3228 /* Get next iocb from node tx queue if node not closed */
3229 else if (nlp->nlp_tx[channelno].q_first &&
3230 !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
3231 iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
3232
3233 /* Check if this is last entry */
3234 if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
3235 nlp->nlp_tx[channelno].q_first = NULL;
3236 nlp->nlp_tx[channelno].q_last = NULL;
3237 nlp->nlp_tx[channelno].q_cnt = 0;
3238 } else {
3239 /* Remove iocb from head */
3240 nlp->nlp_tx[channelno].q_first =
3241 (void *)iocbq->next;
3242 nlp->nlp_tx[channelno].q_cnt--;
3243 }
3244
3245 iocbq->next = NULL;
3246 }
3247
3248 /* Now deal with node itself */
3249
3250 /* Check if node still needs servicing */
3251 if ((nlp->nlp_ptx[channelno].q_first) ||
3252 (nlp->nlp_tx[channelno].q_first &&
3253 !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3254
3255 /*
3256 * If this is the base node, then don't shift the
3257 * pointers. We want to drain the base node before
3258 * moving on
3259 */
3260 if (!nlp->nlp_base) {
3261 /*
3262 * Just shift channel queue pointers to next
3263 * node
3264 */
3265 cp->nodeq.q_last = (void *)nlp;
3266 cp->nodeq.q_first = nlp->nlp_next[channelno];
3267 }
3268 } else {
3269 /* Remove node from channel queue */
3270
3271 /* If this is the last node on list */
3272 if (cp->nodeq.q_last == (void *)nlp) {
3273 cp->nodeq.q_last = NULL;
3274 cp->nodeq.q_first = NULL;
3275 cp->nodeq.q_cnt = 0;
3276 } else {
3277 /* Remove node from head */
3278 cp->nodeq.q_first = nlp->nlp_next[channelno];
3279 ((NODELIST *)cp->nodeq.q_last)->
3280 nlp_next[channelno] = cp->nodeq.q_first;
3281 cp->nodeq.q_cnt--;
3282
3283 }
3284
3285 /* Clear node */
3286 nlp->nlp_next[channelno] = NULL;
3287 }
3288
3289 /*
3290 * If no iocbq was found on this node, then it will have
3291 * been removed. So try again.
3292 */
3293 if (!iocbq) {
3294 goto begin;
3295 }
3296
3297 sbp = (emlxs_buf_t *)iocbq->sbp;
3298
3299 if (sbp) {
3300 /*
3301 * Check flags before we enter mutex in case this
3302 * has been flushed and destroyed
3303 */
3304 if ((sbp->pkt_flags &
3305 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3306 !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3307 goto begin;
3308 }
3309
3310 mutex_enter(&sbp->mtx);
3311
3312 if ((sbp->pkt_flags &
3313 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3314 !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3315 mutex_exit(&sbp->mtx);
3316 goto begin;
3317 }
3318
3319 sbp->pkt_flags &= ~PACKET_IN_TXQ;
3320 hba->channel_tx_count--;
3321
3322 mutex_exit(&sbp->mtx);
3323 }
3324 }
3325
3326 if (iocbq) {
3327 HBASTATS.IocbTxGet[channelno]++;
3328 }
3329
3330 /* Adjust the ring timeout timer */
3331 cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3332
3333 if (lock) {
3334 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3335 }
3336
3337 return (iocbq);
3338
3339 } /* emlxs_tx_get() */
3340
3341
3342 /*
3343 * Move all FCP cmds from from_chan's txq to to_chan's txq for ndlp.
3344 * The old IoTag has to be released and a new one allocated;
3345 * everything else is unchanged.
3346 * The TX_CHANNEL lock is taken here when 'lock' is set; otherwise
* the caller must already hold it.
3347 */
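/* IOs already on the chip for this node are flushed as well */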
3348 extern void
3349 emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
3350 uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
3351 {
3352 emlxs_hba_t *hba;
3353 emlxs_port_t *port;
3354 uint32_t fchanno, tchanno, i;
3355
3356 IOCBQ *iocbq;
3357 IOCBQ *prev;
3358 IOCBQ *next;
3359 IOCB *iocb, *icmd;
3360 Q tbm; /* To Be Moved Q */
3361 MATCHMAP *mp;
3362
3363 NODELIST *nlp = ndlp;
3364 emlxs_buf_t *sbp;
3365
3366 NODELIST *n_prev = NULL;
3367 NODELIST *n_next = NULL;
3368 uint16_t count = 0;
3369
3370 hba = from_chan->hba;
3371 port = &PPORT;
3372 cmd = cmd; /* To pass lint */
3373
3374 fchanno = from_chan->channelno;
3375 tchanno = to_chan->channelno;
3376
3377 if (lock) {
3378 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3379 }
3380
3381 bzero((void *)&tbm, sizeof (Q));
3382
3383 /* Scan the ndlp's fchanno txq to collect the FCP cmd iocbs */
3384 prev = NULL;
3385 iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;
3386
3387 while (iocbq) {
3388 next = (IOCBQ *)iocbq->next;
3389 /* Check if this iocb is fcp cmd */
3390 iocb = &iocbq->iocb;
3391
3392 switch (iocb->ULPCOMMAND) {
3393 /* FCP commands */
3394 case CMD_FCP_ICMND_CR:
3395 case CMD_FCP_ICMND_CX:
3396 case CMD_FCP_IREAD_CR:
3397 case CMD_FCP_IREAD_CX:
3398 case CMD_FCP_IWRITE_CR:
3399 case CMD_FCP_IWRITE_CX:
3400 case CMD_FCP_ICMND64_CR:
3401 case CMD_FCP_ICMND64_CX:
3402 case CMD_FCP_IREAD64_CR:
3403 case CMD_FCP_IREAD64_CX:
3404 case CMD_FCP_IWRITE64_CR:
3405 case CMD_FCP_IWRITE64_CX:
3406 /* We found an FCP cmd */
3407 break;
3408 default:
3409 /* Not an FCP cmd; continue */
3410 prev = iocbq;
3411 iocbq = next;
3412 continue;
3413 }
3414
3415 /* Found an FCP cmd iocb in fchanno txq, now dequeue it */
3416 if (next == NULL) {
3417 /* This is the last iocbq */
3418 nlp->nlp_tx[fchanno].q_last =
3419 (uint8_t *)prev;
3420 }
3421
3422 if (prev == NULL) {
3423 /* This is the first one then remove it from head */
3424 nlp->nlp_tx[fchanno].q_first =
3425 (uint8_t *)next;
3426 } else {
3427 prev->next = next;
3428 }
3429
3430 iocbq->next = NULL;
3431 nlp->nlp_tx[fchanno].q_cnt--;
3432
3433 /* Add this iocb to our local to-be-moved queue */
3434 /* This way we do not hold the TX_CHANNEL lock too long */
3435
3436 if (tbm.q_first) {
3437 ((IOCBQ *)tbm.q_last)->next = iocbq;
3438 tbm.q_last = (uint8_t *)iocbq;
3439 tbm.q_cnt++;
3440 } else {
3441 tbm.q_first = (uint8_t *)iocbq;
3442 tbm.q_last = (uint8_t *)iocbq;
3443 tbm.q_cnt = 1;
3444 }
3445
3446 iocbq = next;
3447
3448 } /* While (iocbq) */
3449
3450 if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {
3451
3452 /* from_chan->nodeq.q_first must be non-NULL */
3453 if (from_chan->nodeq.q_first) {
3454
3455 /* nodeq is not empty, now deal with the node itself */
3456 if (nlp->nlp_tx[fchanno].q_first) {
3457
3458 if (!nlp->nlp_base) {
3459 from_chan->nodeq.q_last =
3460 (void *)nlp;
3461 from_chan->nodeq.q_first =
3462 nlp->nlp_next[fchanno];
3463 }
3464
3465 } else {
3466 n_prev = (NODELIST *)from_chan->nodeq.q_first;
3467 count = from_chan->nodeq.q_cnt;
3468
3469 if (n_prev == nlp) {
3470
3471 /* If this is the only node on list */
3472 if (from_chan->nodeq.q_last ==
3473 (void *)nlp) {
3474 from_chan->nodeq.q_last =
3475 NULL;
3476 from_chan->nodeq.q_first =
3477 NULL;
3478 from_chan->nodeq.q_cnt = 0;
3479 } else {
3480 from_chan->nodeq.q_first =
3481 nlp->nlp_next[fchanno];
3482 ((NODELIST *)from_chan->
3483 nodeq.q_last)->
3484 nlp_next[fchanno] =
3485 from_chan->nodeq.q_first;
3486 from_chan->nodeq.q_cnt--;
3487 }
3488 /* Clear node */
3489 nlp->nlp_next[fchanno] = NULL;
3490 } else {
3491 count--;
3492 do {
3493 n_next =
3494 n_prev->nlp_next[fchanno];
3495 if (n_next == nlp) {
3496 break;
3497 }
3498 n_prev = n_next;
3499 } while (count--);
3500
3501 if (count != 0) {
3502
3503 if (n_next ==
3504 (NODELIST *)from_chan->
3505 nodeq.q_last) {
3506 n_prev->
3507 nlp_next[fchanno]
3508 =
3509 ((NODELIST *)
3510 from_chan->
3511 nodeq.q_last)->
3512 nlp_next
3513 [fchanno];
3514 from_chan->nodeq.q_last
3515 = (uint8_t *)n_prev;
3516 } else {
3517
3518 n_prev->
3519 nlp_next[fchanno]
3520 =
3521 n_next->nlp_next
3522 [fchanno];
3523 }
3524 from_chan->nodeq.q_cnt--;
3525 /* Clear node */
3526 nlp->nlp_next[fchanno] =
3527 NULL;
3528 }
3529 }
3530 }
3531 }
3532 }
3533
3534 /* Now cleanup the iocb's */
3535 prev = NULL;
3536 iocbq = (IOCBQ *)tbm.q_first;
3537
3538 while (iocbq) {
3539
3540 next = (IOCBQ *)iocbq->next;
3541
3542 /* Free the IoTag and the bmp */
3543 iocb = &iocbq->iocb;
3544
3545 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3546 sbp = iocbq->sbp;
3547 if (sbp) {
3548 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3549 }
3550 } else {
3551 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
3552 iocb->ULPIOTAG, 0);
3553 }
3554
3555 if (sbp && (sbp != STALE_PACKET)) {
3556 mutex_enter(&sbp->mtx);
3557 sbp->pkt_flags |= PACKET_IN_FLUSH;
3558
3559 /*
3560 * If the fpkt is already set, then we will leave it
3561 * alone. This ensures that this pkt is only accounted
3562 * for on one fpkt->flush_count
3563 */
3564 if (!sbp->fpkt && fpkt) {
3565 mutex_enter(&fpkt->mtx);
3566 sbp->fpkt = fpkt;
3567 fpkt->flush_count++;
3568 mutex_exit(&fpkt->mtx);
3569 }
3570 mutex_exit(&sbp->mtx);
3571 }
3572 iocbq = next;
3573
3574 } /* end of while */
3575
3576 iocbq = (IOCBQ *)tbm.q_first;
3577 while (iocbq) {
3578 /* Save the next iocbq for now */
3579 next = (IOCBQ *)iocbq->next;
3580
3581 /* Unlink this iocbq */
3582 iocbq->next = NULL;
3583
3584 /* Get the pkt */
3585 sbp = (emlxs_buf_t *)iocbq->sbp;
3586
3587 if (sbp) {
3588 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3589 "tx: sbp=%p node=%p", sbp, sbp->node);
3590
3591 if (hba->state >= FC_LINK_UP) {
3592 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3593 IOERR_ABORT_REQUESTED, 1);
3594 } else {
3595 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3596 IOERR_LINK_DOWN, 1);
3597 }
3598
3599 }
3600 /* Free the iocb and its associated buffers */
3601 else {
3602 icmd = &iocbq->iocb;
3603
3604 /* SLI3 */
3605 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
3606 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
3607 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
3608 if ((hba->flag &
3609 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
3610 /* HBA is detaching or offlining */
3611 if (icmd->ULPCOMMAND !=
3612 CMD_QUE_RING_LIST64_CN) {
3613 void *tmp;
3614 RING *rp;
3615 int ch;
3616
3617 ch = from_chan->channelno;
3618 rp = &hba->sli.sli3.ring[ch];
3619
3620 for (i = 0;
3621 i < icmd->ULPBDECOUNT;
3622 i++) {
3623 mp = EMLXS_GET_VADDR(
3624 hba, rp, icmd);
3625
3626 tmp = (void *)mp;
3627 if (mp) {
3628 emlxs_mem_put(
3629 hba,
3630 MEM_BUF,
3631 tmp);
3632 }
3633 }
3634
3635 }
3636
3637 emlxs_mem_put(hba, MEM_IOCB,
3638 (void *)iocbq);
3639 } else {
3640 /* repost the unsolicited buffer */
3641 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
3642 from_chan, iocbq);
3643 }
3644 }
3645 }
3646
3647 iocbq = next;
3648
3649 } /* end of while */
3650
3651 /* Now flush the chipq if any */
3652 if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {
3653
3654 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3655
3656 (void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);
3657
3658 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3659 }
3660
3661 if (lock) {
3662 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3663 }
3664
3665 return;
3666
3667 } /* emlxs_tx_move */
3668
3669
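/*
 * emlxs_chipq_node_flush
 *
 * Description: Abort all chip-owned IO's for a node by creating
 * ABORT_XRI (link up) or CLOSE_XRI (link down) requests and queuing
 * them for transmit. Returns the number of abort requests queued.
 */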
3670 extern uint32_t
3671 emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3672 emlxs_buf_t *fpkt)
3673 {
3674 emlxs_hba_t *hba = HBA;
3675 emlxs_buf_t *sbp;
3676 IOCBQ *iocbq;
3677 IOCBQ *next;
3678 Q abort;
3679 CHANNEL *cp;
3680 uint32_t channelno;
3681 uint8_t flag[MAX_CHANNEL];
3682 uint32_t iotag;
3683
3684 bzero((void *)&abort, sizeof (Q));
3685 bzero((void *)flag, sizeof (flag));
3686
3687 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3688 cp = &hba->chan[channelno];
3689
3690 if (chan && cp != chan) {
3691 continue;
3692 }
3693
3694 mutex_enter(&EMLXS_FCTAB_LOCK);
3695
3696 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3697 sbp = hba->fc_table[iotag];
3698
3699 if (sbp && (sbp != STALE_PACKET) &&
3700 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3701 (sbp->node == ndlp) &&
3702 (sbp->channel == cp) &&
3703 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3704 emlxs_sbp_abort_add(port, sbp, &abort, flag,
3705 fpkt);
3706 }
3707
3708 }
3709 mutex_exit(&EMLXS_FCTAB_LOCK);
3710
3711 } /* for */
3712
3713 /* Now put the iocb's on the tx queue */
3714 iocbq = (IOCBQ *)abort.q_first;
3715 while (iocbq) {
3716 /* Save the next iocbq for now */
3717 next = (IOCBQ *)iocbq->next;
3718
3719 /* Unlink this iocbq */
3720 iocbq->next = NULL;
3721
3722 /* Send this iocbq */
3723 emlxs_tx_put(iocbq, 1);
3724
3725 iocbq = next;
3726 }
3727
3728 /* Now trigger channel service */
3729 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3730 if (!flag[channelno]) {
3731 continue;
3732 }
3733
3734 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3735 }
3736
3737 return (abort.q_cnt);
3738
3739 } /* emlxs_chipq_node_flush() */
3740
3741
3742 /* Flush all IO's left on all iotag lists */
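/* Completions are deferred to each channel's interrupt thread */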
3743 extern uint32_t
3744 emlxs_iotag_flush(emlxs_hba_t *hba)
3745 {
3746 emlxs_port_t *port = &PPORT;
3747 emlxs_buf_t *sbp;
3748 IOCBQ *iocbq;
3749 IOCB *iocb;
3750 Q abort;
3751 CHANNEL *cp;
3752 uint32_t channelno;
3753 uint32_t iotag;
3754 uint32_t count;
3755
3756 count = 0;
3757 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3758 cp = &hba->chan[channelno];
3759
3760 bzero((void *)&abort, sizeof (Q));
3761
3762 mutex_enter(&EMLXS_FCTAB_LOCK);
3763
3764 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3765 sbp = hba->fc_table[iotag];
3766
3767 /* Check if the slot is empty */
3768 if (!sbp || (sbp == STALE_PACKET)) {
3769 continue;
3770 }
3771
3772 /* We are building an abort list per channel */
3773 if (sbp->channel != cp) {
3774 continue;
3775 }
3776
3777 hba->fc_table[iotag] = STALE_PACKET;
3778 hba->io_count--;
3779
3780 /* Check if IO is valid */
3781 if (!(sbp->pkt_flags & PACKET_VALID) ||
3782 (sbp->pkt_flags & (PACKET_ULP_OWNED|
3783 PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
3784 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3785 "iotag_flush: Invalid IO found. iotag=%d",
3786 iotag);
3787
3788 continue;
3789 }
3790
3791 sbp->iotag = 0;
3792
3793 /* Set IOCB status */
3794 iocbq = &sbp->iocbq;
3795 iocb = &iocbq->iocb;
3796
3797 iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
3798 iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3799 iocb->ULPLE = 1;
3800 iocbq->next = NULL;
3801
3802 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3803 if (sbp->xrip) {
3804 EMLXS_MSGF(EMLXS_CONTEXT,
3805 &emlxs_sli_debug_msg,
3806 "iotag_flush: iotag=%d sbp=%p "
3807 "xrip=%p state=%x flag=%x",
3808 iotag, sbp, sbp->xrip,
3809 sbp->xrip->state, sbp->xrip->flag);
3810 } else {
3811 EMLXS_MSGF(EMLXS_CONTEXT,
3812 &emlxs_sli_debug_msg,
3813 "iotag_flush: iotag=%d sbp=%p "
3814 "xrip=NULL", iotag, sbp);
3815 }
3816
3817 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
3818 } else {
3819 /* Clean up the sbp */
3820 mutex_enter(&sbp->mtx);
3821
3822 if (sbp->pkt_flags & PACKET_IN_TXQ) {
3823 sbp->pkt_flags &= ~PACKET_IN_TXQ;
3824 hba->channel_tx_count--;
3825 }
3826
3827 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3828 sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3829 }
3830
3831 if (sbp->bmp) {
3832 emlxs_mem_put(hba, MEM_BPL,
3833 (void *)sbp->bmp);
3834 sbp->bmp = 0;
3835 }
3836
3837 mutex_exit(&sbp->mtx);
3838 }
3839
3840 /* At this point all nodes are assumed destroyed */
3841 mutex_enter(&sbp->mtx);
3842 sbp->node = 0;
3843 mutex_exit(&sbp->mtx);
3844
3845 /* Add this iocb to our local abort Q */
3846 if (abort.q_first) {
3847 ((IOCBQ *)abort.q_last)->next = iocbq;
3848 abort.q_last = (uint8_t *)iocbq;
3849 abort.q_cnt++;
3850 } else {
3851 abort.q_first = (uint8_t *)iocbq;
3852 abort.q_last = (uint8_t *)iocbq;
3853 abort.q_cnt = 1;
3854 }
3855 }
3856
3857 mutex_exit(&EMLXS_FCTAB_LOCK);
3858
3859 /* Trigger deferred completion */
3860 if (abort.q_first) {
3861 mutex_enter(&cp->rsp_lock);
3862 if (cp->rsp_head == NULL) {
3863 cp->rsp_head = (IOCBQ *)abort.q_first;
3864 cp->rsp_tail = (IOCBQ *)abort.q_last;
3865 } else {
3866 cp->rsp_tail->next = (IOCBQ *)abort.q_first;
3867 cp->rsp_tail = (IOCBQ *)abort.q_last;
3868 }
3869 mutex_exit(&cp->rsp_lock);
3870
3871 emlxs_thread_trigger2(&cp->intr_thread,
3872 emlxs_proc_channel, cp);
3873
3874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3875 "iotag_flush: channel=%d count=%d",
3876 channelno, abort.q_cnt);
3877
3878 count += abort.q_cnt;
3879 }
3880 }
3881
3882 return (count);
3883
3884 } /* emlxs_iotag_flush() */
3885
3886
3887
3888 /* Checks for IO's on all or a given channel for a given node */
3889 extern uint32_t
3890 emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3891 {
3892 emlxs_hba_t *hba = HBA;
3893 emlxs_buf_t *sbp;
3894 CHANNEL *cp;
3895 uint32_t channelno;
3896 uint32_t count;
3897 uint32_t iotag;
3898
3899 count = 0;
3900
3901 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3902 cp = &hba->chan[channelno];
3903
3904 if (chan && cp != chan) {
3905 continue;
3906 }
3907
3908 mutex_enter(&EMLXS_FCTAB_LOCK);
3909
3910 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3911 sbp = hba->fc_table[iotag];
3912
3913 if (sbp && (sbp != STALE_PACKET) &&
3914 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3915 (sbp->node == ndlp) &&
3916 (sbp->channel == cp) &&
3917 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3918 count++;
3919 }
3920
3921 }
3922 mutex_exit(&EMLXS_FCTAB_LOCK);
3923
3924 } /* for */
3925
3926 return (count);
3927
3928 } /* emlxs_chipq_node_check() */
3929
3930
3931
3932 /* Flush all IO's for a given node's lun (on any channel) */
3933 extern uint32_t
3934 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3935 uint32_t lun, emlxs_buf_t *fpkt)
3936 {
3937 emlxs_hba_t *hba = HBA;
3938 emlxs_buf_t *sbp;
3939 IOCBQ *iocbq;
3940 IOCBQ *next;
3941 Q abort;
3942 uint32_t iotag;
3943 uint8_t flag[MAX_CHANNEL];
3944 uint32_t channelno;
3945
3946 if (lun == EMLXS_LUN_NONE) {
3947 return (0);
3948 }
3949
3950 bzero((void *)flag, sizeof (flag));
3951 bzero((void *)&abort, sizeof (Q));
3952
3953 mutex_enter(&EMLXS_FCTAB_LOCK);
3954 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3955 sbp = hba->fc_table[iotag];
3956
3957 if (sbp && (sbp != STALE_PACKET) &&
3958 sbp->pkt_flags & PACKET_IN_CHIPQ &&
3959 sbp->node == ndlp &&
3960 sbp->lun == lun &&
3961 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3962 emlxs_sbp_abort_add(port, sbp,
3963 &abort, flag, fpkt);
3964 }
3965 }
3966 mutex_exit(&EMLXS_FCTAB_LOCK);
3967
3968 /* Now put the iocb's on the tx queue */
3969 iocbq = (IOCBQ *)abort.q_first;
3970 while (iocbq) {
3971 /* Save the next iocbq for now */
3972 next = (IOCBQ *)iocbq->next;
3973
3974 /* Unlink this iocbq */
3975 iocbq->next = NULL;
3976
3977 /* Send this iocbq */
3978 emlxs_tx_put(iocbq, 1);
3979
3980 iocbq = next;
3981 }
3982
3983 /* Now trigger channel service */
3984 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3985 if (!flag[channelno]) {
3986 continue;
3987 }
3988
3989 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3990 }
3991
3992 return (abort.q_cnt);
3993
3994 } /* emlxs_chipq_lun_flush() */
3995
3996
3997
3998 /*
3999 * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
4000 * This must be called while holding the EMLXS_FCTAB_LOCK
4001 */
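/*
 * Abort requests draw from a separate out-of-range iotag pool
 * (max_iotag <= tag < EMLXS_MAX_ABORT_TAG), so an abort tag can
 * never collide with a normal fc_table iotag.
 */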
4002 extern IOCBQ *
4003 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4004 uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
4005 {
4006 emlxs_hba_t *hba = HBA;
4007 IOCBQ *iocbq;
4008 IOCB *iocb;
4009 emlxs_wqe_t *wqe;
4010 emlxs_buf_t *sbp;
4011 uint16_t abort_iotag;
4012
4013 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4014 return (NULL);
4015 }
4016
4017 iocbq->channel = (void *)cp;
4018 iocbq->port = (void *)port;
4019 iocbq->node = (void *)ndlp;
4020 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4021
4022 /*
4023 * set up an iotag using special Abort iotags
4024 */
4025 if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4026 hba->fc_oor_iotag = hba->max_iotag;
4027 }
4028 abort_iotag = hba->fc_oor_iotag++;
4029
4030
4031 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4032 wqe = &iocbq->wqe;
4033 sbp = hba->fc_table[iotag];
4034
4035 /* Try to issue abort by XRI if possible */
4036 if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4037 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4038 wqe->AbortTag = iotag;
4039 } else {
4040 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4041 wqe->AbortTag = sbp->xrip->XRI;
4042 }
4043 wqe->un.Abort.IA = 0;
4044 wqe->RequestTag = abort_iotag;
4045 wqe->Command = CMD_ABORT_XRI_CX;
4046 wqe->Class = CLASS3;
4047 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4048 wqe->CmdType = WQE_TYPE_ABORT;
4049 } else {
4050 iocb = &iocbq->iocb;
4051 iocb->ULPIOTAG = abort_iotag;
4052 iocb->un.acxri.abortType = flag;
4053 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4054 iocb->un.acxri.abortIoTag = iotag;
4055 iocb->ULPLE = 1;
4056 iocb->ULPCLASS = class;
4057 iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
4058 iocb->ULPOWNER = OWN_CHIP;
4059 }
4060
4061 return (iocbq);
4062
4063 } /* emlxs_create_abort_xri_cn() */
4064
4065
4066 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4067 extern IOCBQ *
4068 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4069 CHANNEL *cp, uint8_t class, int32_t flag)
4070 {
4071 emlxs_hba_t *hba = HBA;
4072 IOCBQ *iocbq;
4073 IOCB *iocb;
4074 emlxs_wqe_t *wqe;
4075 uint16_t abort_iotag;
4076
4077 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4078 return (NULL);
4079 }
4080
4081 iocbq->channel = (void *)cp;
4082 iocbq->port = (void *)port;
4083 iocbq->node = (void *)ndlp;
4084 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4085
4086 /*
4087 * set up an iotag using special Abort iotags
4088 */
4089 if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4090 hba->fc_oor_iotag = hba->max_iotag;
4091 }
4092 abort_iotag = hba->fc_oor_iotag++;
4093
4094 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4095 wqe = &iocbq->wqe;
4096 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4097 wqe->un.Abort.IA = 0;
4098 wqe->RequestTag = abort_iotag;
4099 wqe->AbortTag = xid;
4100 wqe->Command = CMD_ABORT_XRI_CX;
4101 wqe->Class = CLASS3;
4102 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4103 wqe->CmdType = WQE_TYPE_ABORT;
4104 } else {
4105 iocb = &iocbq->iocb;
4106 iocb->ULPCONTEXT = xid;
4107 iocb->ULPIOTAG = abort_iotag;
4108 iocb->un.acxri.abortType = flag;
4109 iocb->ULPLE = 1;
4110 iocb->ULPCLASS = class;
4111 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
4112 iocb->ULPOWNER = OWN_CHIP;
4113 }
4114
4115 return (iocbq);
4116
4117 } /* emlxs_create_abort_xri_cx() */
4118
4119
4120
4121 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4122 extern IOCBQ *
4123 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4124 uint16_t iotag, CHANNEL *cp)
4125 {
4126 emlxs_hba_t *hba = HBA;
4127 IOCBQ *iocbq;
4128 IOCB *iocb;
4129 emlxs_wqe_t *wqe;
4130 emlxs_buf_t *sbp;
4131 uint16_t abort_iotag;
4132
4133 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4134 return (NULL);
4135 }
4136
4137 iocbq->channel = (void *)cp;
4138 iocbq->port = (void *)port;
4139 iocbq->node = (void *)ndlp;
4140 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4141
4142 /*
4143 * set up an iotag using special Abort iotags
4144 */
4145 if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4146 hba->fc_oor_iotag = hba->max_iotag;
4147 }
4148 abort_iotag = hba->fc_oor_iotag++;
4149
4150 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4151 wqe = &iocbq->wqe;
4152 sbp = hba->fc_table[iotag];
4153
4154 /* Try to issue close by XRI if possible */
4155 if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4156 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4157 wqe->AbortTag = iotag;
4158 } else {
4159 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4160 wqe->AbortTag = sbp->xrip->XRI;
4161 }
4162 wqe->un.Abort.IA = 1;
4163 wqe->RequestTag = abort_iotag;
4164 wqe->Command = CMD_ABORT_XRI_CX;
4165 wqe->Class = CLASS3;
4166 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4167 wqe->CmdType = WQE_TYPE_ABORT;
4168 } else {
4169 iocb = &iocbq->iocb;
4170 iocb->ULPIOTAG = abort_iotag;
4171 iocb->un.acxri.abortType = 0;
4172 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4173 iocb->un.acxri.abortIoTag = iotag;
4174 iocb->ULPLE = 1;
4175 iocb->ULPCLASS = 0;
4176 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
4177 iocb->ULPOWNER = OWN_CHIP;
4178 }
4179
4180 return (iocbq);
4181
4182 } /* emlxs_create_close_xri_cn() */
4183
4184
4185 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4186 extern IOCBQ *
4187 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4188 CHANNEL *cp)
4189 {
4190 emlxs_hba_t *hba = HBA;
4191 IOCBQ *iocbq;
4192 IOCB *iocb;
4193 emlxs_wqe_t *wqe;
4194 uint16_t abort_iotag;
4195
4196 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4197 return (NULL);
4198 }
4199
4200 iocbq->channel = (void *)cp;
4201 iocbq->port = (void *)port;
4202 iocbq->node = (void *)ndlp;
4203 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4204
4205 /*
4206 * set up an iotag using special Abort iotags
4207 */
4208 if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4209 hba->fc_oor_iotag = hba->max_iotag;
4210 }
4211 abort_iotag = hba->fc_oor_iotag++;
4212
4213 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4214 wqe = &iocbq->wqe;
4215 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4216 wqe->un.Abort.IA = 1;
4217 wqe->RequestTag = abort_iotag;
4218 wqe->AbortTag = xid;
4219 wqe->Command = CMD_ABORT_XRI_CX;
4220 wqe->Class = CLASS3;
4221 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4222 wqe->CmdType = WQE_TYPE_ABORT;
4223 } else {
4224 iocb = &iocbq->iocb;
4225 iocb->ULPCONTEXT = xid;
4226 iocb->ULPIOTAG = abort_iotag;
4227 iocb->ULPLE = 1;
4228 iocb->ULPCLASS = 0;
4229 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
4230 iocb->ULPOWNER = OWN_CHIP;
4231 }
4232
4233 return (iocbq);
4234
4235 } /* emlxs_create_close_xri_cx() */
4236
4237
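/* Close an unsolicited ELS exchange by rxid (no ABTS is sent) */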
4238 void
4239 emlxs_close_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4240 {
4241 CHANNEL *cp;
4242 IOCBQ *iocbq;
4243 IOCB *iocb;
4244
4245 if (rxid == 0 || rxid == 0xFFFF) {
4246 return;
4247 }
4248
4249 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4251 "Closing ELS exchange: xid=%x", rxid);
4252
4253 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4254 return;
4255 }
4256 }
4257
4258 cp = &hba->chan[hba->channel_els];
4259
4260 mutex_enter(&EMLXS_FCTAB_LOCK);
4261
4262 /* Create the close IOCB */
4263 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4264
4265 mutex_exit(&EMLXS_FCTAB_LOCK);
4266
4267 if (iocbq) {
4268 iocb = &iocbq->iocb;
4269 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4270 "Closing ELS exchange: xid=%x iotag=%d", rxid,
4271 iocb->ULPIOTAG);
4272
4273 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4274 }
4275
4276 } /* emlxs_close_els_exchange() */
4277
4278
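/* Abort an unsolicited ELS exchange by rxid (ABTS if the link is up) */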
4279 void
4280 emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4281 {
4282 CHANNEL *cp;
4283 IOCBQ *iocbq;
4284 IOCB *iocb;
4285
4286 if (rxid == 0 || rxid == 0xFFFF) {
4287 return;
4288 }
4289
4290 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4291
4292 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4293 "Aborting ELS exchange: xid=%x", rxid);
4294
4295 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4296 /* We have no way to abort unsolicited exchanges */
4297 /* that we have not responded to at this time */
4298 /* So we will return for now */
4299 return;
4300 }
4301 }
4302
4303 cp = &hba->chan[hba->channel_els];
4304
4305 mutex_enter(&EMLXS_FCTAB_LOCK);
4306
4307 /* Create the abort (or close, if link down) IOCB */
4308 if (hba->state >= FC_LINK_UP) {
4309 iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4310 CLASS3, ABORT_TYPE_ABTS);
4311 } else {
4312 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4313 }
4314
4315 mutex_exit(&EMLXS_FCTAB_LOCK);
4316
4317 if (iocbq) {
4318 iocb = &iocbq->iocb;
4319 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4320 "Aborting ELS exchange: xid=%x iotag=%d", rxid,
4321 iocb->ULPIOTAG);
4322
4323 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4324 }
4325
4326 } /* emlxs_abort_els_exchange() */
4327
4328
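/* Abort an unsolicited CT exchange by rxid (ABTS if the link is up) */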
4329 void
4330 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4331 {
4332 CHANNEL *cp;
4333 IOCBQ *iocbq;
4334 IOCB *iocb;
4335
4336 if (rxid == 0 || rxid == 0xFFFF) {
4337 return;
4338 }
4339
4340 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4341 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4342 "Aborting CT exchange: xid=%x", rxid);
4343
4344 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4345 /* We have no way to abort unsolicited exchanges */
4346 /* that we have not responded to at this time */
4347 /* So we will return for now */
4348 return;
4349 }
4350 }
4351
4352 cp = &hba->chan[hba->channel_ct];
4353
4354 mutex_enter(&EMLXS_FCTAB_LOCK);
4355
4356 /* Create the abort (or close, if link down) IOCB */
4357 if (hba->state >= FC_LINK_UP) {
4358 iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4359 CLASS3, ABORT_TYPE_ABTS);
4360 } else {
4361 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4362 }
4363
4364 mutex_exit(&EMLXS_FCTAB_LOCK);
4365
4366 if (iocbq) {
4367 iocb = &iocbq->iocb;
4368 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4369 "Aborting CT exchange: xid=%x iotag=%d", rxid,
4370 iocb->ULPIOTAG);
4371
4372 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4373 }
4374
4375 } /* emlxs_abort_ct_exchange() */
4376
4377
4378 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4379 static void
4380 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4381 uint8_t *flag, emlxs_buf_t *fpkt)
4382 {
4383 emlxs_hba_t *hba = HBA;
4384 IOCBQ *iocbq;
4385 CHANNEL *cp;
4386 NODELIST *ndlp;
4387
4388 cp = (CHANNEL *)sbp->channel;
4389 ndlp = sbp->node;
4390
4391 /* Create the abort or close XRI IOCB */
4392 if (hba->state >= FC_LINK_UP) {
4393 iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
4394 CLASS3, ABORT_TYPE_ABTS);
4395 } else {
4396 iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
4397 }
4398 /*
4399 * Add this iocb to our local abort Q
4400 * This way we don't hold the CHIPQ lock too long
4401 */
4402 if (iocbq) {
4403 if (abort->q_first) {
4404 ((IOCBQ *)abort->q_last)->next = iocbq;
4405 abort->q_last = (uint8_t *)iocbq;
4406 abort->q_cnt++;
4407 } else {
4408 abort->q_first = (uint8_t *)iocbq;
4409 abort->q_last = (uint8_t *)iocbq;
4410 abort->q_cnt = 1;
4411 }
4412 iocbq->next = NULL;
4413 }
4414
4415 /* set the flags */
4416 mutex_enter(&sbp->mtx);
4417
4418 sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4419
4420 sbp->ticks = hba->timer_tics + 10;
4421 sbp->abort_attempts++;
4422
4423 flag[cp->channelno] = 1;
4424
4425 /*
4426 * If the fpkt is already set, then we will leave it alone
4427 * This ensures that this pkt is only accounted for on one
4428 * fpkt->flush_count
4429 */
4430 if (!sbp->fpkt && fpkt) {
4431 mutex_enter(&fpkt->mtx);
4432 sbp->fpkt = fpkt;
4433 fpkt->flush_count++;
4434 mutex_exit(&fpkt->mtx);
4435 }
4436
4437 mutex_exit(&sbp->mtx);
4438
4439 return;
4440
4441 } /* emlxs_sbp_abort_add() */
4442