xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_fcp.c (revision 67dbe2be0c0f1e2eb428b89088bb5667e8f0b9f6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_FCP_C);
32 
33 #define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
34 	PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow))
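/*
 * EMLXS_GET_VADDR rebuilds the 64-bit DMA address from the two 32-bit
 * halves stored in BDE "i" of the IOCB and passes it to
 * emlxs_mem_get_vaddr(), which returns the kernel virtual address that
 * was recorded for that buffer when it was posted (see
 * emlxs_mem_map_vaddr() in emlxs_post_buffer() below).
 */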
35 
36 static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
37     Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
38 
39 #define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
40 #define	SCSI_INQUIRY			0x12
41 #define	SCSI_RX_DIAG    		0x1C
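
/*
 * These are standard SCSI CDB opcodes: PERSISTENT RESERVE IN (0x5E),
 * INQUIRY (0x12) and RECEIVE DIAGNOSTIC RESULTS (0x1C).  INQUIRY and
 * RECEIVE DIAGNOSTIC RESULTS are referenced by the underrun heuristics
 * in emlxs_handle_fcp_event() below.
 */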
42 
43 
44 /*
45  *  emlxs_handle_fcp_event
46  *
47  *  Description: Process an FCP Rsp Ring completion
48  *
49  */
50 /* ARGSUSED */
51 extern void
52 emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
53 {
54 	emlxs_port_t *port = &PPORT;
55 	emlxs_config_t	*cfg = &CFG;
56 	IOCB *cmd;
57 	emlxs_buf_t *sbp;
58 	fc_packet_t *pkt = NULL;
59 #ifdef SAN_DIAG_SUPPORT
60 	NODELIST *ndlp;
61 #endif
62 	uint32_t iostat;
63 	uint8_t localstat;
64 	fcp_rsp_t *rsp;
65 	uint32_t rsp_data_resid;
66 	uint32_t check_underrun;
67 	uint8_t asc;
68 	uint8_t ascq;
69 	uint8_t scsi_status;
70 	uint8_t sense;
71 	uint32_t did;
72 	uint32_t fix_it;
73 	uint8_t *scsi_cmd;
74 	uint8_t scsi_opcode;
75 	uint16_t scsi_dl;
76 	uint32_t data_rx;
77 
78 	cmd = &iocbq->iocb;
79 
80 	/* Initialize the status */
81 	iostat = cmd->ULPSTATUS;
82 	localstat = 0;
83 	scsi_status = 0;
84 	asc = 0;
85 	ascq = 0;
86 	sense = 0;
87 	check_underrun = 0;
88 	fix_it = 0;
89 
90 	HBASTATS.FcpEvent++;
91 
92 	sbp = (emlxs_buf_t *)iocbq->sbp;
93 
94 	if (!sbp) {
95 		/* completion with missing xmit command */
96 		HBASTATS.FcpStray++;
97 
98 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
99 		    "cmd=%x iotag=%x", cmd->ULPCOMMAND, cmd->ULPIOTAG);
100 
101 		return;
102 	}
103 
104 	HBASTATS.FcpCompleted++;
105 
106 #ifdef SAN_DIAG_SUPPORT
107 	emlxs_update_sd_bucket(sbp);
108 #endif /* SAN_DIAG_SUPPORT */
109 
110 	pkt = PRIV2PKT(sbp);
111 
112 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
113 	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
114 	scsi_opcode = scsi_cmd[12];
115 	data_rx = 0;
116 
117 	/* Sync data in data buffer only on FC_PKT_FCP_READ */
118 	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
119 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
120 		    DDI_DMA_SYNC_FORKERNEL);
121 
122 #ifdef TEST_SUPPORT
123 		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
124 		    (pkt->pkt_datalen >= 512)) {
125 			hba->underrun_counter--;
126 			iostat = IOSTAT_FCP_RSP_ERROR;
127 
128 			/* Make the adapter report 512 bytes missing */
129 			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
130 
131 			/* Corrupt 512 bytes of Data buffer */
132 			bzero((uint8_t *)pkt->pkt_data, 512);
133 
134 			/* Set FCP response to STATUS_GOOD */
135 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
136 		}
137 #endif /* TEST_SUPPORT */
138 	}
139 
140 	/* Process the pkt */
141 	mutex_enter(&sbp->mtx);
142 
143 	/* Check for immediate return */
144 	if ((iostat == IOSTAT_SUCCESS) &&
145 	    (pkt->pkt_comp) &&
146 	    !(sbp->pkt_flags &
147 	    (PACKET_ULP_OWNED | PACKET_COMPLETED |
148 	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
149 	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
150 	    PACKET_IN_ABORT | PACKET_POLLED))) {
151 		HBASTATS.FcpGood++;
152 
153 		sbp->pkt_flags |=
154 		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
155 		    PACKET_COMPLETED | PACKET_ULP_OWNED);
156 		mutex_exit(&sbp->mtx);
157 
158 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
159 		emlxs_unswap_pkt(sbp);
160 #endif /* EMLXS_MODREV2X */
161 		cp->ulpCmplCmd++;
162 		(*pkt->pkt_comp) (pkt);
163 
164 		return;
165 	}
166 
167 	/*
168 	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
169 	 * is reported.
170 	 */
171 
172 	/* Check if a response buffer was provided */
173 	if ((iostat == IOSTAT_FCP_RSP_ERROR) && pkt->pkt_rsplen) {
174 		EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
175 		    DDI_DMA_SYNC_FORKERNEL);
176 
177 		/* Get the response buffer pointer */
178 		rsp = (fcp_rsp_t *)pkt->pkt_resp;
179 
180 		/* Set the valid response flag */
181 		sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
182 
183 		scsi_status = rsp->fcp_u.fcp_status.scsi_status;
184 
185 #ifdef SAN_DIAG_SUPPORT
186 		ndlp = (NODELIST *)iocbq->node;
187 		if (scsi_status == SCSI_STAT_QUE_FULL) {
188 			emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
189 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
190 		} else if (scsi_status == SCSI_STAT_BUSY) {
191 			emlxs_log_sd_scsi_event(port,
192 			    SD_SCSI_SUBCATEGORY_DEVBSY,
193 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
194 		}
195 #endif
196 
197 		/*
198 		 * Convert a task abort to a check condition with no data
199 		 * transferred. We saw a data corruption when Solaris received
200 		 * a Task Abort from a tape.
201 		 */
202 		if (scsi_status == SCSI_STAT_TASK_ABORT) {
203 			EMLXS_MSGF(EMLXS_CONTEXT,
204 			    &emlxs_fcp_completion_error_msg,
205 			    "Task Abort. "
206 			    "Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
207 			    did, sbp, scsi_opcode, pkt->pkt_datalen);
208 
209 			rsp->fcp_u.fcp_status.scsi_status =
210 			    SCSI_STAT_CHECK_COND;
211 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
212 			rsp->fcp_u.fcp_status.sense_len_set = 0;
213 			rsp->fcp_u.fcp_status.resid_over = 0;
214 
215 			if (pkt->pkt_datalen) {
216 				rsp->fcp_u.fcp_status.resid_under = 1;
217 				rsp->fcp_resid =
218 				    LE_SWAP32(pkt->pkt_datalen);
219 			} else {
220 				rsp->fcp_u.fcp_status.resid_under = 0;
221 				rsp->fcp_resid = 0;
222 			}
223 
224 			scsi_status = SCSI_STAT_CHECK_COND;
225 		}
226 
227 		/*
228 		 * We only need to check underrun if data could
229 		 * have been sent
230 		 */
231 
232 		/* Always check underrun if status is good */
233 		if (scsi_status == SCSI_STAT_GOOD) {
234 			check_underrun = 1;
235 		}
236 		/* Check the sense codes if this is a check condition */
237 		else if (scsi_status == SCSI_STAT_CHECK_COND) {
238 			check_underrun = 1;
239 
240 			/* Check if sense data was provided */
241 			if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
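				/*
				 * Fixed-format sense data begins at
				 * byte 32 of this FCP_RSP payload;
				 * within the sense data, byte 2 holds
				 * the sense-key byte and bytes 12 and
				 * 13 hold the ASC and ASCQ.
				 */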
242 				sense = *((uint8_t *)rsp + 32 + 2);
243 				asc = *((uint8_t *)rsp + 32 + 12);
244 				ascq = *((uint8_t *)rsp + 32 + 13);
245 			}
246 
247 #ifdef SAN_DIAG_SUPPORT
248 			emlxs_log_sd_scsi_check_event(port,
249 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
250 			    scsi_opcode, sense, asc, ascq);
251 #endif
252 		}
253 		/* Status is not good and this is not a check condition */
254 		/* No data should have been sent */
255 		else {
256 			check_underrun = 0;
257 		}
258 
259 		/* Get the residual underrun count reported by the SCSI reply */
260 		rsp_data_resid = (pkt->pkt_datalen &&
261 		    rsp->fcp_u.fcp_status.resid_under) ? LE_SWAP32(rsp->
262 		    fcp_resid) : 0;
263 
264 		/* Set the pkt resp_resid field */
265 		pkt->pkt_resp_resid = 0;
266 
267 		/* Set the pkt data_resid field */
268 		if (pkt->pkt_datalen &&
269 		    (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
270 			/*
271 			 * Get the residual underrun count reported by
272 			 * our adapter
273 			 */
274 			pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
275 
276 #ifdef SAN_DIAG_SUPPORT
277 			if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
278 				emlxs_log_sd_fc_rdchk_event(port,
279 				    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
280 				    scsi_opcode, pkt->pkt_data_resid);
281 			}
282 #endif
283 
284 			/* Get the actual amount of data transferred */
285 			data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
286 
287 			/*
288 			 * If the residual being reported by the adapter is
289 			 * greater than the residual being reported in the
290 			 * reply, then we have a true underrun.
291 			 */
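			/*
			 * Worked example (illustrative numbers): with
			 * pkt_datalen = 4096, an adapter-reported
			 * residual (fcpi_parm) of 1024, and a target-
			 * reported rsp_data_resid of 0, 1024 bytes were
			 * lost in flight; the code below must either
			 * repair the FCP response or report
			 * IOSTAT_DATA_UNDERRUN.
			 */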
292 			if (check_underrun &&
293 			    (pkt->pkt_data_resid > rsp_data_resid)) {
294 				switch (scsi_opcode) {
295 				case SCSI_INQUIRY:
296 					scsi_dl = scsi_cmd[16];
297 					break;
298 
299 				case SCSI_RX_DIAG:
300 					scsi_dl =
301 					    (scsi_cmd[15] * 0x100) +
302 					    scsi_cmd[16];
303 					break;
304 
305 				default:
306 					scsi_dl = pkt->pkt_datalen;
307 				}
308 
309 #ifdef FCP_UNDERRUN_PATCH1
310 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
311 				/*
312 				 * If status is not good and no data was
313 				 * actually transferred, then we must fix
314 				 * the issue
315 				 */
316 				if ((scsi_status != SCSI_STAT_GOOD) &&
317 				    (data_rx == 0)) {
318 					fix_it = 1;
319 
320 					EMLXS_MSGF(EMLXS_CONTEXT,
321 					    &emlxs_fcp_completion_error_msg,
322 					    "Underrun(1). Fixed. "
323 					    "did=0x%06x sbp=%p cmd=%02x "
324 					    "dl=%d,%d rx=%d rsp=%d",
325 					    did, sbp, scsi_opcode,
326 					    pkt->pkt_datalen, scsi_dl,
327 					    (pkt->pkt_datalen -
328 					    cmd->un.fcpi.fcpi_parm),
329 					    rsp_data_resid);
330 
331 				}
332 }
333 #endif /* FCP_UNDERRUN_PATCH1 */
334 
335 
336 #ifdef FCP_UNDERRUN_PATCH2
337 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
338 				if ((scsi_status == SCSI_STAT_GOOD)) {
339 					emlxs_msg_t	*msg;
340 
341 					msg = &emlxs_fcp_completion_error_msg;
342 					/*
343 					 * If status is good and this is an
344 					 * inquiry request and the amount of
345 					 * data requested <= data received,
346 					 * then we must fix the issue.
347 					 */
351 
352 					if ((scsi_opcode == SCSI_INQUIRY) &&
353 					    (pkt->pkt_datalen >= data_rx) &&
354 					    (scsi_dl <= data_rx)) {
355 						fix_it = 1;
356 
357 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
358 						    "Underrun(2). Fixed. "
359 						    "did=0x%06x sbp=%p "
360 						    "cmd=%02x dl=%d,%d "
361 						    "rx=%d rsp=%d",
362 						    did, sbp, scsi_opcode,
363 						    pkt->pkt_datalen, scsi_dl,
364 						    data_rx, rsp_data_resid);
365 
366 					}
367 
368 					/*
369 					 * If status is good and this is an
370 					 * inquiry request and the amount of
371 					 * data requested >= 128 bytes, but
372 					 * only 128 bytes were received,
373 					 * then we must fix the issue.
374 					 */
375 					else if ((scsi_opcode ==
376 					    SCSI_INQUIRY) &&
377 					    (pkt->pkt_datalen >= 128) &&
378 					    (scsi_dl >= 128) &&
379 					    (data_rx == 128)) {
380 						fix_it = 1;
381 
382 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
383 						    "Underrun(3). Fixed. "
384 						    "did=0x%06x sbp=%p "
385 						    "cmd=%02x dl=%d,%d "
386 						    "rx=%d rsp=%d",
387 						    did, sbp, scsi_opcode,
388 						    pkt->pkt_datalen, scsi_dl,
389 						    data_rx, rsp_data_resid);
390 
391 					}
392 
393 				}
394 }
395 #endif /* FCP_UNDERRUN_PATCH2 */
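
				/*
				 * Illustrative INQUIRY case for the
				 * patches above (assumed numbers):
				 * scsi_dl = 36 (CDB allocation length),
				 * pkt_datalen = 255, and the target
				 * returns all 36 bytes (data_rx = 36)
				 * with no residual in the FCP_RSP.  The
				 * adapter still reports a 219-byte
				 * underrun, so the response is patched
				 * rather than failing the I/O.
				 */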
396 
397 				/*
398 				 * Check if SCSI response payload should be
399 				 * fixed or if a DATA_UNDERRUN should be
400 				 * reported
401 				 */
402 				if (fix_it) {
403 					/*
404 					 * Fix the SCSI response payload itself
405 					 */
406 					rsp->fcp_u.fcp_status.resid_under = 1;
407 					rsp->fcp_resid =
408 					    LE_SWAP32(pkt->pkt_data_resid);
409 				} else {
410 					/*
411 					 * Change the status from
412 					 * IOSTAT_FCP_RSP_ERROR to
413 					 * IOSTAT_DATA_UNDERRUN
414 					 */
415 					iostat = IOSTAT_DATA_UNDERRUN;
416 					pkt->pkt_data_resid =
417 					    pkt->pkt_datalen;
418 				}
419 			}
420 
421 			/*
422 			 * If the residual being reported by the adapter is
423 			 * less than the residual being reported in the reply,
424 			 * then we have a true overrun. Since we don't know
425 			 * where the extra data came from or went to then we
426 			 * cannot trust anything we received
427 			 */
428 			else if (rsp_data_resid > pkt->pkt_data_resid) {
429 				/*
430 				 * Change the status from
431 				 * IOSTAT_FCP_RSP_ERROR to
432 				 * IOSTAT_DATA_OVERRUN
433 				 */
434 				iostat = IOSTAT_DATA_OVERRUN;
435 				pkt->pkt_data_resid = pkt->pkt_datalen;
436 			}
437 		} else {	/* pkt->pkt_datalen==0 or FC_PKT_FCP_WRITE */
438 
439 			/* Report whatever the target reported */
440 			pkt->pkt_data_resid = rsp_data_resid;
441 		}
442 	}
443 
444 	/* Print completion message */
445 	switch (iostat) {
446 	case IOSTAT_SUCCESS:
447 		/* Build SCSI GOOD status */
448 		if (pkt->pkt_rsplen) {
449 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
450 		}
451 		break;
452 
453 	case IOSTAT_FCP_RSP_ERROR:
454 		break;
455 
456 	case IOSTAT_REMOTE_STOP:
457 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
458 		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
459 		    scsi_opcode);
460 		break;
461 
462 	case IOSTAT_LOCAL_REJECT:
463 		localstat = cmd->un.grsp.perr.statLocalError;
464 
465 		switch (localstat) {
466 		case IOERR_SEQUENCE_TIMEOUT:
467 			EMLXS_MSGF(EMLXS_CONTEXT,
468 			    &emlxs_fcp_completion_error_msg,
469 			    "Local reject. "
470 			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
471 			    emlxs_error_xlate(localstat), did, sbp,
472 			    scsi_opcode, pkt->pkt_timeout);
473 			break;
474 
475 		default:
476 			EMLXS_MSGF(EMLXS_CONTEXT,
477 			    &emlxs_fcp_completion_error_msg,
478 			    "Local reject. %s did=0x%06x sbp=%p cmd=%02x",
479 			    emlxs_error_xlate(localstat), did, sbp,
480 			    scsi_opcode);
481 		}
482 
483 		break;
484 
485 	case IOSTAT_NPORT_RJT:
486 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
487 		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
488 		    scsi_opcode);
489 		break;
490 
491 	case IOSTAT_FABRIC_RJT:
492 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
493 		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
494 		    scsi_opcode);
495 		break;
496 
497 	case IOSTAT_NPORT_BSY:
498 #ifdef SAN_DIAG_SUPPORT
499 		ndlp = (NODELIST *)iocbq->node;
500 		emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
501 #endif
502 
503 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
504 		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
505 		    scsi_opcode);
506 		break;
507 
508 	case IOSTAT_FABRIC_BSY:
509 #ifdef SAN_DIAG_SUPPORT
510 		ndlp = (NODELIST *)iocbq->node;
511 		emlxs_log_sd_fc_bsy_event(port, NULL);
512 #endif
513 
514 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
515 		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
516 		    scsi_opcode);
517 		break;
518 
519 	case IOSTAT_INTERMED_RSP:
520 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
521 		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
522 		    sbp, scsi_opcode);
523 		break;
524 
525 	case IOSTAT_LS_RJT:
526 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
527 		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
528 		    scsi_opcode);
529 		break;
530 
531 	case IOSTAT_DATA_UNDERRUN:
532 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
533 		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
534 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
535 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
536 		    rsp_data_resid, scsi_status, sense, asc, ascq);
537 		break;
538 
539 	case IOSTAT_DATA_OVERRUN:
540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
541 		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
542 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
543 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
544 		    rsp_data_resid, scsi_status, sense, asc, ascq);
545 		break;
546 
547 	default:
548 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
549 		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
550 		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
551 		    scsi_opcode);
552 		break;
553 	}
554 
555 done:
556 
557 	if (iostat == IOSTAT_SUCCESS) {
558 		HBASTATS.FcpGood++;
559 	} else {
560 		HBASTATS.FcpError++;
561 	}
562 
563 	mutex_exit(&sbp->mtx);
564 
565 	emlxs_pkt_complete(sbp, iostat, localstat, 0);
566 
567 	return;
568 
569 } /* emlxs_handle_fcp_event() */
570 
571 
572 
573 /*
574  *  emlxs_post_buffer
575  *
576  *  This routine posts "cnt" buffers to the
577  *  ring with the QUE_RING_BUF64_CN command,
578  *  which allows up to 2 buffers per command.
579  *  Returns the number of buffers NOT posted.
580  */
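
/*
 * Usage note: a nonzero return means some buffers could not be posted;
 * the shortfall is saved in rp->fc_missbufcnt and is automatically
 * added back in (cnt += rp->fc_missbufcnt) on the next call.
 */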
581 /* SLI3 */
582 extern int
583 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
584 {
585 	emlxs_port_t *port = &PPORT;
586 	IOCB *icmd;
587 	IOCBQ *iocbq;
588 	MATCHMAP *mp;
589 	uint16_t tag;
590 	uint32_t maxqbuf;
591 	int32_t i;
592 	int32_t j;
593 	uint32_t seg;
594 	uint32_t size;
595 
596 	mp = 0;
597 	maxqbuf = 2;
598 	tag = (uint16_t)cnt;
599 	cnt += rp->fc_missbufcnt;
600 
601 	if (rp->ringno == hba->channel_els) {
602 		seg = MEM_BUF;
603 		size = MEM_ELSBUF_SIZE;
604 	} else if (rp->ringno == hba->channel_ip) {
605 		seg = MEM_IPBUF;
606 		size = MEM_IPBUF_SIZE;
607 	} else if (rp->ringno == hba->channel_ct) {
608 		seg = MEM_CTBUF;
609 		size = MEM_CTBUF_SIZE;
610 	}
611 #ifdef SFCT_SUPPORT
612 	else if (rp->ringno == hba->CHANNEL_FCT) {
613 		seg = MEM_FCTBUF;
614 		size = MEM_FCTBUF_SIZE;
615 	}
616 #endif /* SFCT_SUPPORT */
617 	else {
618 		return (0);
619 	}
620 
621 	/*
622 	 * While there are buffers to post
623 	 */
624 	while (cnt) {
625 		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == 0) {
626 			rp->fc_missbufcnt = cnt;
627 			return (cnt);
628 		}
629 
630 		iocbq->channel = (void *)&hba->chan[rp->ringno];
631 		iocbq->port = (void *)port;
632 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
633 
634 		icmd = &iocbq->iocb;
635 
636 		/*
637 		 * Post up to maxqbuf buffers per command
638 		 */
639 		for (i = 0; i < maxqbuf; i++) {
640 			if (cnt <= 0)
641 				break;
642 
643 			/* fill in BDEs for command */
644 			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg, 1))
645 			    == 0) {
646 				icmd->ULPBDECOUNT = i;
647 				for (j = 0; j < i; j++) {
648 					mp = EMLXS_GET_VADDR(hba, rp, icmd);
649 					if (mp) {
650 						(void) emlxs_mem_put(hba, seg,
651 						    (uint8_t *)mp);
652 					}
653 				}
654 
655 				rp->fc_missbufcnt = cnt + i;
656 
657 				(void) emlxs_mem_put(hba, MEM_IOCB,
658 				    (uint8_t *)iocbq);
659 
660 				return (cnt + i);
661 			}
662 
663 			/*
664 			 * map that page and save the address pair for lookup
665 			 * later
666 			 */
667 			emlxs_mem_map_vaddr(hba,
668 			    rp,
669 			    mp,
670 			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
671 			    (uint32_t *)&icmd->un.cont64[i].addrLow);
672 
673 			icmd->un.cont64[i].tus.f.bdeSize = size;
674 			icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;
675 
676 			/*
677 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
678 			 *    "UB Post: ring=%d addr=%08x%08x size=%d",
679 			 *    rp->ringno, icmd->un.cont64[i].addrHigh,
680 			 *    icmd->un.cont64[i].addrLow, size);
681 			 */
682 
683 			cnt--;
684 		}
685 
686 		icmd->ULPIOTAG = tag;
687 		icmd->ULPBDECOUNT = i;
688 		icmd->ULPLE = 1;
689 		icmd->ULPOWNER = OWN_CHIP;
690 		/* used for delimiter between commands */
691 		iocbq->bp = (uint8_t *)mp;
692 
693 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
694 	}
695 
696 	rp->fc_missbufcnt = 0;
697 
698 	return (0);
699 
700 } /* emlxs_post_buffer() */
701 
702 
703 extern int
704 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
705 {
706 	emlxs_hba_t *hba = HBA;
707 	emlxs_config_t *cfg;
708 	NODELIST *nlp;
709 	fc_affected_id_t *aid;
710 	uint32_t mask;
711 	uint32_t aff_d_id;
712 	uint32_t linkdown;
713 	uint32_t vlinkdown;
714 	uint32_t action;
715 	int i;
716 	uint32_t unreg_vpi;
717 	uint32_t update;
718 	uint32_t adisc_support;
719 	uint8_t format;
720 
721 	/* Target mode only uses this routine for linkdowns */
722 	if (port->tgt_mode && (scope != 0xffffffff) && (scope != 0xfeffffff)) {
723 		return (0);
724 	}
725 
726 	cfg = &CFG;
727 	aid = (fc_affected_id_t *)&scope;
728 	linkdown = 0;
729 	vlinkdown = 0;
730 	unreg_vpi = 0;
731 	update = 0;
732 
733 	if (!(port->flag & EMLXS_PORT_BOUND)) {
734 		return (0);
735 	}
736 
737 	format = aid->aff_format;
738 
739 	switch (format) {
740 	case 0:	/* Port */
741 		mask = 0x00ffffff;
742 		break;
743 
744 	case 1:	/* Area */
745 		mask = 0x00ffff00;
746 		break;
747 
748 	case 2:	/* Domain */
749 		mask = 0x00ff0000;
750 		break;
751 
752 	case 3:	/* Network */
753 		mask = 0x00000000;
754 		break;
755 
756 #ifdef DHCHAP_SUPPORT
757 	case 0xfe:	/* Virtual link down */
758 		mask = 0x00000000;
759 		vlinkdown = 1;
760 		break;
761 #endif /* DHCHAP_SUPPORT */
762 
763 	case 0xff:	/* link is down */
764 		mask = 0x00000000;
765 		linkdown = 1;
766 		break;
767 
768 	}
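
	/*
	 * Example (illustrative): an area-format RSCN (aff_format = 1)
	 * carrying aff_d_id 0x0A0B00 selects mask 0x00ffff00, so every
	 * node whose D_ID shares domain 0x0A and area 0x0B satisfies
	 * (nlp_DID & mask) == aff_d_id in the scans below.
	 */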
769 
770 	aff_d_id = aid->aff_d_id & mask;
771 
772 
773 	/*
774 	 * If the link is down, this is a hard shutdown and flush.
775 	 * If the link is not down, this is a soft shutdown and flush
776 	 * (e.g. RSCN)
777 	 */
778 	if (linkdown) {
779 		mutex_enter(&EMLXS_PORT_LOCK);
780 
781 		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
782 		port->prev_did = port->did;
783 		port->did = 0;
784 
785 		if (port->ulp_statec != FC_STATE_OFFLINE) {
786 			port->ulp_statec = FC_STATE_OFFLINE;
787 			update = 1;
788 		}
789 
790 		mutex_exit(&EMLXS_PORT_LOCK);
791 
792 		/* Tell ULP about it */
793 		if (update) {
794 			if (port->flag & EMLXS_PORT_BOUND) {
795 				if (port->vpi == 0) {
796 					EMLXS_MSGF(EMLXS_CONTEXT,
797 					    &emlxs_link_down_msg, NULL);
798 				}
799 
800 				if (port->ini_mode) {
801 					port->ulp_statec_cb(port->ulp_handle,
802 					    FC_STATE_OFFLINE);
803 				}
804 #ifdef SFCT_SUPPORT
805 				else if (port->tgt_mode) {
806 					emlxs_fct_link_down(port);
807 				}
808 #endif /* SFCT_SUPPORT */
809 
810 			} else {
811 				if (port->vpi == 0) {
812 					EMLXS_MSGF(EMLXS_CONTEXT,
813 					    &emlxs_link_down_msg, "*");
814 				}
815 			}
816 
817 
818 		}
819 
820 		unreg_vpi = 1;
821 
822 #ifdef DHCHAP_SUPPORT
823 		/* Stop authentication with all nodes */
824 		emlxs_dhc_auth_stop(port, NULL);
825 #endif /* DHCHAP_SUPPORT */
826 
827 		/* Flush the base node */
828 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
829 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
830 
831 		/* Flush any pending ub buffers */
832 		emlxs_ub_flush(port);
833 	}
834 #ifdef DHCHAP_SUPPORT
835 	/* virtual link down */
836 	else if (vlinkdown) {
837 		mutex_enter(&EMLXS_PORT_LOCK);
838 
839 		if (port->ulp_statec != FC_STATE_OFFLINE) {
840 			port->ulp_statec = FC_STATE_OFFLINE;
841 			update = 1;
842 		}
843 
844 		mutex_exit(&EMLXS_PORT_LOCK);
845 
846 		/* Tell ULP about it */
847 		if (update) {
848 			if (port->flag & EMLXS_PORT_BOUND) {
849 				if (port->vpi == 0) {
850 					EMLXS_MSGF(EMLXS_CONTEXT,
851 					    &emlxs_link_down_msg,
852 					    "Switch authentication failed.");
853 				}
854 
855 #ifdef SFCT_SUPPORT
856 				if (port->tgt_mode) {
857 					emlxs_fct_link_down(port);
858 
859 				} else if (port->ini_mode) {
860 					port->ulp_statec_cb(port->ulp_handle,
861 					    FC_STATE_OFFLINE);
862 				}
863 #else
864 				port->ulp_statec_cb(port->ulp_handle,
865 				    FC_STATE_OFFLINE);
866 #endif	/* SFCT_SUPPORT */
867 			} else {
868 				if (port->vpi == 0) {
869 					EMLXS_MSGF(EMLXS_CONTEXT,
870 					    &emlxs_link_down_msg,
871 					    "Switch authentication failed. *");
872 				}
873 			}
874 
875 
876 		}
877 
878 		/* Flush the base node */
879 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
880 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
881 	}
882 #endif /* DHCHAP_SUPPORT */
883 
884 	if (port->tgt_mode) {
885 		goto done;
886 	}
887 
888 	/* Set the node tags */
889 	/* We will process all nodes with this tag */
890 	rw_enter(&port->node_rwlock, RW_READER);
891 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
892 		nlp = port->node_table[i];
893 		while (nlp != NULL) {
894 			nlp->nlp_tag = 1;
895 			nlp = nlp->nlp_list_next;
896 		}
897 	}
898 	rw_exit(&port->node_rwlock);
899 
900 	if (hba->flag & FC_ONLINE_MODE) {
901 		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
902 	} else {
903 		adisc_support = 0;
904 	}
905 
906 	/* Check ADISC support level */
907 	switch (adisc_support) {
908 	case 0:	/* No support - Flush all IO to all matching nodes */
909 
910 		for (;;) {
911 			/*
912 			 * We need to hold the locks this way because
913 			 * emlxs_mb_unreg_did and the flush routines enter the
914 			 * same locks. Also, when we release the lock the list
915 			 * can change out from under us.
916 			 */
917 
918 			/* Find first node */
919 			rw_enter(&port->node_rwlock, RW_READER);
920 			action = 0;
921 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
922 				nlp = port->node_table[i];
923 				while (nlp != NULL) {
924 					if (!nlp->nlp_tag) {
925 						nlp = nlp->nlp_list_next;
926 						continue;
927 					}
928 					nlp->nlp_tag = 0;
929 
930 					/*
931 					 * Check for any device that matches
932 					 * our mask
933 					 */
934 					if ((nlp->nlp_DID & mask) == aff_d_id) {
935 						if (linkdown) {
936 							action = 1;
937 							break;
938 					} else { /* Must be an RSCN */
939 
940 							action = 2;
941 							break;
942 						}
943 					}
944 					nlp = nlp->nlp_list_next;
945 				}
946 
947 				if (action) {
948 					break;
949 				}
950 			}
951 			rw_exit(&port->node_rwlock);
952 
953 
954 			/* Check if nothing was found */
955 			if (action == 0) {
956 				break;
957 			} else if (action == 1) {
958 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
959 				    NULL, NULL, NULL);
960 			} else if (action == 2) {
961 #ifdef DHCHAP_SUPPORT
962 				emlxs_dhc_auth_stop(port, nlp);
963 #endif /* DHCHAP_SUPPORT */
964 
965 				/*
966 				 * Close the node for any further normal IO
967 				 * A PLOGI will reopen the node
968 				 */
969 				emlxs_node_close(port, nlp,
970 				    hba->channel_fcp, 60);
971 				emlxs_node_close(port, nlp,
972 				    hba->channel_ip, 60);
973 
974 				/* Flush tx queue */
975 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
976 
977 				/* Flush chip queue */
978 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
979 			}
980 
981 		}
982 
983 		break;
984 
985 	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */
986 
987 		for (;;) {
988 
989 			/*
990 			 * We need to hold the locks this way because
991 			 * emlxs_mb_unreg_did and the flush routines enter the
992 			 * same locks. Also, when we release the lock the list
993 			 * can change out from under us.
994 			 */
995 			rw_enter(&port->node_rwlock, RW_READER);
996 			action = 0;
997 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
998 				nlp = port->node_table[i];
999 				while (nlp != NULL) {
1000 					if (!nlp->nlp_tag) {
1001 						nlp = nlp->nlp_list_next;
1002 						continue;
1003 					}
1004 					nlp->nlp_tag = 0;
1005 
1006 					/*
1007 					 * Check for special FCP2 target device
1008 					 * that matches our mask
1009 					 */
1010 					if ((nlp->nlp_fcp_info &
1011 					    NLP_FCP_TGT_DEVICE) &&
1012 					    (nlp->nlp_fcp_info &
1013 					    NLP_FCP_2_DEVICE) &&
1014 					    (nlp->nlp_DID & mask) ==
1015 					    aff_d_id) {
1016 						action = 3;
1017 						break;
1018 					}
1019 
1020 					/*
1021 					 * Check for any other device that
1022 					 * matches our mask
1023 					 */
1024 					else if ((nlp->nlp_DID & mask) ==
1025 					    aff_d_id) {
1026 						if (linkdown) {
1027 							action = 1;
1028 							break;
1029 						} else { /* Must be an RSCN */
1030 
1031 							action = 2;
1032 							break;
1033 						}
1034 					}
1035 
1036 					nlp = nlp->nlp_list_next;
1037 				}
1038 
1039 				if (action) {
1040 					break;
1041 				}
1042 			}
1043 			rw_exit(&port->node_rwlock);
1044 
1045 			/* Check if nothing was found */
1046 			if (action == 0) {
1047 				break;
1048 			} else if (action == 1) {
1049 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1050 				    NULL, NULL, NULL);
1051 			} else if (action == 2) {
1052 #ifdef DHCHAP_SUPPORT
1053 				emlxs_dhc_auth_stop(port, nlp);
1054 #endif /* DHCHAP_SUPPORT */
1055 
1056 				/*
1057 				 * Close the node for any further normal IO
1058 				 * A PLOGI will reopen the node
1059 				 */
1060 				emlxs_node_close(port, nlp,
1061 				    hba->channel_fcp, 60);
1062 				emlxs_node_close(port, nlp,
1063 				    hba->channel_ip, 60);
1064 
1065 				/* Flush tx queue */
1066 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1067 
1068 				/* Flush chip queue */
1069 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1070 
1071 			} else if (action == 3) {	/* FCP2 devices */
1072 				unreg_vpi = 0;
1073 
1074 #ifdef DHCHAP_SUPPORT
1075 				emlxs_dhc_auth_stop(port, nlp);
1076 #endif /* DHCHAP_SUPPORT */
1077 
1078 				/*
1079 				 * Close the node for any further normal IO
1080 				 * An ADISC or a PLOGI will reopen the node
1081 				 */
1082 				emlxs_node_close(port, nlp,
1083 				    hba->channel_fcp, -1);
1084 				emlxs_node_close(port, nlp, hba->channel_ip,
1085 				    ((linkdown) ? 0 : 60));
1086 
1087 				/* Flush tx queues except for FCP ring */
1088 				(void) emlxs_tx_node_flush(port, nlp,
1089 				    &hba->chan[hba->channel_ct], 0, 0);
1090 				(void) emlxs_tx_node_flush(port, nlp,
1091 				    &hba->chan[hba->channel_els], 0, 0);
1092 				(void) emlxs_tx_node_flush(port, nlp,
1093 				    &hba->chan[hba->channel_ip], 0, 0);
1094 
1095 				/* Flush chip queues except for FCP ring */
1096 				(void) emlxs_chipq_node_flush(port,
1097 				    &hba->chan[hba->channel_ct], nlp, 0);
1098 				(void) emlxs_chipq_node_flush(port,
1099 				    &hba->chan[hba->channel_els], nlp, 0);
1100 				(void) emlxs_chipq_node_flush(port,
1101 				    &hba->chan[hba->channel_ip], nlp, 0);
1102 			}
1103 		}
1104 		break;
1105 
1106 	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */
1107 
1108 		if (!linkdown && !vlinkdown) {
1109 			break;
1110 		}
1111 
1112 		for (;;) {
1113 			/*
1114 			 * We need to hold the locks this way because
1115 			 * emlxs_mb_unreg_did and the flush routines enter the
1116 			 * same locks. Also, when we release the lock the list
1117 			 * can change out from under us.
1118 			 */
1119 			rw_enter(&port->node_rwlock, RW_READER);
1120 			action = 0;
1121 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1122 				nlp = port->node_table[i];
1123 				while (nlp != NULL) {
1124 					if (!nlp->nlp_tag) {
1125 						nlp = nlp->nlp_list_next;
1126 						continue;
1127 					}
1128 					nlp->nlp_tag = 0;
1129 
1130 					/*
1131 					 * Check for FCP target device that
1132 					 * matches our mask
1133 					 */
1134 					if ((nlp->nlp_fcp_info &
1135 					    NLP_FCP_TGT_DEVICE) &&
1136 					    (nlp->nlp_DID & mask) ==
1137 					    aff_d_id) {
1138 						action = 3;
1139 						break;
1140 					}
1141 
1142 					/*
1143 					 * Check for any other device that
1144 					 * matches our mask
1145 					 */
1146 					else if ((nlp->nlp_DID & mask) ==
1147 					    aff_d_id) {
1148 						if (linkdown) {
1149 							action = 1;
1150 							break;
1151 						} else { /* Must be an RSCN */
1152 
1153 							action = 2;
1154 							break;
1155 						}
1156 					}
1157 
1158 					nlp = nlp->nlp_list_next;
1159 				}
1160 				if (action) {
1161 					break;
1162 				}
1163 			}
1164 			rw_exit(&port->node_rwlock);
1165 
1166 			/* Check if nothing was found */
1167 			if (action == 0) {
1168 				break;
1169 			} else if (action == 1) {
1170 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1171 				    NULL, NULL, NULL);
1172 			} else if (action == 2) {
1173 				/*
1174 				 * Close the node for any further normal IO
1175 				 * A PLOGI will reopen the node
1176 				 */
1177 				emlxs_node_close(port, nlp,
1178 				    hba->channel_fcp, 60);
1179 				emlxs_node_close(port, nlp,
1180 				    hba->channel_ip, 60);
1181 
1182 				/* Flush tx queue */
1183 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1184 
1185 				/* Flush chip queue */
1186 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1187 
1188 			} else if (action == 3) {	/* FCP2 devices */
1189 				unreg_vpi = 0;
1190 
1191 				/*
1192 				 * Close the node for any further normal IO
1193 				 * An ADISC or a PLOGI will reopen the node
1194 				 */
1195 				emlxs_node_close(port, nlp,
1196 				    hba->channel_fcp, -1);
1197 				emlxs_node_close(port, nlp, hba->channel_ip,
1198 				    ((linkdown) ? 0 : 60));
1199 
1200 				/* Flush tx queues except for FCP ring */
1201 				(void) emlxs_tx_node_flush(port, nlp,
1202 				    &hba->chan[hba->channel_ct], 0, 0);
1203 				(void) emlxs_tx_node_flush(port, nlp,
1204 				    &hba->chan[hba->channel_els], 0, 0);
1205 				(void) emlxs_tx_node_flush(port, nlp,
1206 				    &hba->chan[hba->channel_ip], 0, 0);
1207 
1208 				/* Flush chip queues except for FCP ring */
1209 				(void) emlxs_chipq_node_flush(port,
1210 				    &hba->chan[hba->channel_ct], nlp, 0);
1211 				(void) emlxs_chipq_node_flush(port,
1212 				    &hba->chan[hba->channel_els], nlp, 0);
1213 				(void) emlxs_chipq_node_flush(port,
1214 				    &hba->chan[hba->channel_ip], nlp, 0);
1215 			}
1216 		}
1217 
1218 		break;
1219 
1220 	}	/* switch() */
1221 
1222 done:
1223 
1224 	if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
1225 		if (unreg_vpi) {
1226 			(void) emlxs_mb_unreg_vpi(port);
1227 		}
1228 	}
1229 
1230 	return (0);
1231 
1232 } /* emlxs_port_offline() */
1233 
1234 
1235 extern void
1236 emlxs_port_online(emlxs_port_t *vport)
1237 {
1238 	emlxs_hba_t *hba = vport->hba;
1239 	emlxs_port_t *port = &PPORT;
1240 	uint32_t state;
1241 	uint32_t update;
1242 	uint32_t npiv_linkup;
1243 	char topology[32];
1244 	char linkspeed[32];
1245 	char mode[32];
1246 
1247 	/*
1248 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1249 	 *    "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1250 	 */
1251 
1252 	if ((vport->vpi > 0) &&
1253 	    (!(hba->flag & FC_NPIV_ENABLED) ||
1254 	    !(hba->flag & FC_NPIV_SUPPORTED))) {
1255 		return;
1256 	}
1257 
1258 	if (!(vport->flag & EMLXS_PORT_BOUND) ||
1259 	    !(vport->flag & EMLXS_PORT_ENABLE)) {
1260 		return;
1261 	}
1262 
1263 	mutex_enter(&EMLXS_PORT_LOCK);
1264 
1265 	/* Check for mode */
1266 	if (port->tgt_mode) {
1267 		(void) strcpy(mode, ", target");
1268 	} else if (port->ini_mode) {
1269 		(void) strcpy(mode, ", initiator");
1270 	} else {
1271 		(void) strcpy(mode, "");
1272 	}
1273 
1274 	/* Check for loop topology */
1275 	if (hba->topology == TOPOLOGY_LOOP) {
1276 		state = FC_STATE_LOOP;
1277 		(void) strcpy(topology, ", loop");
1278 	} else {
1279 		state = FC_STATE_ONLINE;
1280 		(void) strcpy(topology, ", fabric");
1281 	}
1282 
1283 	/* Set the link speed */
1284 	switch (hba->linkspeed) {
1285 	case 0:
1286 		(void) strcpy(linkspeed, "Gb");
1287 		state |= FC_STATE_1GBIT_SPEED;
1288 		break;
1289 
1290 	case LA_1GHZ_LINK:
1291 		(void) strcpy(linkspeed, "1Gb");
1292 		state |= FC_STATE_1GBIT_SPEED;
1293 		break;
1294 	case LA_2GHZ_LINK:
1295 		(void) strcpy(linkspeed, "2Gb");
1296 		state |= FC_STATE_2GBIT_SPEED;
1297 		break;
1298 	case LA_4GHZ_LINK:
1299 		(void) strcpy(linkspeed, "4Gb");
1300 		state |= FC_STATE_4GBIT_SPEED;
1301 		break;
1302 	case LA_8GHZ_LINK:
1303 		(void) strcpy(linkspeed, "8Gb");
1304 		state |= FC_STATE_8GBIT_SPEED;
1305 		break;
1306 	case LA_10GHZ_LINK:
1307 		(void) strcpy(linkspeed, "10Gb");
1308 		state |= FC_STATE_10GBIT_SPEED;
1309 		break;
1310 	default:
1311 		(void) sprintf(linkspeed, "unknown(0x%x)", hba->linkspeed);
1312 		break;
1313 	}
1314 
1315 	npiv_linkup = 0;
1316 	update = 0;
1317 
1318 	if ((hba->state >= FC_LINK_UP) &&
1319 	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1320 		update = 1;
1321 		vport->ulp_statec = state;
1322 
1323 		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1324 			hba->flag |= FC_NPIV_LINKUP;
1325 			npiv_linkup = 1;
1326 		}
1327 	}
1328 
1329 	mutex_exit(&EMLXS_PORT_LOCK);
1330 
1331 
1332 	/*
1333 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1334 	 *    "linkup_callback: update=%d vpi=%d flag=%d fc_flag=%x state=%x"
1335 	 *    "statec=%x", update, vport->vpi, npiv_linkup, hba->flag,
1336 	 *    hba->state, vport->ulp_statec);
1337 	 */
1338 
1339 	if (update) {
1340 		if (vport->flag & EMLXS_PORT_BOUND) {
1341 			if (vport->vpi == 0) {
1342 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1343 				    "%s%s%s", linkspeed, topology, mode);
1344 			} else if (npiv_linkup) {
1345 				EMLXS_MSGF(EMLXS_CONTEXT,
1346 				    &emlxs_npiv_link_up_msg, "%s%s%s",
1347 				    linkspeed, topology, mode);
1348 			}
1349 
1350 			if (vport->ini_mode) {
1351 				vport->ulp_statec_cb(vport->ulp_handle,
1352 				    state);
1353 			}
1354 #ifdef SFCT_SUPPORT
1355 			else if (vport->tgt_mode) {
1356 				emlxs_fct_link_up(vport);
1357 			}
1358 #endif /* SFCT_SUPPORT */
1359 		} else {
1360 			if (vport->vpi == 0) {
1361 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1362 				    "%s%s%s *", linkspeed, topology, mode);
1363 			} else if (npiv_linkup) {
1364 				EMLXS_MSGF(EMLXS_CONTEXT,
1365 				    &emlxs_npiv_link_up_msg, "%s%s%s *",
1366 				    linkspeed, topology, mode);
1367 			}
1368 		}
1369 
1370 		/* Check for waiting threads */
1371 		if (vport->vpi == 0) {
1372 			mutex_enter(&EMLXS_LINKUP_LOCK);
1373 			if (hba->linkup_wait_flag == TRUE) {
1374 				hba->linkup_wait_flag = FALSE;
1375 				cv_broadcast(&EMLXS_LINKUP_CV);
1376 			}
1377 			mutex_exit(&EMLXS_LINKUP_LOCK);
1378 		}
1379 
1380 		/* Flush any pending ub buffers */
1381 		emlxs_ub_flush(vport);
1382 	}
1383 
1384 	return;
1385 
1386 } /* emlxs_port_online() */
1387 
1388 
1389 extern void
1390 emlxs_linkdown(emlxs_hba_t *hba)
1391 {
1392 	emlxs_port_t *port = &PPORT;
1393 	RPIobj_t *rp;
1394 	int i;
1395 
1396 	mutex_enter(&EMLXS_PORT_LOCK);
1397 
1398 	if (hba->state > FC_LINK_DOWN) {
1399 		HBASTATS.LinkDown++;
1400 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
1401 	}
1402 
1403 	/* Filter hba flags */
1404 	hba->flag &= FC_LINKDOWN_MASK;
1405 	hba->discovery_timer = 0;
1406 	hba->linkup_timer = 0;
1407 
1408 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1409 		rp = hba->sli.sli4.RPIp;
1410 		for (i = 0; i < hba->sli.sli4.RPICount; i++) {
1411 			if (rp->state & RESOURCE_ALLOCATED) {
1412 				rp->state |= RESOURCE_RPI_PAUSED;
1413 			}
1414 			rp++;
1415 		}
1416 	}
1417 
1418 	mutex_exit(&EMLXS_PORT_LOCK);
1419 
1420 	for (i = 0; i < MAX_VPORTS; i++) {
1421 		port = &VPORT(i);
1422 
1423 		if (!(port->flag & EMLXS_PORT_BOUND)) {
1424 			continue;
1425 		}
1426 
1427 		(void) emlxs_port_offline(port, 0xffffffff);
1428 
1429 	}
1430 
1431 	return;
1432 
1433 } /* emlxs_linkdown() */
1434 
1435 
1436 extern void
1437 emlxs_linkup(emlxs_hba_t *hba)
1438 {
1439 	emlxs_port_t *port = &PPORT;
1440 	emlxs_config_t *cfg = &CFG;
1441 
1442 	mutex_enter(&EMLXS_PORT_LOCK);
1443 
1444 	HBASTATS.LinkUp++;
1445 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);
1446 
1447 #ifdef MENLO_SUPPORT
1448 	if (hba->flag & FC_MENLO_MODE) {
1449 		mutex_exit(&EMLXS_PORT_LOCK);
1450 
1451 		/*
1452 		 * Trigger linkup CV and don't start linkup & discovery
1453 		 * timers
1454 		 */
1455 		mutex_enter(&EMLXS_LINKUP_LOCK);
1456 		cv_broadcast(&EMLXS_LINKUP_CV);
1457 		mutex_exit(&EMLXS_LINKUP_LOCK);
1458 
1459 		return;
1460 	}
1461 #endif /* MENLO_SUPPORT */
1462 
1463 	/* Set the linkup & discovery timers */
1464 	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1465 	hba->discovery_timer =
1466 	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1467 	    cfg[CFG_DISC_TIMEOUT].current;
1468 
1469 	mutex_exit(&EMLXS_PORT_LOCK);
1470 
1471 	return;
1472 
1473 } /* emlxs_linkup() */
1474 
1475 
1476 /*
1477  *  emlxs_reset_link
1478  *
1479  *  Description:
1480  *  Called to reset the link with an init_link
1481  *
1482  *    Returns: 0 on success; 1 on failure
1483  *
1484  */
1485 extern int
1486 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
1487 {
1488 	emlxs_port_t *port = &PPORT;
1489 	emlxs_config_t *cfg;
1490 	MAILBOXQ *mbq = NULL;
1491 	MAILBOX *mb = NULL;
1492 	int rval = 0;
1493 	int rc;
1494 
1495 	/*
1496 	 * Get a buffer to use for the mailbox command
1497 	 */
1498 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))
1499 	    == NULL) {
1500 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1501 		    "Unable to allocate mailbox buffer.");
1502 		rval = 1;
1503 		goto reset_link_fail;
1504 	}
1505 
1506 	mb = (MAILBOX *)mbq;
1507 
1508 	/* Bring link down first */
1509 	emlxs_mb_down_link(hba, mbq);
1510 
1511 #define	MBXERR_LINK_DOWN	0x33
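
/*
 * A DOWN_LINK mailbox that completes with MBXERR_LINK_DOWN is treated
 * as success below; presumably the link was already down, which is the
 * desired end state.
 */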
1512 
1513 	if (wait) {
1514 		wait = MBX_WAIT;
1515 	} else {
1516 		wait = MBX_NOWAIT;
1517 	}
1518 	rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1519 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
1520 	    (rc != MBXERR_LINK_DOWN)) {
1521 		rval = 1;
1522 		goto reset_link_fail;
1523 	}
1524 
1525 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1526 	    "Disabling link...");
1527 
1528 	if (linkup) {
1529 		/*
1530 		 * Setup and issue mailbox INITIALIZE LINK command
1531 		 */
1532 
1533 		if (wait == MBX_NOWAIT) {
1534 			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))
1535 			    == NULL) {
1536 				EMLXS_MSGF(EMLXS_CONTEXT,
1537 				    &emlxs_link_reset_failed_msg,
1538 				    "Unable to allocate mailbox buffer.");
1539 				rval = 1;
1540 				goto reset_link_fail;
1541 			}
1542 			mb = (MAILBOX *)mbq;
1543 		} else {
1544 			/* Reuse mbq from previous mbox */
1545 			mb = (MAILBOX *)mbq;
1546 		}
1547 		cfg = &CFG;
1548 
1549 		emlxs_mb_init_link(hba, mbq,
1550 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1551 
1552 		mb->un.varInitLnk.lipsr_AL_PA = 0;
1553 
1554 		/* Clear the loopback mode */
1555 		mutex_enter(&EMLXS_PORT_LOCK);
1556 		hba->flag &= ~FC_LOOPBACK_MODE;
1557 		hba->loopback_tics = 0;
1558 		mutex_exit(&EMLXS_PORT_LOCK);
1559 
1560 		rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1561 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1562 			rval = 1;
1563 			goto reset_link_fail;
1564 		}
1565 
1566 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1567 	}
1568 
1569 reset_link_fail:
1570 
1571 	if ((wait == MBX_WAIT) && mbq) {
1572 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
1573 	}
1574 
1575 	return (rval);
1576 } /* emlxs_reset_link() */
1577 
1578 
1579 extern int
1580 emlxs_online(emlxs_hba_t *hba)
1581 {
1582 	emlxs_port_t *port = &PPORT;
1583 	int32_t rval = 0;
1584 	uint32_t i = 0;
1585 
1586 	/* Make sure adapter is offline or exit trying (30 seconds) */
1587 	while (i++ < 30) {
1588 		/* Check if adapter is already going online */
1589 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1590 			return (0);
1591 		}
1592 
1593 		mutex_enter(&EMLXS_PORT_LOCK);
1594 
1595 		/* Check again */
1596 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1597 			mutex_exit(&EMLXS_PORT_LOCK);
1598 			return (0);
1599 		}
1600 
1601 		/* Check if adapter is offline */
1602 		if (hba->flag & FC_OFFLINE_MODE) {
1603 			/* Mark it going online */
1604 			hba->flag &= ~FC_OFFLINE_MODE;
1605 			hba->flag |= FC_ONLINING_MODE;
1606 
1607 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1608 			mutex_exit(&EMLXS_PORT_LOCK);
1609 			break;
1610 		}
1611 
1612 		mutex_exit(&EMLXS_PORT_LOCK);
1613 
1614 		DELAYMS(1000);
1615 	}
1616 
1617 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1618 	    "Going online...");
1619 
1620 	if ((rval = EMLXS_SLI_ONLINE(hba))) {
1621 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
1622 		    rval);
1623 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1624 
1625 		/* Set FC_OFFLINE_MODE */
1626 		mutex_enter(&EMLXS_PORT_LOCK);
1627 		emlxs_diag_state = DDI_OFFDI;
1628 		hba->flag |= FC_OFFLINE_MODE;
1629 		hba->flag &= ~FC_ONLINING_MODE;
1630 		mutex_exit(&EMLXS_PORT_LOCK);
1631 
1632 		return (rval);
1633 	}
1634 
1635 	/* Start the timer */
1636 	emlxs_timer_start(hba);
1637 
1638 	/* Set FC_ONLINE_MODE */
1639 	mutex_enter(&EMLXS_PORT_LOCK);
1640 	emlxs_diag_state = DDI_ONDI;
1641 	hba->flag |= FC_ONLINE_MODE;
1642 	hba->flag &= ~FC_ONLINING_MODE;
1643 	mutex_exit(&EMLXS_PORT_LOCK);
1644 
1645 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1646 
1647 #ifdef SFCT_SUPPORT
1648 	(void) emlxs_fct_port_initialize(port);
1649 #endif /* SFCT_SUPPORT */
1650 
1651 	return (rval);
1652 
1653 } /* emlxs_online() */
1654 
1655 
1656 extern int
1657 emlxs_offline(emlxs_hba_t *hba)
1658 {
1659 	emlxs_port_t *port = &PPORT;
1660 	uint32_t i = 0;
1661 	int rval = 1;
1662 
1663 	/* Make sure adapter is online or exit trying (30 seconds) */
1664 	while (i++ < 30) {
1665 		/* Check if adapter is already going offline */
1666 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1667 			return (0);
1668 		}
1669 
1670 		mutex_enter(&EMLXS_PORT_LOCK);
1671 
1672 		/* Check again */
1673 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1674 			mutex_exit(&EMLXS_PORT_LOCK);
1675 			return (0);
1676 		}
1677 
1678 		/* Check if adapter is online */
1679 		if (hba->flag & FC_ONLINE_MODE) {
1680 			/* Mark it going offline */
1681 			hba->flag &= ~FC_ONLINE_MODE;
1682 			hba->flag |= FC_OFFLINING_MODE;
1683 
1684 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1685 			mutex_exit(&EMLXS_PORT_LOCK);
1686 			break;
1687 		}
1688 
1689 		mutex_exit(&EMLXS_PORT_LOCK);
1690 
1691 		DELAYMS(1000);
1692 	}
1693 
1694 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1695 	    "Going offline...");
1696 
1697 	if (port->ini_mode) {
1698 		/* Flush all IO */
1699 		emlxs_linkdown(hba);
1700 	}
1701 #ifdef SFCT_SUPPORT
1702 	else {
1703 		(void) emlxs_fct_port_shutdown(port);
1704 	}
1705 #endif /* SFCT_SUPPORT */
1706 
1707 	/* Check if adapter was shutdown */
1708 	if (hba->flag & FC_HARDWARE_ERROR) {
1709 		/*
1710 		 * Force mailbox cleanup
1711 		 * This will wake any sleeping or polling threads
1712 		 */
1713 		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1714 	}
1715 
1716 	/* Pause here for the IO to settle */
1717 	delay(drv_usectohz(1000000));	/* 1 sec */
1718 
1719 	/* Unregister all nodes */
1720 	emlxs_ffcleanup(hba);
1721 
1722 	if (hba->bus_type == SBUS_FC) {
1723 		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
1724 #ifdef FMA_SUPPORT
1725 		/* Access handle validation */
1726 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
1727 #endif  /* FMA_SUPPORT */
1728 	}
1729 
1730 	/* Stop the timer */
1731 	emlxs_timer_stop(hba);
1732 
1733 	/* For safety flush every iotag list */
1734 	if (emlxs_iotag_flush(hba)) {
1735 		/* Pause here for the IO to flush */
1736 		delay(drv_usectohz(1000));
1737 	}
1738 
1739 	/* Wait for poll command request to settle */
1740 	while (hba->io_poll_count > 0) {
1741 		delay(drv_usectohz(2000000));   /* 2 sec */
1742 	}
1743 
1744 	/* Shutdown the adapter interface */
1745 	EMLXS_SLI_OFFLINE(hba);
1746 
1747 	mutex_enter(&EMLXS_PORT_LOCK);
1748 	hba->flag |= FC_OFFLINE_MODE;
1749 	hba->flag &= ~FC_OFFLINING_MODE;
1750 	emlxs_diag_state = DDI_OFFDI;
1751 	mutex_exit(&EMLXS_PORT_LOCK);
1752 
1753 	rval = 0;
1754 
1755 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1756 
1757 done:
1758 
1759 	return (rval);
1760 
1761 } /* emlxs_offline() */
1762 
1763 
1764 
1765 extern int
1766 emlxs_power_down(emlxs_hba_t *hba)
1767 {
1768 #ifdef FMA_SUPPORT
1769 	emlxs_port_t *port = &PPORT;
1770 #endif  /* FMA_SUPPORT */
1771 	int32_t rval = 0;
1772 	uint32_t *ptr;
1773 	uint32_t i;
1774 
1775 	if ((rval = emlxs_offline(hba))) {
1776 		return (rval);
1777 	}
1778 	EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);
1779 
1780 	/* Save pci config space */
1781 	ptr = (uint32_t *)hba->pm_config;
1782 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1783 		*ptr =
1784 		    ddi_get32(hba->pci_acc_handle,
1785 		    (uint32_t *)(hba->pci_addr + i));
1786 	}
1787 
1788 	/* Put chip in D3 state */
1789 	(void) ddi_put8(hba->pci_acc_handle,
1790 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1791 	    (uint8_t)PCI_PM_D3_STATE);
1792 
1793 #ifdef FMA_SUPPORT
1794 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1795 	    != DDI_FM_OK) {
1796 		EMLXS_MSGF(EMLXS_CONTEXT,
1797 		    &emlxs_invalid_access_handle_msg, NULL);
1798 		return (1);
1799 	}
1800 #endif  /* FMA_SUPPORT */
1801 
1802 	return (0);
1803 
1804 } /* End emlxs_power_down */
1805 
1806 
1807 extern int
1808 emlxs_power_up(emlxs_hba_t *hba)
1809 {
1810 #ifdef FMA_SUPPORT
1811 	emlxs_port_t *port = &PPORT;
1812 #endif  /* FMA_SUPPORT */
1813 	int32_t rval = 0;
1814 	uint32_t *ptr;
1815 	uint32_t i;
1816 
1817 
1818 	/* Take chip out of D3 state */
1819 	(void) ddi_put8(hba->pci_acc_handle,
1820 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1821 	    (uint8_t)PCI_PM_D0_STATE);
1822 
1823 	/* Must have at least 10 ms delay here */
1824 	DELAYMS(100);
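
	/*
	 * The PCI PM spec calls for roughly a 10ms settle time after a
	 * D3hot-to-D0 transition before the function is accessed; 100ms
	 * is used here for extra margin.
	 */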
1825 
1826 	/* Restore pci config space */
1827 	ptr = (uint32_t *)hba->pm_config;
1828 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1829 		(void) ddi_put32(hba->pci_acc_handle,
1830 		    (uint32_t *)(hba->pci_addr + i), *ptr);
1831 	}
1832 
1833 #ifdef FMA_SUPPORT
1834 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1835 	    != DDI_FM_OK) {
1836 		EMLXS_MSGF(EMLXS_CONTEXT,
1837 		    &emlxs_invalid_access_handle_msg, NULL);
1838 		return (1);
1839 	}
1840 #endif  /* FMA_SUPPORT */
1841 
1842 	/* Bring adapter online */
1843 	if ((rval = emlxs_online(hba))) {
1844 		(void) ddi_put8(hba->pci_acc_handle,
1845 		    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1846 		    (uint8_t)PCI_PM_D3_STATE);
1847 
1848 		return (rval);
1849 	}
1850 
1851 	return (rval);
1852 
1853 } /* End emlxs_power_up */
1854 
1855 
1856 /*
1857  *
1858  * NAME:     emlxs_ffcleanup
1859  *
1860  * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
1861  *
1862  * EXECUTION ENVIRONMENT: process only
1863  *
1864  * CALLED FROM: CFG_TERM
1865  *
1866  * INPUT: hba       - pointer to the dev_ctl area.
1867  *
1868  * RETURNS: none
1869  */
1870 extern void
1871 emlxs_ffcleanup(emlxs_hba_t *hba)
1872 {
1873 	emlxs_port_t *port = &PPORT;
1874 	uint32_t i;
1875 
1876 	/* Disable all but the mailbox interrupt */
1877 	EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);
1878 
1879 	/* Make sure all port nodes are destroyed */
1880 	for (i = 0; i < MAX_VPORTS; i++) {
1881 		port = &VPORT(i);
1882 
1883 		if (port->node_count) {
1884 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1885 				(void) emlxs_sli4_unreg_all_rpi_by_port(port);
1886 			} else {
1887 				(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0,
1888 				    0);
1889 			}
1890 		}
1891 	}
1892 
1893 	/* Clear all interrupt enable conditions */
1894 	EMLXS_SLI_DISABLE_INTR(hba, 0);
1895 
1896 	return;
1897 
1898 } /* emlxs_ffcleanup() */
1899 
1900 
1901 extern uint16_t
1902 emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
1903 {
1904 	emlxs_hba_t *hba;
1905 	emlxs_port_t *port;
1906 	uint16_t iotag;
1907 	uint32_t i;
1908 
1909 	hba = cp->hba;
1910 
1911 	mutex_enter(&EMLXS_FCTAB_LOCK);
1912 
1913 	if (sbp->iotag != 0) {
1914 		port = &PPORT;
1915 
1916 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1917 		    "Pkt already registered! channel=%d iotag=%d sbp=%p",
1918 		    sbp->channel, sbp->iotag, sbp);
1919 	}
1920 
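
	/*
	 * Scan fc_table round-robin, starting where the last allocation
	 * left off (hba->fc_iotag) and wrapping at max_iotag.  The first
	 * free or STALE_PACKET slot is claimed; if no slot is found in a
	 * full pass, iotag 0 is returned to the caller.
	 */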
1921 	iotag = 0;
1922 	for (i = 0; i < hba->max_iotag; i++) {
1923 		if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
1924 			hba->fc_iotag = 1;
1925 		}
1926 		iotag = hba->fc_iotag++;
1927 
1928 		if (hba->fc_table[iotag] == 0 ||
1929 		    hba->fc_table[iotag] == STALE_PACKET) {
1930 			hba->io_count++;
1931 			hba->fc_table[iotag] = sbp;
1932 
1933 			sbp->iotag = iotag;
1934 			sbp->channel = cp;
1935 
1936 			break;
1937 		}
1938 		iotag = 0;
1939 	}
1940 
1941 	mutex_exit(&EMLXS_FCTAB_LOCK);
1942 
1943 	/*
1944 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1945 	 *    "emlxs_register_pkt: channel=%d iotag=%d sbp=%p",
1946 	 *    cp->channelno, iotag, sbp);
1947 	 */
1948 
1949 	return (iotag);
1950 
1951 } /* emlxs_register_pkt() */
1952 
1953 
1954 
1955 extern emlxs_buf_t *
1956 emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
1957 {
1958 	emlxs_hba_t *hba;
1959 	emlxs_buf_t *sbp;
1960 
1961 	sbp = NULL;
1962 	hba = cp->hba;
1963 
1964 	/* Check the iotag range */
1965 	if ((iotag == 0) || (iotag >= hba->max_iotag)) {
1966 		return (NULL);
1967 	}
1968 
1969 	/* Remove the sbp from the table */
1970 	mutex_enter(&EMLXS_FCTAB_LOCK);
1971 	sbp = hba->fc_table[iotag];
1972 
1973 	if (!sbp || (sbp == STALE_PACKET)) {
1974 		mutex_exit(&EMLXS_FCTAB_LOCK);
1975 		return (sbp);
1976 	}
1977 
1978 	hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
1979 	hba->io_count--;
1980 	sbp->iotag = 0;
1981 
1982 	mutex_exit(&EMLXS_FCTAB_LOCK);
1983 
1984 
1985 	/* Clean up the sbp */
1986 	mutex_enter(&sbp->mtx);
1987 
1988 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
1989 		sbp->pkt_flags &= ~PACKET_IN_TXQ;
1990 		hba->channel_tx_count--;
1991 	}
1992 
1993 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
1994 		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
1995 	}
1996 
1997 	if (sbp->bmp) {
1998 		(void) emlxs_mem_put(hba, MEM_BPL, (uint8_t *)sbp->bmp);
1999 		sbp->bmp = 0;
2000 	}
2001 
2002 	mutex_exit(&sbp->mtx);
2003 
2004 	return (sbp);
2005 
2006 } /* emlxs_unregister_pkt() */
2007 
2008 
2009 
2010 /* Flush all IO's to all nodes for a given IO Channel */
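/*
 * The flush runs in two phases: first, while EMLXS_TX_CHANNEL_LOCK is
 * held, every pending priority and normal tx queue entry is moved to a
 * local abort queue and its iotag/bmp resources are released; the lock
 * is then dropped and each queued entry is completed (or, for
 * unsolicited buffer IOCBs, reposted or freed).
 */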
2011 extern uint32_t
2012 emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
2013 {
2014 	emlxs_port_t *port = &PPORT;
2015 	emlxs_buf_t *sbp;
2016 	IOCBQ *iocbq;
2017 	IOCBQ *next;
2018 	IOCB *iocb;
2019 	uint32_t channelno;
2020 	Q abort;
2021 	NODELIST *ndlp;
2022 	IOCB *icmd;
2023 	MATCHMAP *mp;
2024 	uint32_t i;
2025 	uint8_t flag[MAX_CHANNEL];
2026 
2027 	channelno = cp->channelno;
2028 	bzero((void *)&abort, sizeof (Q));
2029 	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));
2030 
2031 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2032 
2033 	/* While a node needs servicing */
2034 	while (cp->nodeq.q_first) {
2035 		ndlp = (NODELIST *) cp->nodeq.q_first;
2036 
2037 		/* Check if priority queue is not empty */
2038 		if (ndlp->nlp_ptx[channelno].q_first) {
2039 			/* Transfer all iocb's to local queue */
2040 			if (abort.q_first == 0) {
2041 				abort.q_first =
2042 				    ndlp->nlp_ptx[channelno].q_first;
2043 			} else {
2044 				((IOCBQ *)abort.q_last)->next =
2045 				    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
2046 			}
2047 			flag[channelno] = 1;
2048 
2049 			abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2050 			abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2051 		}
2052 
2053 		/* Check if tx queue is not empty */
2054 		if (ndlp->nlp_tx[channelno].q_first) {
2055 			/* Transfer all iocb's to local queue */
2056 			if (abort.q_first == 0) {
2057 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2058 			} else {
2059 				((IOCBQ *)abort.q_last)->next =
2060 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2061 			}
2062 
2063 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2064 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2065 		}
2066 
2067 		/* Clear the queue pointers */
2068 		ndlp->nlp_ptx[channelno].q_first = NULL;
2069 		ndlp->nlp_ptx[channelno].q_last = NULL;
2070 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2071 
2072 		ndlp->nlp_tx[channelno].q_first = NULL;
2073 		ndlp->nlp_tx[channelno].q_last = NULL;
2074 		ndlp->nlp_tx[channelno].q_cnt = 0;
2075 
2076 		/* Remove node from service queue */
2077 
2078 		/* If this is the last node on list */
2079 		if (cp->nodeq.q_last == (void *)ndlp) {
2080 			cp->nodeq.q_last = NULL;
2081 			cp->nodeq.q_first = NULL;
2082 			cp->nodeq.q_cnt = 0;
2083 		} else {
2084 			/* Remove node from head */
2085 			cp->nodeq.q_first = ndlp->nlp_next[channelno];
2086 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2087 			    cp->nodeq.q_first;
2088 			cp->nodeq.q_cnt--;
2089 		}
2090 
2091 		/* Clear node */
2092 		ndlp->nlp_next[channelno] = NULL;
2093 	}
2094 
2095 	/* First clean up the iocb's while still holding the lock */
2096 	iocbq = (IOCBQ *) abort.q_first;
2097 	while (iocbq) {
2098 		/* Free the IoTag and the bmp */
2099 		iocb = &iocbq->iocb;
2100 
2101 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2102 			sbp = iocbq->sbp;
2103 			if (sbp) {
2104 				hba->fc_table[sbp->iotag] = NULL;
2105 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
2106 			}
2107 		} else {
2108 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2109 			    iocb->ULPIOTAG, 0);
2110 		}
2111 
2112 		if (sbp && (sbp != STALE_PACKET)) {
2113 			mutex_enter(&sbp->mtx);
2114 
2115 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2116 			/*
2117 			 * If the fpkt is already set, then we will leave it
2118 			 * alone. This ensures that this pkt is only accounted
2119 			 * for on one fpkt->flush_count
2120 			 */
2121 			if (!sbp->fpkt && fpkt) {
2122 				mutex_enter(&fpkt->mtx);
2123 				sbp->fpkt = fpkt;
2124 				fpkt->flush_count++;
2125 				mutex_exit(&fpkt->mtx);
2126 			}
2127 
2128 			mutex_exit(&sbp->mtx);
2129 		}
2130 
2131 		iocbq = (IOCBQ *)iocbq->next;
2132 	}	/* end of while */
2133 
2134 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2135 
2136 	/* Now abort the iocb's */
2137 	iocbq = (IOCBQ *)abort.q_first;
2138 	while (iocbq) {
2139 		/* Save the next iocbq for now */
2140 		next = (IOCBQ *)iocbq->next;
2141 
2142 		/* Unlink this iocbq */
2143 		iocbq->next = NULL;
2144 
2145 		/* Get the pkt */
2146 		sbp = (emlxs_buf_t *)iocbq->sbp;
2147 
2148 		if (sbp) {
2149 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2150 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2151 
2152 			if (hba->state >= FC_LINK_UP) {
2153 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2154 				    IOERR_ABORT_REQUESTED, 1);
2155 			} else {
2156 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2157 				    IOERR_LINK_DOWN, 1);
2158 			}
2159 
2160 		}
2161 		/* Free the iocb and its associated buffers */
2162 		else {
2163 			icmd = &iocbq->iocb;
2164 
2165 			/* SLI3 */
2166 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2167 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2168 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2169 				if ((hba->flag &
2170 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2171 					/* HBA is detaching or offlining */
2172 					if (icmd->ULPCOMMAND !=
2173 					    CMD_QUE_RING_LIST64_CN) {
2174 						uint8_t	*tmp;
2175 						RING *rp;
2176 
2177 						rp = &hba->sli.sli3.
2178 						    ring[channelno];
2179 						for (i = 0;
2180 						    i < icmd->ULPBDECOUNT;
2181 						    i++) {
2182 							mp = EMLXS_GET_VADDR(
2183 							    hba, rp, icmd);
2184 
2185 							tmp = (uint8_t *)mp;
2186 							if (mp) {
2187 							(void) emlxs_mem_put(
2188 							    hba, MEM_BUF, tmp);
2189 							}
2190 						}
2191 					}
2192 
2193 					(void) emlxs_mem_put(hba, MEM_IOCB,
2194 					    (uint8_t *)iocbq);
2195 				} else {
2196 					/* repost the unsolicited buffer */
2197 					EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
2198 					    iocbq);
2199 				}
2200 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2201 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2202 
2203 				emlxs_tx_put(iocbq, 1);
2204 			}
2205 		}
2206 
2207 		iocbq = next;
2208 
2209 	}	/* end of while */
2210 
2211 	/* Now trigger channel service */
2212 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2213 		if (!flag[channelno]) {
2214 			continue;
2215 		}
2216 
2217 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2218 	}
2219 
2220 	return (abort.q_cnt);
2221 
2222 } /* emlxs_tx_channel_flush() */
2223 
2224 
2225 /* Flush all IO's on all or a given ring for a given node */
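/*
 * A NULL 'chan' means flush every channel; a nonzero 'shutdown'
 * additionally marks a non-base node inactive so that no new IO
 * can be queued to it while it is being torn down.
 */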
2226 extern uint32_t
2227 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
2228     uint32_t shutdown, emlxs_buf_t *fpkt)
2229 {
2230 	emlxs_hba_t *hba = HBA;
2231 	emlxs_buf_t *sbp;
2232 	uint32_t channelno;
2233 	CHANNEL *cp;
2234 	IOCB *icmd;
2235 	IOCBQ *iocbq;
2236 	NODELIST *prev;
2237 	IOCBQ *next;
2238 	IOCB *iocb;
2239 	Q abort;
2240 	uint32_t i;
2241 	MATCHMAP *mp;
2242 	uint8_t flag[MAX_CHANNEL];
2243 
2244 	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));
2245 
2246 	/* Flush all I/O's on tx queue to this target */
2247 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2248 
2249 	if (!ndlp->nlp_base && shutdown) {
2250 		ndlp->nlp_active = 0;
2251 	}
2252 
2253 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2254 		cp = &hba->chan[channelno];
2255 
2256 		if (chan && cp != chan) {
2257 			continue;
2258 		}
2259 
2260 		if (!ndlp->nlp_base || shutdown) {
2261 			/* Check if priority queue is not empty */
2262 			if (ndlp->nlp_ptx[channelno].q_first) {
2263 				/* Transfer all iocb's to local queue */
2264 				if (abort.q_first == 0) {
2265 					abort.q_first =
2266 					    ndlp->nlp_ptx[channelno].q_first;
2267 				} else {
2268 					((IOCBQ *)(abort.q_last))->next =
2269 					    (IOCBQ *)ndlp->nlp_ptx[channelno].
2270 					    q_first;
2271 				}
2272 
2273 				flag[channelno] = 1;
2274 
2275 				abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2276 				abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2277 			}
2278 		}
2279 
2280 		/* Check if tx queue is not empty */
2281 		if (ndlp->nlp_tx[channelno].q_first) {
2282 
2283 			/* Transfer all iocb's to local queue */
2284 			if (abort.q_first == 0) {
2285 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2286 			} else {
2287 				((IOCBQ *)abort.q_last)->next =
2288 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2289 			}
2290 
2291 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2292 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2293 		}
2294 
2295 		/* Clear the queue pointers */
2296 		ndlp->nlp_ptx[channelno].q_first = NULL;
2297 		ndlp->nlp_ptx[channelno].q_last = NULL;
2298 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2299 
2300 		ndlp->nlp_tx[channelno].q_first = NULL;
2301 		ndlp->nlp_tx[channelno].q_last = NULL;
2302 		ndlp->nlp_tx[channelno].q_cnt = 0;
2303 
2304 		/* If this node was on the channel queue, remove it */
2305 		if (ndlp->nlp_next[channelno]) {
2306 			/* If this is the only node on list */
2307 			if (cp->nodeq.q_first == (void *)ndlp &&
2308 			    cp->nodeq.q_last == (void *)ndlp) {
2309 				cp->nodeq.q_last = NULL;
2310 				cp->nodeq.q_first = NULL;
2311 				cp->nodeq.q_cnt = 0;
2312 			} else if (cp->nodeq.q_first == (void *)ndlp) {
2313 				cp->nodeq.q_first = ndlp->nlp_next[channelno];
2314 				((NODELIST *) cp->nodeq.q_last)->
2315 				    nlp_next[channelno] = cp->nodeq.q_first;
2316 				cp->nodeq.q_cnt--;
2317 			} else {
2318 				/*
2319 				 * This is a little more difficult find the
2320 				 * previous node in the circular channel queue
2321 				 */
2322 				prev = ndlp;
2323 				while (prev->nlp_next[channelno] != ndlp) {
2324 					prev = prev->nlp_next[channelno];
2325 				}
2326 
2327 				prev->nlp_next[channelno] =
2328 				    ndlp->nlp_next[channelno];
2329 
2330 				if (cp->nodeq.q_last == (void *)ndlp) {
2331 					cp->nodeq.q_last = (void *)prev;
2332 				}
2333 				cp->nodeq.q_cnt--;
2334 
2335 			}
2336 
2337 			/* Clear node */
2338 			ndlp->nlp_next[channelno] = NULL;
2339 		}
2340 
2341 	}
2342 
2343 	/* First cleanup the iocb's while still holding the lock */
2344 	iocbq = (IOCBQ *) abort.q_first;
2345 	while (iocbq) {
2346 		/* Free the IoTag and the bmp */
2347 		iocb = &iocbq->iocb;
2348 
2349 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2350 			sbp = iocbq->sbp;
2351 			if (sbp) {
2352 				hba->fc_table[sbp->iotag] = NULL;
2353 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
2354 			}
2355 		} else {
2356 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2357 			    iocb->ULPIOTAG, 0);
2358 		}
2359 
2360 		if (sbp && (sbp != STALE_PACKET)) {
2361 			mutex_enter(&sbp->mtx);
2362 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2363 			/*
2364 			 * If the fpkt is already set, then we will leave it
2365 			 * alone. This ensures that this pkt is only accounted
2366 			 * for on one fpkt->flush_count
2367 			 */
2368 			if (!sbp->fpkt && fpkt) {
2369 				mutex_enter(&fpkt->mtx);
2370 				sbp->fpkt = fpkt;
2371 				fpkt->flush_count++;
2372 				mutex_exit(&fpkt->mtx);
2373 			}
2374 
2375 			mutex_exit(&sbp->mtx);
2376 		}
2377 
2378 		iocbq = (IOCBQ *) iocbq->next;
2379 
2380 	}	/* end of while */
2381 
2382 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2383 
2384 	/* Now abort the iocb's outside the locks */
2385 	iocbq = (IOCBQ *)abort.q_first;
2386 	while (iocbq) {
2387 		/* Save the next iocbq for now */
2388 		next = (IOCBQ *)iocbq->next;
2389 
2390 		/* Unlink this iocbq */
2391 		iocbq->next = NULL;
2392 
2393 		/* Get the pkt */
2394 		sbp = (emlxs_buf_t *)iocbq->sbp;
2395 
2396 		if (sbp) {
2397 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2398 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2399 
2400 			if (hba->state >= FC_LINK_UP) {
2401 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2402 				    IOERR_ABORT_REQUESTED, 1);
2403 			} else {
2404 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2405 				    IOERR_LINK_DOWN, 1);
2406 			}
2407 
2408 		}
2409 		/* Free the iocb and its associated buffers */
2410 		else {
2411 			/* CMD_CLOSE_XRI_CN should also free the memory */
2412 			icmd = &iocbq->iocb;
2413 
2414 			/* SLI3 */
2415 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2416 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2417 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2418 				if ((hba->flag &
2419 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2420 					/* HBA is detaching or offlining */
2421 					if (icmd->ULPCOMMAND !=
2422 					    CMD_QUE_RING_LIST64_CN) {
2423 						uint8_t	*tmp;
2424 						RING *rp;
2425 						int ch;
2426 
2427 						ch = ((CHANNEL *)
2428 						    iocbq->channel)->channelno;
2429 						rp = &hba->sli.sli3.ring[ch];
2430 						for (i = 0;
2431 						    i < icmd->ULPBDECOUNT;
2432 						    i++) {
2433 							mp = EMLXS_GET_VADDR(
2434 							    hba, rp, icmd);
2435 
2436 							tmp = (uint8_t *)mp;
2437 							if (mp) {
2438 							(void) emlxs_mem_put(
2439 							    hba, MEM_BUF, tmp);
2440 							}
2441 						}
2442 					}
2443 
2444 					(void) emlxs_mem_put(hba, MEM_IOCB,
2445 					    (uint8_t *)iocbq);
2446 				} else {
2447 					/* repost the unsolicited buffer */
2448 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2449 					    (CHANNEL *)iocbq->channel, iocbq);
2450 				}
2451 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2452 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2453 				/*
2454 				 * Resend the abort iocbq if any
2455 				 */
2456 				emlxs_tx_put(iocbq, 1);
2457 			}
2458 		}
2459 
2460 		iocbq = next;
2461 
2462 	}	/* end of while */
2463 
2464 	/* Now trigger channel service */
2465 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2466 		if (!flag[channelno]) {
2467 			continue;
2468 		}
2469 
2470 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2471 	}
2472 
2473 	return (abort.q_cnt);
2474 
2475 } /* emlxs_tx_node_flush() */
2476 
2477 
2478 /* Check for IO's on all or a given ring for a given node */
2479 extern uint32_t
2480 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
2481 {
2482 	emlxs_hba_t *hba = HBA;
2483 	uint32_t channelno;
2484 	CHANNEL *cp;
2485 	uint32_t count;
2486 
2487 	count = 0;
2488 
2489 	/* Count all I/O's on the tx queues to this target */
2490 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2491 
2492 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2493 		cp = &hba->chan[channelno];
2494 
2495 		if (chan && cp != chan) {
2496 			continue;
2497 		}
2498 
2499 		/* Check if priority queue is not empty */
2500 		if (ndlp->nlp_ptx[channelno].q_first) {
2501 			count += ndlp->nlp_ptx[channelno].q_cnt;
2502 		}
2503 
2504 		/* Check if tx queue is not empty */
2505 		if (ndlp->nlp_tx[channelno].q_first) {
2506 			count += ndlp->nlp_tx[channelno].q_cnt;
2507 		}
2508 
2509 	}
2510 
2511 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2512 
2513 	return (count);
2514 
2515 } /* emlxs_tx_node_check() */
2516 
2517 
2518 
2519 /* Flush all IO's on any ring for a given node's lun */
2520 extern uint32_t
2521 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2522     emlxs_buf_t *fpkt)
2523 {
2524 	emlxs_hba_t *hba = HBA;
2525 	emlxs_buf_t *sbp;
2526 	uint32_t channelno;
2527 	IOCBQ *iocbq;
2528 	IOCBQ *prev;
2529 	IOCBQ *next;
2530 	IOCB *iocb;
2531 	IOCB *icmd;
2532 	Q abort;
2533 	uint32_t i;
2534 	MATCHMAP *mp;
2535 	CHANNEL *cp;
2536 	CHANNEL *channel;
2537 	uint8_t flag[MAX_CHANNEL];
2538 
2539 	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));
2540 
2541 	/* Flush I/O's on txQ to this target's lun */
2542 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2543 
2544 	channel = &hba->chan[hba->channel_fcp];
2545 
2546 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2547 		cp = &hba->chan[channelno];
2548 
2549 		if (channel && cp != channel) {
2550 			continue;
2551 		}
2552 
2553 		/* Scan the priority queue first */
2554 		prev = NULL;
2555 		iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;
2556 
2557 		while (iocbq) {
2558 			next = (IOCBQ *)iocbq->next;
2559 			iocb = &iocbq->iocb;
2560 			sbp = (emlxs_buf_t *)iocbq->sbp;
2561 
2562 			/* Check if this IO is for our lun */
2563 			if (sbp && (sbp->lun == lun)) {
2564 				/* Remove iocb from the node's ptx queue */
2565 				if (next == 0) {
2566 					ndlp->nlp_ptx[channelno].q_last =
2567 					    (uint8_t *)prev;
2568 				}
2569 
2570 				if (prev == 0) {
2571 					ndlp->nlp_ptx[channelno].q_first =
2572 					    (uint8_t *)next;
2573 				} else {
2574 					prev->next = next;
2575 				}
2576 
2577 				iocbq->next = NULL;
2578 				ndlp->nlp_ptx[channelno].q_cnt--;
2579 
2580 				/*
2581 				 * Add this iocb to our local abort Q
2582 				 */
2583 				if (abort.q_first) {
2584 					((IOCBQ *)abort.q_last)->next = iocbq;
2585 					abort.q_last = (uint8_t *)iocbq;
2586 					abort.q_cnt++;
2587 				} else {
2588 					abort.q_first = (uint8_t *)iocbq;
2589 					abort.q_last = (uint8_t *)iocbq;
2590 					abort.q_cnt = 1;
2591 				}
2592 				iocbq->next = NULL;
2593 				flag[channelno] = 1;
2594 
2595 			} else {
2596 				prev = iocbq;
2597 			}
2598 
2599 			iocbq = next;
2600 
2601 		}	/* while (iocbq) */
2602 
2603 
2604 		/* Scan the regular queue */
2605 		prev = NULL;
2606 		iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2607 
2608 		while (iocbq) {
2609 			next = (IOCBQ *)iocbq->next;
2610 			iocb = &iocbq->iocb;
2611 			sbp = (emlxs_buf_t *)iocbq->sbp;
2612 
2613 			/* Check if this IO is for our lun */
2614 			if (sbp && (sbp->lun == lun)) {
2615 				/* Remove iocb from the node's tx queue */
2616 				if (next == 0) {
2617 					ndlp->nlp_tx[channelno].q_last =
2618 					    (uint8_t *)prev;
2619 				}
2620 
2621 				if (prev == 0) {
2622 					ndlp->nlp_tx[channelno].q_first =
2623 					    (uint8_t *)next;
2624 				} else {
2625 					prev->next = next;
2626 				}
2627 
2628 				iocbq->next = NULL;
2629 				ndlp->nlp_tx[channelno].q_cnt--;
2630 
2631 				/*
2632 				 * Add this iocb to our local abort Q
2633 				 */
2634 				if (abort.q_first) {
2635 					((IOCBQ *) abort.q_last)->next = iocbq;
2636 					abort.q_last = (uint8_t *)iocbq;
2637 					abort.q_cnt++;
2638 				} else {
2639 					abort.q_first = (uint8_t *)iocbq;
2640 					abort.q_last = (uint8_t *)iocbq;
2641 					abort.q_cnt = 1;
2642 				}
2643 				iocbq->next = NULL;
2644 			} else {
2645 				prev = iocbq;
2646 			}
2647 
2648 			iocbq = next;
2649 
2650 		}	/* while (iocbq) */
2651 	}	/* for loop */
2652 
2653 	/* First cleanup the iocb's while still holding the lock */
2654 	iocbq = (IOCBQ *)abort.q_first;
2655 	while (iocbq) {
2656 		/* Free the IoTag and the bmp */
2657 		iocb = &iocbq->iocb;
2658 
2659 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2660 			sbp = iocbq->sbp;
2661 			if (sbp) {
2662 				hba->fc_table[sbp->iotag] = NULL;
2663 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
2664 			}
2665 		} else {
2666 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2667 			    iocb->ULPIOTAG, 0);
2668 		}
2669 
2670 		if (sbp && (sbp != STALE_PACKET)) {
2671 			mutex_enter(&sbp->mtx);
2672 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2673 			/*
2674 			 * If the fpkt is already set, then we will leave it
2675 			 * alone. This ensures that this pkt is only accounted
2676 			 * for on one fpkt->flush_count
2677 			 */
2678 			if (!sbp->fpkt && fpkt) {
2679 				mutex_enter(&fpkt->mtx);
2680 				sbp->fpkt = fpkt;
2681 				fpkt->flush_count++;
2682 				mutex_exit(&fpkt->mtx);
2683 			}
2684 
2685 			mutex_exit(&sbp->mtx);
2686 		}
2687 
2688 		iocbq = (IOCBQ *) iocbq->next;
2689 
2690 	}	/* end of while */
2691 
2692 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2693 
2694 	/* Now abort the iocb's outside the locks */
2695 	iocbq = (IOCBQ *)abort.q_first;
2696 	while (iocbq) {
2697 		/* Save the next iocbq for now */
2698 		next = (IOCBQ *)iocbq->next;
2699 
2700 		/* Unlink this iocbq */
2701 		iocbq->next = NULL;
2702 
2703 		/* Get the pkt */
2704 		sbp = (emlxs_buf_t *)iocbq->sbp;
2705 
2706 		if (sbp) {
2707 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2708 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2709 
2710 			if (hba->state >= FC_LINK_UP) {
2711 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2712 				    IOERR_ABORT_REQUESTED, 1);
2713 			} else {
2714 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2715 				    IOERR_LINK_DOWN, 1);
2716 			}
2717 		}
2718 
2719 		/* Free the iocb and its associated buffers */
2720 		else {
2721 			/* Should never happen! */
2722 			icmd = &iocbq->iocb;
2723 
2724 			/* SLI3 */
2725 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2726 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2727 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2728 				if ((hba->flag &
2729 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2730 					/* HBA is detaching or offlining */
2731 					if (icmd->ULPCOMMAND !=
2732 					    CMD_QUE_RING_LIST64_CN) {
2733 						uint8_t	*tmp;
2734 						RING *rp;
2735 						int ch;
2736 
2737 						ch = ((CHANNEL *)
2738 						    iocbq->channel)->channelno;
2739 						rp = &hba->sli.sli3.ring[ch];
2740 						for (i = 0;
2741 						    i < icmd->ULPBDECOUNT;
2742 						    i++) {
2743 							mp = EMLXS_GET_VADDR(
2744 							    hba, rp, icmd);
2745 
2746 							tmp = (uint8_t *)mp;
2747 							if (mp) {
2748 							(void) emlxs_mem_put(
2749 							    hba, MEM_BUF, tmp);
2750 							}
2751 						}
2752 					}
2753 
2754 					(void) emlxs_mem_put(hba, MEM_IOCB,
2755 					    (uint8_t *)iocbq);
2756 				} else {
2757 					/* repost the unsolicited buffer */
2758 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2759 					    (CHANNEL *)iocbq->channel, iocbq);
2760 				}
2761 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2762 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2763 				/*
2764 				 * Resend the abort iocbq if any
2765 				 */
2766 				emlxs_tx_put(iocbq, 1);
2767 			}
2768 		}
2769 
2770 		iocbq = next;
2771 
2772 	}	/* end of while */
2773 
2774 	/* Now trigger channel service */
2775 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2776 		if (!flag[channelno]) {
2777 			continue;
2778 		}
2779 
2780 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2781 	}
2782 
2783 	return (abort.q_cnt);
2784 
2785 } /* emlxs_tx_lun_flush() */
2786 
2787 
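/*
 * Queue an iocbq on its node's tx queue (or on the priority tx queue
 * for IOCB_PRIORITY requests) and, if needed, link the node onto the
 * channel's circular service queue.  If 'lock' is set, the
 * TX_CHANNEL lock is acquired here; otherwise the caller must
 * already hold it.  A sketch of a hypothetical caller that queues
 * with locking:
 *
 *	iocbq->port = (void *)port;
 *	iocbq->channel = (void *)cp;
 *	iocbq->node = (void *)ndlp;
 *	emlxs_tx_put(iocbq, 1);
 */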
2788 extern void
2789 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
2790 {
2791 	emlxs_hba_t *hba;
2792 	emlxs_port_t *port;
2793 	uint32_t channelno;
2794 	NODELIST *nlp;
2795 	CHANNEL *cp;
2796 	emlxs_buf_t *sbp;
2797 
2798 	port = (emlxs_port_t *)iocbq->port;
2799 	hba = HBA;
2800 	cp = (CHANNEL *)iocbq->channel;
2801 	nlp = (NODELIST *)iocbq->node;
2802 	channelno = cp->channelno;
2803 	sbp = (emlxs_buf_t *)iocbq->sbp;
2804 
2805 	/* nlp can be NULL here; fall back to the port's base node */
2806 	if (nlp == NULL) {
2807 		/* Set node to base node by default */
2808 		nlp = &port->node_base;
2809 
2810 		iocbq->node = (void *)nlp;
2811 
2812 		if (sbp) {
2813 			sbp->node = (void *)nlp;
2814 		}
2815 	}
2816 
2817 	if (lock) {
2818 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2819 	}
2820 
2821 	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
2822 		if (sbp) {
2823 			mutex_enter(&sbp->mtx);
2824 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2825 			mutex_exit(&sbp->mtx);
2826 
2827 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2828 				hba->fc_table[sbp->iotag] = NULL;
2829 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
2830 			} else {
2831 				(void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
2832 			}
2833 
2834 			if (lock) {
2835 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2836 			}
2837 
2838 			if (hba->state >= FC_LINK_UP) {
2839 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2840 				    IOERR_ABORT_REQUESTED, 1);
2841 			} else {
2842 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2843 				    IOERR_LINK_DOWN, 1);
2844 			}
2845 			return;
2846 		} else {
2847 			if (lock) {
2848 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2849 			}
2850 
2851 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2852 		}
2853 
2854 		return;
2855 	}
2856 
2857 	if (sbp) {
2858 
2859 		mutex_enter(&sbp->mtx);
2860 
2861 		if (sbp->pkt_flags &
2862 		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
2863 			mutex_exit(&sbp->mtx);
2864 			if (lock) {
2865 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2866 			}
2867 			return;
2868 		}
2869 
2870 		sbp->pkt_flags |= PACKET_IN_TXQ;
2871 		hba->channel_tx_count++;
2872 
2873 		mutex_exit(&sbp->mtx);
2874 	}
2875 
2876 
2877 	/* Check iocbq priority */
2878 	/* Some IOCBs, such as reset and close XRI, have high priority */
2879 	if (iocbq->flag & IOCB_PRIORITY) {
2880 		/* Add the iocb to the bottom of the node's ptx queue */
2881 		if (nlp->nlp_ptx[channelno].q_first) {
2882 			((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
2883 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
2884 			nlp->nlp_ptx[channelno].q_cnt++;
2885 		} else {
2886 			nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
2887 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
2888 			nlp->nlp_ptx[channelno].q_cnt = 1;
2889 		}
2890 
2891 		iocbq->next = NULL;
2892 	} else {	/* Normal priority */
2893 
2894 
2895 		/* Add the iocb to the bottom of the node's tx queue */
2896 		if (nlp->nlp_tx[channelno].q_first) {
2897 			((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
2898 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
2899 			nlp->nlp_tx[channelno].q_cnt++;
2900 		} else {
2901 			nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
2902 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
2903 			nlp->nlp_tx[channelno].q_cnt = 1;
2904 		}
2905 
2906 		iocbq->next = NULL;
2907 	}
2908 
2909 
2910 	/*
2911 	 * Check that the node is not already on the channel queue and
2912 	 * that it is either not closed or this is a priority request
2913 	 */
2914 	if (!nlp->nlp_next[channelno] &&
2915 	    (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
2916 	    (iocbq->flag & IOCB_PRIORITY))) {
2917 		/* If so, then add it to the channel queue */
2918 		if (cp->nodeq.q_first) {
2919 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2920 			    (uint8_t *)nlp;
2921 			nlp->nlp_next[channelno] = cp->nodeq.q_first;
2922 
2923 			/*
2924 			 * If this is not the base node then add it
2925 			 * to the tail
2926 			 */
2927 			if (!nlp->nlp_base) {
2928 				cp->nodeq.q_last = (uint8_t *)nlp;
2929 			} else {	/* Otherwise, add it to the head */
2930 
2931 				/* The command node always gets priority */
2932 				cp->nodeq.q_first = (uint8_t *)nlp;
2933 			}
2934 
2935 			cp->nodeq.q_cnt++;
2936 		} else {
2937 			cp->nodeq.q_first = (uint8_t *)nlp;
2938 			cp->nodeq.q_last = (uint8_t *)nlp;
2939 			nlp->nlp_next[channelno] = nlp;
2940 			cp->nodeq.q_cnt = 1;
2941 		}
2942 	}
2943 
2944 	HBASTATS.IocbTxPut[channelno]++;
2945 
2946 	/* Adjust the channel timeout timer */
2947 	cp->timeout = hba->timer_tics + 5;
2948 
2949 	if (lock) {
2950 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2951 	}
2952 
2953 	return;
2954 
2955 } /* emlxs_tx_put() */
2956 
2957 
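/*
 * Dequeue the next iocbq for this channel.  Nodes are serviced
 * round-robin from the channel's circular node queue; each node's
 * priority queue is drained before its regular tx queue, and a
 * closed node's regular queue is skipped.  The base node is drained
 * completely before the queue pointers shift to the next node.
 * Returns NULL when nothing is queued.  A hypothetical drain loop:
 *
 *	while ((iocbq = emlxs_tx_get(cp, 1)) != NULL) {
 *		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
 *	}
 */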
2958 extern IOCBQ *
2959 emlxs_tx_get(CHANNEL *cp, uint32_t lock)
2960 {
2961 	emlxs_hba_t *hba;
2962 	uint32_t channelno;
2963 	IOCBQ *iocbq;
2964 	NODELIST *nlp;
2965 	emlxs_buf_t *sbp;
2966 
2967 	hba = cp->hba;
2968 	channelno = cp->channelno;
2969 
2970 	if (lock) {
2971 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2972 	}
2973 
2974 begin:
2975 
2976 	iocbq = NULL;
2977 
2978 	/* Check if a node needs servicing */
2979 	if (cp->nodeq.q_first) {
2980 		nlp = (NODELIST *)cp->nodeq.q_first;
2981 
2982 		/* Get next iocb from node's priority queue */
2983 
2984 		if (nlp->nlp_ptx[channelno].q_first) {
2985 			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
2986 
2987 			/* Check if this is last entry */
2988 			if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
2989 				nlp->nlp_ptx[channelno].q_first = NULL;
2990 				nlp->nlp_ptx[channelno].q_last = NULL;
2991 				nlp->nlp_ptx[channelno].q_cnt = 0;
2992 			} else {
2993 				/* Remove iocb from head */
2994 				nlp->nlp_ptx[channelno].q_first =
2995 				    (void *)iocbq->next;
2996 				nlp->nlp_ptx[channelno].q_cnt--;
2997 			}
2998 
2999 			iocbq->next = NULL;
3000 		}
3001 
3002 		/* Get next iocb from node tx queue if node not closed */
3003 		else if (nlp->nlp_tx[channelno].q_first &&
3004 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
3005 			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
3006 
3007 			/* Check if this is last entry */
3008 			if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
3009 				nlp->nlp_tx[channelno].q_first = NULL;
3010 				nlp->nlp_tx[channelno].q_last = NULL;
3011 				nlp->nlp_tx[channelno].q_cnt = 0;
3012 			} else {
3013 				/* Remove iocb from head */
3014 				nlp->nlp_tx[channelno].q_first =
3015 				    (void *)iocbq->next;
3016 				nlp->nlp_tx[channelno].q_cnt--;
3017 			}
3018 
3019 			iocbq->next = NULL;
3020 		}
3021 
3022 		/* Now deal with node itself */
3023 
3024 		/* Check if node still needs servicing */
3025 		if ((nlp->nlp_ptx[channelno].q_first) ||
3026 		    (nlp->nlp_tx[channelno].q_first &&
3027 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3028 
3029 			/*
3030 			 * If this is the base node, then don't shift the
3031 			 * pointers. We want to drain the base node before
3032 			 * moving on
3033 			 */
3034 			if (!nlp->nlp_base) {
3035 				/*
3036 				 * Just shift channel queue pointers to next
3037 				 * node
3038 				 */
3039 				cp->nodeq.q_last = (void *)nlp;
3040 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3041 			}
3042 		} else {
3043 			/* Remove node from channel queue */
3044 
3045 			/* If this is the last node on list */
3046 			if (cp->nodeq.q_last == (void *)nlp) {
3047 				cp->nodeq.q_last = NULL;
3048 				cp->nodeq.q_first = NULL;
3049 				cp->nodeq.q_cnt = 0;
3050 			} else {
3051 				/* Remove node from head */
3052 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3053 				((NODELIST *)cp->nodeq.q_last)->
3054 				    nlp_next[channelno] = cp->nodeq.q_first;
3055 				cp->nodeq.q_cnt--;
3056 
3057 			}
3058 
3059 			/* Clear node */
3060 			nlp->nlp_next[channelno] = NULL;
3061 		}
3062 
3063 		/*
3064 		 * If no iocbq was found on this node, then it will have
3065 		 * been removed. So try again.
3066 		 */
3067 		if (!iocbq) {
3068 			goto begin;
3069 		}
3070 
3071 		sbp = (emlxs_buf_t *)iocbq->sbp;
3072 
3073 		if (sbp) {
3074 			/*
3075 			 * Check flags before we enter mutex in case this
3076 			 * has been flushed and destroyed
3077 			 */
3078 			if ((sbp->pkt_flags &
3079 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3080 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3081 				goto begin;
3082 			}
3083 
3084 			mutex_enter(&sbp->mtx);
3085 
3086 			if ((sbp->pkt_flags &
3087 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3088 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3089 				mutex_exit(&sbp->mtx);
3090 				goto begin;
3091 			}
3092 
3093 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
3094 			hba->channel_tx_count--;
3095 
3096 			mutex_exit(&sbp->mtx);
3097 		}
3098 	}
3099 
3100 	if (iocbq) {
3101 		HBASTATS.IocbTxGet[channelno]++;
3102 	}
3103 
3104 	/* Adjust the ring timeout timer */
3105 	cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3106 
3107 	if (lock) {
3108 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3109 	}
3110 
3111 	return (iocbq);
3112 
3113 } /* emlxs_tx_get() */
3114 
3115 
3116 /*
3117  * Move all FCP commands for ndlp from from_chan's txq to to_chan's txq.
3118  * The old IoTag has to be released and a new one allocated; everything
3119  * else is left unchanged.  The caller must hold the TX_CHANNEL lock
3120  * unless 'lock' is set, in which case it is acquired here.
3121  */
3122 extern void
3123 emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
3124     uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
3125 {
3126 	emlxs_hba_t *hba;
3127 	emlxs_port_t *port;
3128 	uint32_t fchanno, tchanno, i;
3129 
3130 	IOCBQ *iocbq;
3131 	IOCBQ *prev;
3132 	IOCBQ *next;
3133 	IOCB *iocb, *icmd;
3134 	Q tbm;		/* To Be Moved Q */
3135 	MATCHMAP *mp;
3136 
3137 	NODELIST *nlp = ndlp;
3138 	emlxs_buf_t *sbp;
3139 
3140 	NODELIST *n_prev = NULL;
3141 	NODELIST *n_next = NULL;
3142 	uint16_t count = 0;
3143 
3144 	hba = from_chan->hba;
3145 	port = &PPORT;
3146 	cmd = cmd; /* To pass lint */
3147 
3148 	fchanno = from_chan->channelno;
3149 	tchanno = to_chan->channelno;
3150 
3151 	if (lock) {
3152 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3153 	}
3154 
3155 	bzero((void *)&tbm, sizeof (Q));
3156 
3157 	/* Scan the ndlp's fchanno txq to get the iocb of fcp cmd */
3158 	prev = NULL;
3159 	iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;
3160 
3161 	while (iocbq) {
3162 		next = (IOCBQ *)iocbq->next;
3163 		/* Check if this iocb is an FCP command */
3164 		iocb = &iocbq->iocb;
3165 
3166 		switch (iocb->ULPCOMMAND) {
3167 		/* FCP commands */
3168 		case CMD_FCP_ICMND_CR:
3169 		case CMD_FCP_ICMND_CX:
3170 		case CMD_FCP_IREAD_CR:
3171 		case CMD_FCP_IREAD_CX:
3172 		case CMD_FCP_IWRITE_CR:
3173 		case CMD_FCP_IWRITE_CX:
3174 		case CMD_FCP_ICMND64_CR:
3175 		case CMD_FCP_ICMND64_CX:
3176 		case CMD_FCP_IREAD64_CR:
3177 		case CMD_FCP_IREAD64_CX:
3178 		case CMD_FCP_IWRITE64_CR:
3179 		case CMD_FCP_IWRITE64_CX:
3180 			/* Found an FCP command */
3181 			break;
3182 		default:
3183 			/* Not an FCP command; keep scanning */
3184 			prev = iocbq;
3185 			iocbq = next;
3186 			continue;
3187 		}
3188 
3189 		/* Found an FCP cmd iocb in the fchanno txq; now dequeue it */
3190 		if (next == NULL) {
3191 			/* This is the last iocbq */
3192 			nlp->nlp_tx[fchanno].q_last =
3193 			    (uint8_t *)prev;
3194 		}
3195 
3196 		if (prev == NULL) {
3197 			/* This is the first one then remove it from head */
3198 			nlp->nlp_tx[fchanno].q_first =
3199 			    (uint8_t *)next;
3200 		} else {
3201 			prev->next = next;
3202 		}
3203 
3204 		iocbq->next = NULL;
3205 		nlp->nlp_tx[fchanno].q_cnt--;
3206 
3207 		/* Add this iocb to our local to-be-moved queue */
3208 		/* so we do not hold the TX_CHANNEL lock too long */
3209 
3210 		if (tbm.q_first) {
3211 			((IOCBQ *)tbm.q_last)->next = iocbq;
3212 			tbm.q_last = (uint8_t *)iocbq;
3213 			tbm.q_cnt++;
3214 		} else {
3215 			tbm.q_first = (uint8_t *)iocbq;
3216 			tbm.q_last = (uint8_t *)iocbq;
3217 			tbm.q_cnt = 1;
3218 		}
3219 
3220 		iocbq = next;
3221 
3222 	}	/* While (iocbq) */
3223 
3224 	if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {
3225 
3226 		/* from_chan->nodeq.q_first must be non-NULL */
3227 		if (from_chan->nodeq.q_first) {
3228 
3229 			/* nodeq is not empty, now deal with the node itself */
3230 			if ((nlp->nlp_tx[fchanno].q_first)) {
3231 
3232 				if (!nlp->nlp_base) {
3233 					from_chan->nodeq.q_last =
3234 					    (void *)nlp;
3235 					from_chan->nodeq.q_first =
3236 					    nlp->nlp_next[fchanno];
3237 				}
3238 
3239 			} else {
3240 				n_prev = (NODELIST *)from_chan->nodeq.q_first;
3241 				count = from_chan->nodeq.q_cnt;
3242 
3243 				if (n_prev == nlp) {
3244 
3245 					/* If this is the only node on list */
3246 					if (from_chan->nodeq.q_last ==
3247 					    (void *)nlp) {
3248 						from_chan->nodeq.q_last =
3249 						    NULL;
3250 						from_chan->nodeq.q_first =
3251 						    NULL;
3252 						from_chan->nodeq.q_cnt = 0;
3253 					} else {
3254 						from_chan->nodeq.q_first =
3255 						    nlp->nlp_next[fchanno];
3256 						((NODELIST *)from_chan->
3257 						    nodeq.q_last)->
3258 						    nlp_next[fchanno] =
3259 						    from_chan->nodeq.q_first;
3260 						from_chan->nodeq.q_cnt--;
3261 					}
3262 					/* Clear node */
3263 					nlp->nlp_next[fchanno] = NULL;
3264 				} else {
3265 					count--;
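					/*
					 * Walk the circular node queue,
					 * bounded by the queue count, to
					 * find the node preceding nlp
					 * before unlinking it below.
					 */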
3266 					do {
3267 						n_next =
3268 						    n_prev->nlp_next[fchanno];
3269 						if (n_next == nlp) {
3270 							break;
3271 						}
3272 						n_prev = n_next;
3273 					} while (count--);
3274 
3275 					if (count != 0) {
3276 
3277 						if (n_next ==
3278 						    (NODELIST *)from_chan->
3279 						    nodeq.q_last) {
3280 							n_prev->
3281 							    nlp_next[fchanno]
3282 							    =
3283 							    ((NODELIST *)
3284 							    from_chan->
3285 							    nodeq.q_last)->
3286 							    nlp_next
3287 							    [fchanno];
3288 							from_chan->nodeq.q_last
3289 							    = (uint8_t *)n_prev;
3290 						} else {
3291 
3292 							n_prev->
3293 							    nlp_next[fchanno]
3294 							    =
3295 							    n_next->nlp_next
3296 							    [fchanno];
3297 						}
3298 						from_chan->nodeq.q_cnt--;
3299 						/* Clear node */
3300 						nlp->nlp_next[fchanno] =
3301 						    NULL;
3302 					}
3303 				}
3304 			}
3305 		}
3306 	}
3307 
3308 	/* Now cleanup the iocb's */
3309 	prev = NULL;
3310 	iocbq = (IOCBQ *)tbm.q_first;
3311 
3312 	while (iocbq) {
3313 
3314 		next = (IOCBQ *)iocbq->next;
3315 
3316 		/* Free the IoTag and the bmp */
3317 		iocb = &iocbq->iocb;
3318 
3319 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3320 			sbp = iocbq->sbp;
3321 			if (sbp) {
3322 				hba->fc_table[sbp->iotag] = NULL;
3323 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3324 			}
3325 		} else {
3326 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
3327 			    iocb->ULPIOTAG, 0);
3328 		}
3329 
3330 		if (sbp && (sbp != STALE_PACKET)) {
3331 			mutex_enter(&sbp->mtx);
3332 			sbp->pkt_flags |= PACKET_IN_FLUSH;
3333 
3334 			/*
3335 			 * If the fpkt is already set, then we will leave it
3336 			 * alone. This ensures that this pkt is only accounted
3337 			 * for on one fpkt->flush_count
3338 			 */
3339 			if (!sbp->fpkt && fpkt) {
3340 				mutex_enter(&fpkt->mtx);
3341 				sbp->fpkt = fpkt;
3342 				fpkt->flush_count++;
3343 				mutex_exit(&fpkt->mtx);
3344 			}
3345 			mutex_exit(&sbp->mtx);
3346 		}
3347 		iocbq = next;
3348 
3349 	}	/* end of while */
3350 
3351 	iocbq = (IOCBQ *)tbm.q_first;
3352 	while (iocbq) {
3353 		/* Save the next iocbq for now */
3354 		next = (IOCBQ *)iocbq->next;
3355 
3356 		/* Unlink this iocbq */
3357 		iocbq->next = NULL;
3358 
3359 		/* Get the pkt */
3360 		sbp = (emlxs_buf_t *)iocbq->sbp;
3361 
3362 		if (sbp) {
3363 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3364 			"tx: sbp=%p node=%p", sbp, sbp->node);
3365 
3366 			if (hba->state >= FC_LINK_UP) {
3367 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3368 				    IOERR_ABORT_REQUESTED, 1);
3369 			} else {
3370 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3371 				    IOERR_LINK_DOWN, 1);
3372 			}
3373 
3374 		}
3375 		/* Free the iocb and its associated buffers */
3376 		else {
3377 			icmd = &iocbq->iocb;
3378 
3379 			/* SLI3 */
3380 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
3381 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
3382 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
3383 				if ((hba->flag &
3384 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
3385 					/* HBA is detaching or offlining */
3386 					if (icmd->ULPCOMMAND !=
3387 					    CMD_QUE_RING_LIST64_CN) {
3388 						uint8_t *tmp;
3389 						RING *rp;
3390 						int ch;
3391 
3392 						ch = from_chan->channelno;
3393 						rp = &hba->sli.sli3.ring[ch];
3394 
3395 						for (i = 0;
3396 						    i < icmd->ULPBDECOUNT;
3397 						    i++) {
3398 							mp = EMLXS_GET_VADDR(
3399 							    hba, rp, icmd);
3400 
3401 							tmp = (uint8_t *)mp;
3402 							if (mp) {
3403 							(void) emlxs_mem_put(
3404 							    hba,
3405 							    MEM_BUF,
3406 							    tmp);
3407 							}
3408 						}
3409 
3410 					}
3411 
3412 					(void) emlxs_mem_put(hba, MEM_IOCB,
3413 					    (uint8_t *)iocbq);
3414 				} else {
3415 					/* repost the unsolicited buffer */
3416 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
3417 					    from_chan, iocbq);
3418 				}
3419 			}
3420 		}
3421 
3422 		iocbq = next;
3423 
3424 	}	/* end of while */
3425 
3426 	/* Now flush the chipq if any */
3427 	if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {
3428 
3429 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3430 
3431 		(void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);
3432 
3433 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3434 	}
3435 
3436 	if (lock) {
3437 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3438 	}
3439 
3440 	return;
3441 
3442 } /* emlxs_tx_move */
3443 
3444 
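/*
 * Flush (close) all of a node's IO's that are outstanding on the
 * chip, for all channels or a given one.  Close XRI iocbs are built
 * under EMLXS_FCTAB_LOCK, then queued via emlxs_tx_put() and the
 * affected channels are kicked.  Returns the number of aborts queued.
 */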
3445 extern uint32_t
3446 emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3447     emlxs_buf_t *fpkt)
3448 {
3449 	emlxs_hba_t *hba = HBA;
3450 	emlxs_buf_t *sbp;
3451 	IOCBQ *iocbq;
3452 	IOCBQ *next;
3453 	Q abort;
3454 	CHANNEL *cp;
3455 	uint32_t channelno;
3456 	uint8_t flag[MAX_CHANNEL];
3457 	uint32_t iotag;
3458 
3459 	bzero((void *)&abort, sizeof (Q));
3460 	bzero((void *)flag, sizeof (flag));
3461 
3462 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3463 		cp = &hba->chan[channelno];
3464 
3465 		if (chan && cp != chan) {
3466 			continue;
3467 		}
3468 
3469 		mutex_enter(&EMLXS_FCTAB_LOCK);
3470 
3471 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3472 			sbp = hba->fc_table[iotag];
3473 
3474 			if (sbp && (sbp != STALE_PACKET) &&
3475 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3476 			    (sbp->node == ndlp) &&
3477 			    (sbp->channel == cp) &&
3478 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3479 				emlxs_sbp_abort_add(port, sbp, &abort, flag,
3480 				    fpkt);
3481 			}
3482 
3483 		}
3484 		mutex_exit(&EMLXS_FCTAB_LOCK);
3485 
3486 	}	/* for */
3487 
3488 	/* Now put the iocb's on the tx queue */
3489 	iocbq = (IOCBQ *)abort.q_first;
3490 	while (iocbq) {
3491 		/* Save the next iocbq for now */
3492 		next = (IOCBQ *)iocbq->next;
3493 
3494 		/* Unlink this iocbq */
3495 		iocbq->next = NULL;
3496 
3497 		/* Send this iocbq */
3498 		emlxs_tx_put(iocbq, 1);
3499 
3500 		iocbq = next;
3501 	}
3502 
3503 	/* Now trigger channel service */
3504 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3505 		if (!flag[channelno]) {
3506 			continue;
3507 		}
3508 
3509 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3510 	}
3511 
3512 	return (abort.q_cnt);
3513 
3514 } /* emlxs_chipq_node_flush() */
3515 
3516 
3517 /* Flush all IO's left on all iotag lists */
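/*
 * Aborted iocbs are not completed inline here; they are chained onto
 * each channel's rsp list and the channel's interrupt thread is
 * triggered (emlxs_proc_channel) to complete them later.
 */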
3518 extern uint32_t
3519 emlxs_iotag_flush(emlxs_hba_t *hba)
3520 {
3521 	emlxs_port_t *port = &PPORT;
3522 	emlxs_buf_t *sbp;
3523 	IOCBQ *iocbq;
3524 	IOCB *iocb;
3525 	Q abort;
3526 	CHANNEL *cp;
3527 	uint32_t channelno;
3528 	uint32_t iotag;
3529 	uint32_t count;
3530 
3531 	count = 0;
3532 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3533 		cp = &hba->chan[channelno];
3534 
3535 		bzero((void *)&abort, sizeof (Q));
3536 
3537 		mutex_enter(&EMLXS_FCTAB_LOCK);
3538 
3539 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3540 			sbp = hba->fc_table[iotag];
3541 
3542 			/* Check if the slot is empty */
3543 			if (!sbp || (sbp == STALE_PACKET)) {
3544 				continue;
3545 			}
3546 
3547 			/* We are building an abort list per channel */
3548 			if (sbp->channel != cp) {
3549 				continue;
3550 			}
3551 
3552 			/* Set IOCB status */
3553 			iocbq = &sbp->iocbq;
3554 			iocb = &iocbq->iocb;
3555 
3556 			iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
3557 			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3558 			iocb->ULPLE = 1;
3559 			iocbq->next = NULL;
3560 
3561 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3562 				hba->fc_table[iotag] = NULL;
3563 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3564 			} else {
3565 				hba->fc_table[iotag] = STALE_PACKET;
3566 				hba->io_count--;
3567 				sbp->iotag = 0;
3568 
3569 				/* Clean up the sbp */
3570 				mutex_enter(&sbp->mtx);
3571 
3572 				if (sbp->pkt_flags & PACKET_IN_TXQ) {
3573 					sbp->pkt_flags &= ~PACKET_IN_TXQ;
3574 					hba->channel_tx_count--;
3575 				}
3576 
3577 				if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3578 					sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3579 				}
3580 
3581 				if (sbp->bmp) {
3582 					(void) emlxs_mem_put(hba, MEM_BPL,
3583 					    (uint8_t *)sbp->bmp);
3584 					sbp->bmp = 0;
3585 				}
3586 
3587 				mutex_exit(&sbp->mtx);
3588 			}
3589 
3590 			/* At this point all nodes are assumed destroyed */
3591 			mutex_enter(&sbp->mtx);
3592 			sbp->node = 0;
3593 			mutex_exit(&sbp->mtx);
3594 
3595 			/* Add this iocb to our local abort Q */
3596 			if (abort.q_first) {
3597 				((IOCBQ *)abort.q_last)->next = iocbq;
3598 				abort.q_last = (uint8_t *)iocbq;
3599 				abort.q_cnt++;
3600 			} else {
3601 				abort.q_first = (uint8_t *)iocbq;
3602 				abort.q_last = (uint8_t *)iocbq;
3603 				abort.q_cnt = 1;
3604 			}
3605 		}
3606 
3607 		mutex_exit(&EMLXS_FCTAB_LOCK);
3608 
3609 		/* Trigger deferred completion */
3610 		if (abort.q_first) {
3611 			mutex_enter(&cp->rsp_lock);
3612 			if (cp->rsp_head == NULL) {
3613 				cp->rsp_head = (IOCBQ *)abort.q_first;
3614 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3615 			} else {
3616 				cp->rsp_tail->next = (IOCBQ *)abort.q_first;
3617 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3618 			}
3619 			mutex_exit(&cp->rsp_lock);
3620 
3621 			emlxs_thread_trigger2(&cp->intr_thread,
3622 			    emlxs_proc_channel, cp);
3623 
3624 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
3625 			    "Forced iotag completion. channel=%d count=%d",
3626 			    channelno, abort.q_cnt);
3627 
3628 			count += abort.q_cnt;
3629 		}
3630 	}
3631 
3632 	return (count);
3633 
3634 } /* emlxs_iotag_flush() */
3635 
3636 
3637 
3638 /* Checks for IO's on all or a given channel for a given node */
3639 extern uint32_t
3640 emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3641 {
3642 	emlxs_hba_t *hba = HBA;
3643 	emlxs_buf_t *sbp;
3644 	CHANNEL *cp;
3645 	uint32_t channelno;
3646 	uint32_t count;
3647 	uint32_t iotag;
3648 
3649 	count = 0;
3650 
3651 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3652 		cp = &hba->chan[channelno];
3653 
3654 		if (chan && cp != chan) {
3655 			continue;
3656 		}
3657 
3658 		mutex_enter(&EMLXS_FCTAB_LOCK);
3659 
3660 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3661 			sbp = hba->fc_table[iotag];
3662 
3663 			if (sbp && (sbp != STALE_PACKET) &&
3664 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3665 			    (sbp->node == ndlp) &&
3666 			    (sbp->channel == cp) &&
3667 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3668 				count++;
3669 			}
3670 
3671 		}
3672 		mutex_exit(&EMLXS_FCTAB_LOCK);
3673 
3674 	}	/* for */
3675 
3676 	return (count);
3677 
3678 } /* emlxs_chipq_node_check() */
3679 
3680 
3681 
3682 /* Flush all IO's for a given node's lun (on any channel) */
3683 extern uint32_t
3684 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3685     uint32_t lun, emlxs_buf_t *fpkt)
3686 {
3687 	emlxs_hba_t *hba = HBA;
3688 	emlxs_buf_t *sbp;
3689 	IOCBQ *iocbq;
3690 	IOCBQ *next;
3691 	Q abort;
3692 	uint32_t iotag;
3693 	uint8_t flag[MAX_CHANNEL];
3694 	uint32_t channelno;
3695 
3696 	bzero((void *)flag, sizeof (flag));
3697 	bzero((void *)&abort, sizeof (Q));
3698 
3699 	mutex_enter(&EMLXS_FCTAB_LOCK);
3700 	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3701 		sbp = hba->fc_table[iotag];
3702 
3703 		if (sbp && (sbp != STALE_PACKET) &&
3704 		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
3705 		    sbp->node == ndlp &&
3706 		    sbp->lun == lun &&
3707 		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3708 			emlxs_sbp_abort_add(port, sbp,
3709 			    &abort, flag, fpkt);
3710 		}
3711 	}
3712 	mutex_exit(&EMLXS_FCTAB_LOCK);
3713 
3714 	/* Now put the iocb's on the tx queue */
3715 	iocbq = (IOCBQ *)abort.q_first;
3716 	while (iocbq) {
3717 		/* Save the next iocbq for now */
3718 		next = (IOCBQ *)iocbq->next;
3719 
3720 		/* Unlink this iocbq */
3721 		iocbq->next = NULL;
3722 
3723 		/* Send this iocbq */
3724 		emlxs_tx_put(iocbq, 1);
3725 
3726 		iocbq = next;
3727 	}
3728 
3729 	/* Now trigger channel service */
3730 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3731 		if (!flag[channelno]) {
3732 			continue;
3733 		}
3734 
3735 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3736 	}
3737 
3738 	return (abort.q_cnt);
3739 
3740 } /* emlxs_chipq_lun_flush() */
3741 
3742 
3743 
3744 /*
3745  * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
3746  * This must be called while holding the EMLXS_FCTAB_LOCK.
3747  */
3748 extern IOCBQ *
3749 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3750     uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
3751 {
3752 	emlxs_hba_t *hba = HBA;
3753 	IOCBQ *iocbq;
3754 	IOCB *iocb;
3755 	emlxs_wqe_t *wqe;
3756 	emlxs_buf_t *sbp;
3757 	uint16_t abort_iotag;
3758 
3759 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3760 		return (NULL);
3761 	}
3762 
3763 	iocbq->channel = (void *)cp;
3764 	iocbq->port = (void *)port;
3765 	iocbq->node = (void *)ndlp;
3766 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3767 
3768 	/*
3769 	 * set up an iotag using special Abort iotags
3770 	 */
3771 	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
3772 		hba->fc_oor_iotag = hba->max_iotag;
3773 	}
3774 	abort_iotag = hba->fc_oor_iotag++;
3775 
3776 
3777 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3778 		wqe = &iocbq->wqe;
3779 		sbp = hba->fc_table[iotag];
3780 
3781 		/* Try to issue abort by XRI if possible */
3782 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xp == NULL) {
3783 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
3784 			wqe->AbortTag = iotag;
3785 		} else {
3786 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3787 			wqe->AbortTag = sbp->xp->XRI;
3788 		}
3789 		wqe->un.Abort.IA = 0;
3790 		wqe->RequestTag = abort_iotag;
3791 		wqe->Command = CMD_ABORT_XRI_CX;
3792 		wqe->Class = CLASS3;
3793 		wqe->CQId = 0x3ff;
3794 		wqe->CmdType = WQE_TYPE_ABORT;
3795 	} else {
3796 		iocb = &iocbq->iocb;
3797 		iocb->ULPIOTAG = abort_iotag;
3798 		iocb->un.acxri.abortType = flag;
3799 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3800 		iocb->un.acxri.abortIoTag = iotag;
3801 		iocb->ULPLE = 1;
3802 		iocb->ULPCLASS = class;
3803 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
3804 		iocb->ULPOWNER = OWN_CHIP;
3805 	}
3806 
3807 	return (iocbq);
3808 
3809 } /* emlxs_create_abort_xri_cn() */
3810 
3811 
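/*
 * Same as emlxs_create_abort_xri_cn() except that the exchange is
 * named directly by its exchange id (xid) rather than by a driver
 * iotag, e.g. for unsolicited exchanges with no fc_table entry.
 */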
3812 extern IOCBQ *
3813 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3814     CHANNEL *cp, uint8_t class, int32_t flag)
3815 {
3816 	emlxs_hba_t *hba = HBA;
3817 	IOCBQ *iocbq;
3818 	IOCB *iocb;
3819 	emlxs_wqe_t *wqe;
3820 	uint16_t abort_iotag;
3821 
3822 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3823 		return (NULL);
3824 	}
3825 
3826 	iocbq->channel = (void *)cp;
3827 	iocbq->port = (void *)port;
3828 	iocbq->node = (void *)ndlp;
3829 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3830 
3831 	/*
3832 	 * set up an iotag using special Abort iotags
3833 	 */
3834 	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
3835 		hba->fc_oor_iotag = hba->max_iotag;
3836 	}
3837 	abort_iotag = hba->fc_oor_iotag++;
3838 
3839 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3840 		wqe = &iocbq->wqe;
3841 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3842 		wqe->un.Abort.IA = 0;
3843 		wqe->RequestTag = abort_iotag;
3844 		wqe->AbortTag = xid;
3845 		wqe->Command = CMD_ABORT_XRI_CX;
3846 		wqe->Class = CLASS3;
3847 		wqe->CQId = 0x3ff;
3848 		wqe->CmdType = WQE_TYPE_ABORT;
3849 	} else {
3850 		iocb = &iocbq->iocb;
3851 		iocb->ULPCONTEXT = xid;
3852 		iocb->ULPIOTAG = abort_iotag;
3853 		iocb->un.acxri.abortType = flag;
3854 		iocb->ULPLE = 1;
3855 		iocb->ULPCLASS = class;
3856 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
3857 		iocb->ULPOWNER = OWN_CHIP;
3858 	}
3859 
3860 	return (iocbq);
3861 
3862 } /* emlxs_create_abort_xri_cx() */
3863 
3864 
3865 
3866 /* This must be called while holding the EMLXS_FCTAB_LOCK */
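/*
 * Unlike the ABORT_XRI variants above, the CLOSE_XRI variants set the
 * implicit-abort bit (IA = 1 in the SLI4 WQE), closing the exchange
 * locally instead of initiating an ABTS on the wire.
 */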
3867 extern IOCBQ *
3868 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3869     uint16_t iotag, CHANNEL *cp)
3870 {
3871 	emlxs_hba_t *hba = HBA;
3872 	IOCBQ *iocbq;
3873 	IOCB *iocb;
3874 	emlxs_wqe_t *wqe;
3875 	emlxs_buf_t *sbp;
3876 	uint16_t abort_iotag;
3877 
3878 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3879 		return (NULL);
3880 	}
3881 
3882 	iocbq->channel = (void *)cp;
3883 	iocbq->port = (void *)port;
3884 	iocbq->node = (void *)ndlp;
3885 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3886 
3887 	/*
3888 	 * set up an iotag using special Abort iotags
3889 	 */
3890 	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
3891 		hba->fc_oor_iotag = hba->max_iotag;
3892 	}
3893 	abort_iotag = hba->fc_oor_iotag++;
3894 
3895 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3896 		wqe = &iocbq->wqe;
3897 		sbp = hba->fc_table[iotag];
3898 
3899 		/* Try to issue close by XRI if possible */
3900 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xp == NULL) {
3901 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
3902 			wqe->AbortTag = iotag;
3903 		} else {
3904 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3905 			wqe->AbortTag = sbp->xp->XRI;
3906 		}
3907 		wqe->un.Abort.IA = 1;
3908 		wqe->RequestTag = abort_iotag;
3909 		wqe->Command = CMD_ABORT_XRI_CX;
3910 		wqe->Class = CLASS3;
3911 		wqe->CQId = 0x3ff;
3912 		wqe->CmdType = WQE_TYPE_ABORT;
3913 	} else {
3914 		iocb = &iocbq->iocb;
3915 		iocb->ULPIOTAG = abort_iotag;
3916 		iocb->un.acxri.abortType = 0;
3917 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3918 		iocb->un.acxri.abortIoTag = iotag;
3919 		iocb->ULPLE = 1;
3920 		iocb->ULPCLASS = 0;
3921 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
3922 		iocb->ULPOWNER = OWN_CHIP;
3923 	}
3924 
3925 	return (iocbq);
3926 
3927 } /* emlxs_create_close_xri_cn() */
3928 
3929 
3930 /* This must be called while holding the EMLXS_FCTAB_LOCK */
3931 extern IOCBQ *
3932 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3933     CHANNEL *cp)
3934 {
3935 	emlxs_hba_t *hba = HBA;
3936 	IOCBQ *iocbq;
3937 	IOCB *iocb;
3938 	emlxs_wqe_t *wqe;
3939 	uint16_t abort_iotag;
3940 
3941 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3942 		return (NULL);
3943 	}
3944 
3945 	iocbq->channel = (void *)cp;
3946 	iocbq->port = (void *)port;
3947 	iocbq->node = (void *)ndlp;
3948 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3949 
3950 	/*
3951 	 * set up an iotag using special Abort iotags
3952 	 */
3953 	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
3954 		hba->fc_oor_iotag = hba->max_iotag;
3955 	}
3956 	abort_iotag = hba->fc_oor_iotag++;
3957 
3958 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3959 		wqe = &iocbq->wqe;
3960 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3961 		wqe->un.Abort.IA = 1;
3962 		wqe->RequestTag = abort_iotag;
3963 		wqe->AbortTag = xid;
3964 		wqe->Command = CMD_ABORT_XRI_CX;
3965 		wqe->Class = CLASS3;
3966 		wqe->CQId = 0x3ff;
3967 		wqe->CmdType = WQE_TYPE_ABORT;
3968 	} else {
3969 		iocb = &iocbq->iocb;
3970 		iocb->ULPCONTEXT = xid;
3971 		iocb->ULPIOTAG = abort_iotag;
3972 		iocb->ULPLE = 1;
3973 		iocb->ULPCLASS = 0;
3974 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
3975 		iocb->ULPOWNER = OWN_CHIP;
3976 	}
3977 
3978 	return (iocbq);
3979 
3980 } /* emlxs_create_close_xri_cx() */
3981 
3982 
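/*
 * Abort an unsolicited CT exchange by its receive exchange id.  If
 * the link is still up a full ABTS abort is issued; otherwise the
 * exchange is simply closed.
 */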
3983 void
3984 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
3985 {
3986 	CHANNEL *cp;
3987 	IOCBQ *iocbq;
3988 
3989 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
3990 	    "Aborting CT exchange: xid=%x", rxid);
3991 
3992 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3993 		if (emlxs_sli4_unreserve_xri(hba, rxid) == 0) {
3994 			/* We have no way to abort unsolicited exchanges */
3995 			/* that we have not responded to at this time */
3996 			/* So we will return for now */
3997 			return;
3998 		}
3999 	}
4000 
4001 	cp = &hba->chan[hba->channel_ct];
4002 
4003 	/* Create the abort IOCB */
4004 	if (hba->state >= FC_LINK_UP) {
4005 		iocbq =
4006 		    emlxs_create_abort_xri_cx(port, NULL, rxid, cp, CLASS3,
4007 		    ABORT_TYPE_ABTS);
4008 	} else {
4009 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4010 	}
4011 
4012 	if (iocbq) {
4013 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4014 	}
4015 
4016 } /* emlxs_abort_ct_exchange() */
4017 
4018 
4019 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4020 static void
4021 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4022     uint8_t *flag, emlxs_buf_t *fpkt)
4023 {
4024 	emlxs_hba_t *hba = HBA;
4025 	IOCBQ *iocbq;
4026 	CHANNEL *cp;
4027 	NODELIST *ndlp;
4028 
4029 	cp = (CHANNEL *)sbp->channel;
4030 	ndlp = sbp->node;
4031 
4032 	/* Create the close XRI IOCB */
4033 	iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
4034 
4035 	/*
4036 	 * Add this iocb to our local abort Q
4037 	 * This way we don't hold the CHIPQ lock too long
4038 	 */
4039 	if (iocbq) {
4040 		if (abort->q_first) {
4041 			((IOCBQ *)abort->q_last)->next = iocbq;
4042 			abort->q_last = (uint8_t *)iocbq;
4043 			abort->q_cnt++;
4044 		} else {
4045 			abort->q_first = (uint8_t *)iocbq;
4046 			abort->q_last = (uint8_t *)iocbq;
4047 			abort->q_cnt = 1;
4048 		}
4049 		iocbq->next = NULL;
4050 	}
4051 
4052 	/* set the flags */
4053 	mutex_enter(&sbp->mtx);
4054 
4055 	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4056 
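	/*
	 * Rearm the packet timer (roughly 10 timer ticks) and record the
	 * attempt; the driver's watchdog presumably uses abort_attempts
	 * to escalate if the close does not complete in time.
	 */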
4057 	sbp->ticks = hba->timer_tics + 10;
4058 	sbp->abort_attempts++;
4059 
4060 	flag[cp->channelno] = 1;
4061 
4062 	/*
4063 	 * If the fpkt is already set, then we will leave it alone
4064 	 * This ensures that this pkt is only accounted for on one
4065 	 * fpkt->flush_count
4066 	 */
4067 	if (!sbp->fpkt && fpkt) {
4068 		mutex_enter(&fpkt->mtx);
4069 		sbp->fpkt = fpkt;
4070 		fpkt->flush_count++;
4071 		mutex_exit(&fpkt->mtx);
4072 	}
4073 
4074 	mutex_exit(&sbp->mtx);
4075 
4076 	return;
4077 
4078 }	/* emlxs_sbp_abort_add() */
4079