xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_fcp.c (revision 088c6f3f90c806c9ed1bdffa1b625233a27eb084)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2020 RackTop Systems, Inc.
26  */
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_FCP_C);
32 
33 #define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
34 	PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
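/*
 * Note: the macro body indexes un.cont64[] with the caller's local
 * variable "i", so it is only valid where such an "i" is in scope.
 * The cleanup loop in emlxs_post_buffer() iterates with "j" while
 * the macro still reads un.cont64[i], so every pass looks up the
 * same BDE entry.
 */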
35 
36 static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
37     Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
38 
39 #define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
40 #define	SCSI_INQUIRY			0x12
41 #define	SCSI_RX_DIAG			0x1C
42 
43 
44 /*
45  *  emlxs_handle_fcp_event
46  *
47  *  Description: Process an FCP Rsp Ring completion
48  *
49  */
50 /* ARGSUSED */
51 extern void
52 emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
53 {
54 	emlxs_port_t *port = &PPORT;
55 	emlxs_config_t	*cfg = &CFG;
56 	IOCB *cmd;
57 	emlxs_buf_t *sbp;
58 	fc_packet_t *pkt = NULL;
59 #ifdef SAN_DIAG_SUPPORT
60 	NODELIST *ndlp;
61 #endif
62 	uint32_t iostat;
63 	uint8_t localstat;
64 	fcp_rsp_t *rsp;
65 	uint32_t rsp_data_resid;
66 	uint32_t check_underrun;
67 	uint8_t asc;
68 	uint8_t ascq;
69 	uint8_t scsi_status;
70 	uint8_t sense;
71 	uint32_t did;
72 	uint32_t fix_it;
73 	uint8_t *scsi_cmd;
74 	uint8_t scsi_opcode;
75 	uint16_t scsi_dl;
76 	uint32_t data_rx;
77 	uint32_t length;
78 
79 	cmd = &iocbq->iocb;
80 
81 	/* Initialize the status */
82 	iostat = cmd->ULPSTATUS;
83 	localstat = 0;
84 	scsi_status = 0;
85 	asc = 0;
86 	ascq = 0;
87 	sense = 0;
88 	check_underrun = 0;
89 	fix_it = 0;
90 
91 	HBASTATS.FcpEvent++;
92 
93 	sbp = (emlxs_buf_t *)iocbq->sbp;
94 
95 	if (!sbp) {
96 		/* completion with missing xmit command */
97 		HBASTATS.FcpStray++;
98 
99 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
100 		    "cmd=%x iotag=%d", cmd->ULPCOMMAND, cmd->ULPIOTAG);
101 
102 		return;
103 	}
104 
105 	HBASTATS.FcpCompleted++;
106 
107 #ifdef SAN_DIAG_SUPPORT
108 	emlxs_update_sd_bucket(sbp);
109 #endif /* SAN_DIAG_SUPPORT */
110 
111 	pkt = PRIV2PKT(sbp);
112 
113 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
114 	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
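	/*
	 * The CDB begins at byte 12 of the FCP_CMND payload, following
	 * the 8-byte FCP_LUN and the command reference/task code bytes.
	 */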
115 	scsi_opcode = scsi_cmd[12];
116 	data_rx = 0;
117 
118 	/* Sync data in data buffer only on FC_PKT_FCP_READ */
119 	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
120 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
121 		    DDI_DMA_SYNC_FORKERNEL);
122 
123 #ifdef TEST_SUPPORT
124 		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
125 		    (pkt->pkt_datalen >= 512)) {
126 			hba->underrun_counter--;
127 			iostat = IOSTAT_FCP_RSP_ERROR;
128 
129 			/* Report 512 bytes missing by adapter */
130 			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
131 
132 			/* Corrupt 512 bytes of Data buffer */
133 			bzero((uint8_t *)pkt->pkt_data, 512);
134 
135 			/* Set FCP response to STATUS_GOOD */
136 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
137 		}
138 #endif /* TEST_SUPPORT */
139 	}
140 
141 	/* Process the pkt */
142 	mutex_enter(&sbp->mtx);
143 
144 	/* Check for immediate return */
145 	if ((iostat == IOSTAT_SUCCESS) &&
146 	    (pkt->pkt_comp) &&
147 	    !(sbp->pkt_flags &
148 	    (PACKET_ULP_OWNED | PACKET_COMPLETED |
149 	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
150 	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
151 	    PACKET_IN_ABORT | PACKET_POLLED))) {
152 		HBASTATS.FcpGood++;
153 
154 		sbp->pkt_flags |=
155 		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
156 		    PACKET_COMPLETED | PACKET_ULP_OWNED);
157 		mutex_exit(&sbp->mtx);
158 
159 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
160 		emlxs_unswap_pkt(sbp);
161 #endif /* EMLXS_MODREV2X */
162 
163 #ifdef FMA_SUPPORT
164 		emlxs_check_dma(hba, sbp);
165 #endif  /* FMA_SUPPORT */
166 
167 		cp->ulpCmplCmd++;
168 		(*pkt->pkt_comp) (pkt);
169 
170 #ifdef FMA_SUPPORT
171 		if (hba->flag & FC_DMA_CHECK_ERROR) {
172 			emlxs_thread_spawn(hba, emlxs_restart_thread,
173 			    NULL, NULL);
174 		}
175 #endif  /* FMA_SUPPORT */
176 
177 		return;
178 	}
179 
180 	/*
181 	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
182 	 * is reported.
183 	 */
184 
185 	/* Skip if no FCP_RSP error was reported or no response buffer exists */
186 	if ((iostat != IOSTAT_FCP_RSP_ERROR) || (pkt->pkt_rsplen == 0)) {
187 		goto done;
188 	}
189 
190 	EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
191 	    DDI_DMA_SYNC_FORKERNEL);
192 
193 	/* Get the response buffer pointer */
194 	rsp = (fcp_rsp_t *)pkt->pkt_resp;
195 
196 	/* Validate the response payload */
197 	if (!rsp->fcp_u.fcp_status.resid_under &&
198 	    !rsp->fcp_u.fcp_status.resid_over) {
199 		rsp->fcp_resid = 0;
200 	}
201 
202 	if (!rsp->fcp_u.fcp_status.rsp_len_set) {
203 		rsp->fcp_response_len = 0;
204 	}
205 
206 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
207 		rsp->fcp_sense_len = 0;
208 	}
209 
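	/*
	 * Total response length = fixed FCP_RSP header plus any response
	 * info and sense data; it must fit within the response buffer.
	 */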
210 	length = sizeof (fcp_rsp_t) + LE_SWAP32(rsp->fcp_response_len) +
211 	    LE_SWAP32(rsp->fcp_sense_len);
212 
213 	if (length > pkt->pkt_rsplen) {
214 		iostat = IOSTAT_RSP_INVALID;
215 		pkt->pkt_data_resid = pkt->pkt_datalen;
216 		goto done;
217 	}
218 
219 	/* Set the valid response flag */
220 	sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
221 
222 	scsi_status = rsp->fcp_u.fcp_status.scsi_status;
223 
224 #ifdef SAN_DIAG_SUPPORT
225 	ndlp = (NODELIST *)iocbq->node;
226 	if (scsi_status == SCSI_STAT_QUE_FULL) {
227 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
228 		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
229 	} else if (scsi_status == SCSI_STAT_BUSY) {
230 		emlxs_log_sd_scsi_event(port,
231 		    SD_SCSI_SUBCATEGORY_DEVBSY,
232 		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
233 	}
234 #endif
235 
236 	/*
237 	 * Convert a task abort to a check condition with no data
238 	 * transferred. We saw a data corruption when Solaris received
239 	 * a Task Abort from a tape.
240 	 */
241 
242 	if (scsi_status == SCSI_STAT_TASK_ABORT) {
243 		EMLXS_MSGF(EMLXS_CONTEXT,
244 		    &emlxs_fcp_completion_error_msg,
245 		    "Task Abort. "
246 		    "Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
247 		    did, sbp, scsi_opcode, pkt->pkt_datalen);
248 
249 		rsp->fcp_u.fcp_status.scsi_status =
250 		    SCSI_STAT_CHECK_COND;
251 		rsp->fcp_u.fcp_status.rsp_len_set = 0;
252 		rsp->fcp_u.fcp_status.sense_len_set = 0;
253 		rsp->fcp_u.fcp_status.resid_over = 0;
254 
255 		if (pkt->pkt_datalen) {
256 			rsp->fcp_u.fcp_status.resid_under = 1;
257 			rsp->fcp_resid =
258 			    LE_SWAP32(pkt->pkt_datalen);
259 		} else {
260 			rsp->fcp_u.fcp_status.resid_under = 0;
261 			rsp->fcp_resid = 0;
262 		}
263 
264 		scsi_status = SCSI_STAT_CHECK_COND;
265 	}
266 
267 	/*
268 	 * We only need to check underrun if data could
269 	 * have been sent
270 	 */
271 
272 	/* Always check underrun if status is good */
273 	if (scsi_status == SCSI_STAT_GOOD) {
274 		check_underrun = 1;
275 	}
276 	/* Check the sense codes if this is a check condition */
277 	else if (scsi_status == SCSI_STAT_CHECK_COND) {
278 		check_underrun = 1;
279 
280 		/* Check if sense data was provided */
281 		if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
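			/*
			 * Fixed-format sense data is assumed to begin 32
			 * bytes into the FCP_RSP; bytes 2, 12 and 13 hold
			 * the sense key, ASC and ASCQ.
			 */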
282 			sense = *((uint8_t *)rsp + 32 + 2);
283 			asc = *((uint8_t *)rsp + 32 + 12);
284 			ascq = *((uint8_t *)rsp + 32 + 13);
285 		}
286 
287 #ifdef SAN_DIAG_SUPPORT
288 		emlxs_log_sd_scsi_check_event(port,
289 		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
290 		    scsi_opcode, sense, asc, ascq);
291 #endif
292 	}
293 	/* Status is not good and this is not a check condition */
294 	/* No data should have been sent */
295 	else {
296 		check_underrun = 0;
297 	}
298 
299 	/* Initialize the resids */
300 	pkt->pkt_resp_resid = 0;
301 	pkt->pkt_data_resid = 0;
302 
303 	/* Check if no data was to be transferred */
304 	if (pkt->pkt_datalen == 0) {
305 		goto done;
306 	}
307 
308 	/* Get the residual underrun count reported by the SCSI reply */
309 	rsp_data_resid = (rsp->fcp_u.fcp_status.resid_under) ?
310 	    LE_SWAP32(rsp->fcp_resid) : 0;
311 
312 	/* Set pkt_data_resid to the resid reported in the SCSI response */
313 	pkt->pkt_data_resid = rsp_data_resid;
314 
315 	/* Adjust the pkt_data_resid field if needed */
316 	if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
317 		/*
318 		 * Get the residual underrun count reported by
319 		 * our adapter
320 		 */
321 		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
322 
323 #ifdef SAN_DIAG_SUPPORT
324 		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
325 			emlxs_log_sd_fc_rdchk_event(port,
326 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
327 			    scsi_opcode, pkt->pkt_data_resid);
328 		}
329 #endif
330 
331 		/* Get the actual amount of data transferred */
332 		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
333 
334 		/*
335 		 * If the residual being reported by the adapter is
336 		 * greater than the residual being reported in the
337 		 * reply, then we have a true underrun.
338 		 */
339 		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
340 			switch (scsi_opcode) {
341 			case SCSI_INQUIRY:
342 				scsi_dl = scsi_cmd[16];
343 				break;
344 
345 			case SCSI_RX_DIAG:
346 				scsi_dl =
347 				    (scsi_cmd[15] * 0x100) +
348 				    scsi_cmd[16];
349 				break;
350 
351 			default:
352 				scsi_dl = pkt->pkt_datalen;
353 			}
354 
355 #ifdef FCP_UNDERRUN_PATCH1
356 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
357 			/*
358 			 * If status is not good and no data was
359 			 * actually transferred, then we must fix
360 			 * the issue
361 			 */
362 			if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
363 				fix_it = 1;
364 
365 				EMLXS_MSGF(EMLXS_CONTEXT,
366 				    &emlxs_fcp_completion_error_msg,
367 				    "Underrun(1). Fixed. "
368 				    "did=0x%06x sbp=%p cmd=%02x "
369 				    "dl=%d,%d rx=%d rsp=%d",
370 				    did, sbp, scsi_opcode,
371 				    pkt->pkt_datalen, scsi_dl,
372 				    (pkt->pkt_datalen -
373 				    pkt->pkt_data_resid),
374 				    rsp_data_resid);
375 
376 			}
377 }
378 #endif /* FCP_UNDERRUN_PATCH1 */
379 
380 
381 #ifdef FCP_UNDERRUN_PATCH2
382 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
383 			if (scsi_status == SCSI_STAT_GOOD) {
384 				emlxs_msg_t	*msg;
385 
386 				msg = &emlxs_fcp_completion_error_msg;
387 				/*
388 				 * If status is good and this is an
389 				 * inquiry request and the amount of
390 				 * data requested <= data received,
391 				 * then we must fix the issue.
392 				 */
396 
397 				if ((scsi_opcode == SCSI_INQUIRY) &&
398 				    (pkt->pkt_datalen >= data_rx) &&
399 				    (scsi_dl <= data_rx)) {
400 					fix_it = 1;
401 
402 					EMLXS_MSGF(EMLXS_CONTEXT, msg,
403 					    "Underrun(2). Fixed. "
404 					    "did=0x%06x sbp=%p "
405 					    "cmd=%02x dl=%d,%d "
406 					    "rx=%d rsp=%d",
407 					    did, sbp, scsi_opcode,
408 					    pkt->pkt_datalen, scsi_dl,
409 					    data_rx, rsp_data_resid);
410 
411 				}
412 
413 				/*
414 				 * If status is good and this is an
415 				 * inquiry request and the amount of
416 				 * data requested >= 128 bytes, but
417 				 * only 128 bytes were received,
418 				 * then we must fix the issue.
419 				 */
420 				else if ((scsi_opcode == SCSI_INQUIRY) &&
421 				    (pkt->pkt_datalen >= 128) &&
422 				    (scsi_dl >= 128) && (data_rx == 128)) {
423 					fix_it = 1;
424 
425 					EMLXS_MSGF(EMLXS_CONTEXT, msg,
426 					    "Underrun(3). Fixed. "
427 					    "did=0x%06x sbp=%p "
428 					    "cmd=%02x dl=%d,%d "
429 					    "rx=%d rsp=%d",
430 					    did, sbp, scsi_opcode,
431 					    pkt->pkt_datalen, scsi_dl,
432 					    data_rx, rsp_data_resid);
433 
434 				}
435 			}
436 }
437 #endif /* FCP_UNDERRUN_PATCH2 */
438 
439 			/*
440 			 * Check if SCSI response payload should be
441 			 * fixed or if a DATA_UNDERRUN should be
442 			 * reported
443 			 */
444 			if (fix_it) {
445 				/*
446 				 * Fix the SCSI response payload itself
447 				 */
448 				rsp->fcp_u.fcp_status.resid_under = 1;
449 				rsp->fcp_resid =
450 				    LE_SWAP32(pkt->pkt_data_resid);
451 			} else {
452 				/*
453 				 * Change the status from
454 				 * IOSTAT_FCP_RSP_ERROR to
455 				 * IOSTAT_DATA_UNDERRUN
456 				 */
457 				iostat = IOSTAT_DATA_UNDERRUN;
458 				pkt->pkt_data_resid =
459 				    pkt->pkt_datalen;
460 			}
461 		}
462 
463 		/*
464 		 * If the residual being reported by the adapter is
465 		 * less than the residual being reported in the reply,
466 		 * then we have a true overrun. Since we don't know
467 		 * where the extra data came from or went to then we
468 		 * cannot trust anything we received
469 		 */
470 		else if (rsp_data_resid > pkt->pkt_data_resid) {
471 			/*
472 			 * Change the status from
473 			 * IOSTAT_FCP_RSP_ERROR to
474 			 * IOSTAT_DATA_OVERRUN
475 			 */
476 			iostat = IOSTAT_DATA_OVERRUN;
477 			pkt->pkt_data_resid = pkt->pkt_datalen;
478 		}
479 
480 	} else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
481 	    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
482 		/*
483 		 * Get the residual underrun count reported by
484 		 * our adapter
485 		 */
486 		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
487 
488 #ifdef SAN_DIAG_SUPPORT
489 		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
490 			emlxs_log_sd_fc_rdchk_event(port,
491 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
492 			    scsi_opcode, pkt->pkt_data_resid);
493 		}
494 #endif /* SAN_DIAG_SUPPORT */
495 
496 		/* Get the actual amount of data transferred */
497 		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
498 
499 		/*
500 		 * If the residual being reported by the adapter is
501 		 * greater than the residual being reported in the
502 		 * reply, then we have a true underrun.
503 		 */
504 		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
505 
506 			scsi_dl = pkt->pkt_datalen;
507 
508 #ifdef FCP_UNDERRUN_PATCH1
509 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
510 			/*
511 			 * If status is not good and no data was
512 			 * actually transferred, then we must fix
513 			 * the issue
514 			 */
515 			if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
516 				fix_it = 1;
517 
518 				EMLXS_MSGF(EMLXS_CONTEXT,
519 				    &emlxs_fcp_completion_error_msg,
520 				    "Underrun(1). Fixed. "
521 				    "did=0x%06x sbp=%p cmd=%02x "
522 				    "dl=%d,%d rx=%d rsp=%d",
523 				    did, sbp, scsi_opcode,
524 				    pkt->pkt_datalen, scsi_dl,
525 				    (pkt->pkt_datalen -
526 				    pkt->pkt_data_resid),
527 				    rsp_data_resid);
528 
529 			}
530 }
531 #endif /* FCP_UNDERRUN_PATCH1 */
532 
533 			/*
534 			 * Check if SCSI response payload should be
535 			 * fixed or if a DATA_UNDERRUN should be
536 			 * reported
537 			 */
538 			if (fix_it) {
539 				/*
540 				 * Fix the SCSI response payload itself
541 				 */
542 				rsp->fcp_u.fcp_status.resid_under = 1;
543 				rsp->fcp_resid =
544 				    LE_SWAP32(pkt->pkt_data_resid);
545 			} else {
546 				/*
547 				 * Change the status from
548 				 * IOSTAT_FCP_RSP_ERROR to
549 				 * IOSTAT_DATA_UNDERRUN
550 				 */
551 				iostat = IOSTAT_DATA_UNDERRUN;
552 				pkt->pkt_data_resid =
553 				    pkt->pkt_datalen;
554 			}
555 		}
556 
557 		/*
558 		 * If the residual being reported by the adapter is
559 		 * less than the residual being reported in the reply,
560 		 * then we have a true overrun. Since we don't know
561 		 * where the extra data came from or went to then we
562 		 * cannot trust anything we received
563 		 */
564 		else if (rsp_data_resid > pkt->pkt_data_resid) {
565 			/*
566 			 * Change the status from
567 			 * IOSTAT_FCP_RSP_ERROR to
568 			 * IOSTAT_DATA_OVERRUN
569 			 */
570 			iostat = IOSTAT_DATA_OVERRUN;
571 			pkt->pkt_data_resid = pkt->pkt_datalen;
572 		}
573 	}
574 
575 done:
576 
577 	/* Print completion message */
578 	switch (iostat) {
579 	case IOSTAT_SUCCESS:
580 		/* Build SCSI GOOD status */
581 		if (pkt->pkt_rsplen) {
582 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
583 		}
584 		break;
585 
586 	case IOSTAT_FCP_RSP_ERROR:
587 		break;
588 
589 	case IOSTAT_REMOTE_STOP:
590 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
591 		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
592 		    scsi_opcode);
593 		break;
594 
595 	case IOSTAT_LOCAL_REJECT:
596 		localstat = cmd->un.grsp.perr.statLocalError;
597 
598 		switch (localstat) {
599 		case IOERR_SEQUENCE_TIMEOUT:
600 			EMLXS_MSGF(EMLXS_CONTEXT,
601 			    &emlxs_fcp_completion_error_msg,
602 			    "Local reject. "
603 			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
604 			    emlxs_error_xlate(localstat), did, sbp,
605 			    scsi_opcode, pkt->pkt_timeout);
606 			break;
607 
608 		default:
609 			EMLXS_MSGF(EMLXS_CONTEXT,
610 			    &emlxs_fcp_completion_error_msg,
611 			    "Local reject. %s 0x%06x %p %02x (%x)(%x)",
612 			    emlxs_error_xlate(localstat), did, sbp,
613 			    scsi_opcode, (uint16_t)cmd->ULPIOTAG,
614 			    (uint16_t)cmd->ULPCONTEXT);
615 		}
616 
617 		break;
618 
619 	case IOSTAT_NPORT_RJT:
620 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
621 		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
622 		    scsi_opcode);
623 		break;
624 
625 	case IOSTAT_FABRIC_RJT:
626 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
627 		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
628 		    scsi_opcode);
629 		break;
630 
631 	case IOSTAT_NPORT_BSY:
632 #ifdef SAN_DIAG_SUPPORT
633 		ndlp = (NODELIST *)iocbq->node;
634 		emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
635 #endif
636 
637 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
638 		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
639 		    scsi_opcode);
640 		break;
641 
642 	case IOSTAT_FABRIC_BSY:
643 #ifdef SAN_DIAG_SUPPORT
644 		ndlp = (NODELIST *)iocbq->node;
645 		emlxs_log_sd_fc_bsy_event(port, NULL);
646 #endif
647 
648 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
649 		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
650 		    scsi_opcode);
651 		break;
652 
653 	case IOSTAT_INTERMED_RSP:
654 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
655 		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
656 		    sbp, scsi_opcode);
657 		break;
658 
659 	case IOSTAT_LS_RJT:
660 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
661 		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
662 		    scsi_opcode);
663 		break;
664 
665 	case IOSTAT_DATA_UNDERRUN:
666 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
667 		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
668 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
669 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
670 		    rsp_data_resid, scsi_status, sense, asc, ascq);
671 		break;
672 
673 	case IOSTAT_DATA_OVERRUN:
674 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
675 		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
676 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
677 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
678 		    rsp_data_resid, scsi_status, sense, asc, ascq);
679 		break;
680 
681 	case IOSTAT_RSP_INVALID:
682 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
683 		    "Rsp Invalid. did=0x%06x sbp=%p cmd=%02x dl=%d rl=%d"
684 		    "(%d, %d, %d)",
685 		    did, sbp, scsi_opcode, pkt->pkt_datalen, pkt->pkt_rsplen,
686 		    LE_SWAP32(rsp->fcp_resid),
687 		    LE_SWAP32(rsp->fcp_sense_len),
688 		    LE_SWAP32(rsp->fcp_response_len));
689 		break;
690 
691 	default:
692 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
693 		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
694 		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
695 		    scsi_opcode);
696 		break;
697 	}
698 
699 	if (iostat == IOSTAT_SUCCESS) {
700 		HBASTATS.FcpGood++;
701 	} else {
702 		HBASTATS.FcpError++;
703 	}
704 
705 	mutex_exit(&sbp->mtx);
706 
707 	emlxs_pkt_complete(sbp, iostat, localstat, 0);
708 
709 	return;
710 
711 } /* emlxs_handle_fcp_event() */
712 
713 
714 /*
715  *  emlxs_post_buffer
716  *
717  *  This routine posts "cnt" buffers to the ring using
718  *  QUE_RING_BUF64_CN commands. Up to 2 buffers can be
719  *  posted per command.
720  *  Returns the number of buffers NOT posted.
721  */
722 /* SLI3 */
723 extern int
724 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
725 {
726 	emlxs_port_t *port = &PPORT;
727 	IOCB *icmd;
728 	IOCBQ *iocbq;
729 	MATCHMAP *mp;
730 	uint16_t tag;
731 	uint32_t maxqbuf;
732 	int32_t i;
733 	int32_t j;
734 	uint32_t seg;
735 	uint32_t size;
736 
737 	mp = 0;
738 	maxqbuf = 2;
739 	tag = (uint16_t)cnt;
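	/* Include any buffers that failed to post on a previous attempt */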
740 	cnt += rp->fc_missbufcnt;
741 
742 	if (rp->ringno == hba->channel_els) {
743 		seg = MEM_BUF;
744 		size = MEM_ELSBUF_SIZE;
745 	} else if (rp->ringno == hba->channel_ip) {
746 		seg = MEM_IPBUF;
747 		size = MEM_IPBUF_SIZE;
748 	} else if (rp->ringno == hba->channel_ct) {
749 		seg = MEM_CTBUF;
750 		size = MEM_CTBUF_SIZE;
751 	}
752 #ifdef SFCT_SUPPORT
753 	else if (rp->ringno == hba->CHANNEL_FCT) {
754 		seg = MEM_FCTBUF;
755 		size = MEM_FCTBUF_SIZE;
756 	}
757 #endif /* SFCT_SUPPORT */
758 	else {
759 		return (0);
760 	}
761 
762 	/*
763 	 * While there are buffers to post
764 	 */
765 	while (cnt) {
766 		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
767 			rp->fc_missbufcnt = cnt;
768 			return (cnt);
769 		}
770 
771 		iocbq->channel = (void *)&hba->chan[rp->ringno];
772 		iocbq->port = (void *)port;
773 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
774 
775 		icmd = &iocbq->iocb;
776 
777 		/*
778 		 * Max buffers can be posted per command
779 		 */
780 		for (i = 0; i < maxqbuf; i++) {
781 			if (cnt <= 0)
782 				break;
783 
784 			/* fill in BDEs for command */
785 			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg))
786 			    == 0) {
787 				icmd->ULPBDECOUNT = i;
788 				for (j = 0; j < i; j++) {
789 					mp = EMLXS_GET_VADDR(hba, rp, icmd);
790 					if (mp) {
791 						emlxs_mem_put(hba, seg,
792 						    (void *)mp);
793 					}
794 				}
795 
796 				rp->fc_missbufcnt = cnt + i;
797 
798 				emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
799 
800 				return (cnt + i);
801 			}
802 
803 			/*
804 			 * map that page and save the address pair for lookup
805 			 * later
806 			 */
807 			emlxs_mem_map_vaddr(hba,
808 			    rp,
809 			    mp,
810 			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
811 			    (uint32_t *)&icmd->un.cont64[i].addrLow);
812 
813 			icmd->un.cont64[i].tus.f.bdeSize = size;
814 			icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;
815 
816 			/*
817 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
818 			 *    "UB Post: ring=%d addr=%08x%08x size=%d",
819 			 *    rp->ringno, icmd->un.cont64[i].addrHigh,
820 			 *    icmd->un.cont64[i].addrLow, size);
821 			 */
822 
823 			cnt--;
824 		}
825 
826 		icmd->ULPIOTAG = tag;
827 		icmd->ULPBDECOUNT = i;
828 		icmd->ULPLE = 1;
829 		icmd->ULPOWNER = OWN_CHIP;
830 		/* used for delimiter between commands */
831 		iocbq->bp = (void *)mp;
832 
833 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
834 	}
835 
836 	rp->fc_missbufcnt = 0;
837 
838 	return (0);
839 
840 } /* emlxs_post_buffer() */
841 
842 
843 static void
844 emlxs_fcp_tag_nodes(emlxs_port_t *port)
845 {
846 	NODELIST *nlp;
847 	int i;
848 
849 	/* We will process all nodes with this tag later */
850 	rw_enter(&port->node_rwlock, RW_READER);
851 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
852 		nlp = port->node_table[i];
853 		while (nlp != NULL) {
854 			nlp->nlp_tag = 1;
855 			nlp = nlp->nlp_list_next;
856 		}
857 	}
858 	rw_exit(&port->node_rwlock);
859 }
860 
861 
862 static NODELIST *
863 emlxs_find_tagged_node(emlxs_port_t *port)
864 {
865 	NODELIST *nlp;
866 	NODELIST *tagged;
867 	int i;
868 
869 	/* Find first node */
870 	rw_enter(&port->node_rwlock, RW_READER);
871 	tagged = 0;
872 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
873 		nlp = port->node_table[i];
874 		while (nlp != NULL) {
875 			if (!nlp->nlp_tag) {
876 				nlp = nlp->nlp_list_next;
877 				continue;
878 			}
879 			nlp->nlp_tag = 0;
880 
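			/* Skip nodes still holding the reserved fabric RPI */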
881 			if (nlp->nlp_Rpi == FABRIC_RPI) {
882 				nlp = nlp->nlp_list_next;
883 				continue;
884 			}
885 			tagged = nlp;
886 			break;
887 		}
888 		if (tagged) {
889 			break;
890 		}
891 	}
892 	rw_exit(&port->node_rwlock);
893 	return (tagged);
894 }
895 
896 
897 extern int
898 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
899 {
900 	emlxs_hba_t *hba = HBA;
901 	emlxs_config_t *cfg;
902 	NODELIST *nlp;
903 	fc_affected_id_t *aid;
904 	uint32_t mask;
905 	uint32_t aff_d_id;
906 	uint32_t linkdown;
907 	uint32_t vlinkdown;
908 	uint32_t action;
909 	int i;
910 	uint32_t unreg_vpi;
911 	uint32_t update;
912 	uint32_t adisc_support;
913 	uint32_t clear_all;
914 	uint8_t format;
915 
916 	/* Target mode only uses this routine for linkdowns */
917 	if ((port->mode == MODE_TARGET) && (scope != 0xffffffff) &&
918 	    (scope != 0xfeffffff) && (scope != 0xfdffffff)) {
919 		return (0);
920 	}
921 
922 	cfg = &CFG;
923 	aid = (fc_affected_id_t *)&scope;
924 	linkdown = 0;
925 	vlinkdown = 0;
926 	unreg_vpi = 0;
927 	update = 0;
928 	clear_all = 0;
929 
930 	if (!(port->flag & EMLXS_PORT_BOUND)) {
931 		return (0);
932 	}
933 
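	/*
	 * The top byte of the scope selects the affected-ID format
	 * (port/area/domain/network, or the special link down, virtual
	 * link down and new fabric codes); the low 24 bits carry the
	 * affected D_ID.
	 */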
934 	format = aid->aff_format;
935 
936 	switch (format) {
937 	case 0:	/* Port */
938 		mask = 0x00ffffff;
939 		break;
940 
941 	case 1:	/* Area */
942 		mask = 0x00ffff00;
943 		break;
944 
945 	case 2:	/* Domain */
946 		mask = 0x00ff0000;
947 		break;
948 
949 	case 3:	/* Network */
950 		mask = 0x00000000;
951 		break;
952 
953 #ifdef DHCHAP_SUPPORT
954 	case 0xfe:	/* Virtual link down */
955 		mask = 0x00000000;
956 		vlinkdown = 1;
957 		break;
958 #endif /* DHCHAP_SUPPORT */
959 
960 	case 0xff:	/* link is down */
961 		mask = 0x00000000;
962 		linkdown = 1;
963 		break;
964 
965 	case 0xfd:	/* New fabric */
966 	default:
967 		mask = 0x00000000;
968 		linkdown = 1;
969 		clear_all = 1;
970 		break;
971 	}
972 
973 	aff_d_id = aid->aff_d_id & mask;
974 
975 
976 	/*
977 	 * If link is down then this is a hard shutdown and flush
978 	 * If link not down then this is a soft shutdown and flush
979 	 * (e.g. RSCN)
980 	 */
981 	if (linkdown) {
982 		hba->flag &= ~FC_GPIO_LINK_UP;
983 
984 		mutex_enter(&EMLXS_PORT_LOCK);
985 
986 		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
987 
988 		if (port->ulp_statec != FC_STATE_OFFLINE) {
989 			port->ulp_statec = FC_STATE_OFFLINE;
990 
991 			port->prev_did = port->did;
992 			port->did = 0;
993 			port->rdid = 0;
994 
995 			bcopy(&port->fabric_sparam, &port->prev_fabric_sparam,
996 			    sizeof (SERV_PARM));
997 			bzero(&port->fabric_sparam, sizeof (SERV_PARM));
998 
999 			update = 1;
1000 		}
1001 
1002 		mutex_exit(&EMLXS_PORT_LOCK);
1003 
1004 		emlxs_timer_cancel_clean_address(port);
1005 
1006 		/* Tell ULP about it */
1007 		if (update) {
1008 			if (port->flag & EMLXS_PORT_BOUND) {
1009 				if (port->vpi == 0) {
1010 					EMLXS_MSGF(EMLXS_CONTEXT,
1011 					    &emlxs_link_down_msg, NULL);
1012 				}
1013 
1014 				if (port->mode == MODE_INITIATOR) {
1015 					emlxs_fca_link_down(port);
1016 				}
1017 #ifdef SFCT_SUPPORT
1018 				else if (port->mode == MODE_TARGET) {
1019 					emlxs_fct_link_down(port);
1020 				}
1021 #endif /* SFCT_SUPPORT */
1022 
1023 			} else {
1024 				if (port->vpi == 0) {
1025 					EMLXS_MSGF(EMLXS_CONTEXT,
1026 					    &emlxs_link_down_msg, "*");
1027 				}
1028 			}
1029 
1030 
1031 		}
1032 
1033 		unreg_vpi = 1;
1034 
1035 #ifdef DHCHAP_SUPPORT
1036 		/* Stop authentication with all nodes */
1037 		emlxs_dhc_auth_stop(port, NULL);
1038 #endif /* DHCHAP_SUPPORT */
1039 
1040 		/* Flush the base node */
1041 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1042 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1043 
1044 		/* Flush any pending ub buffers */
1045 		emlxs_ub_flush(port);
1046 	}
1047 #ifdef DHCHAP_SUPPORT
1048 	/* virtual link down */
1049 	else if (vlinkdown) {
1050 		mutex_enter(&EMLXS_PORT_LOCK);
1051 
1052 		if (port->ulp_statec != FC_STATE_OFFLINE) {
1053 			port->ulp_statec = FC_STATE_OFFLINE;
1054 			update = 1;
1055 		}
1056 
1057 		mutex_exit(&EMLXS_PORT_LOCK);
1058 
1059 		emlxs_timer_cancel_clean_address(port);
1060 
1061 		/* Tell ULP about it */
1062 		if (update) {
1063 			if (port->flag & EMLXS_PORT_BOUND) {
1064 				if (port->vpi == 0) {
1065 					EMLXS_MSGF(EMLXS_CONTEXT,
1066 					    &emlxs_link_down_msg,
1067 					    "Switch authentication failed.");
1068 				}
1069 
1070 				if (port->mode == MODE_INITIATOR) {
1071 					emlxs_fca_link_down(port);
1072 				}
1073 #ifdef SFCT_SUPPORT
1074 				else if (port->mode == MODE_TARGET) {
1075 					emlxs_fct_link_down(port);
1076 				}
1077 #endif /* SFCT_SUPPORT */
1078 			} else {
1079 				if (port->vpi == 0) {
1080 					EMLXS_MSGF(EMLXS_CONTEXT,
1081 					    &emlxs_link_down_msg,
1082 					    "Switch authentication failed. *");
1083 				}
1084 			}
1085 
1086 
1087 		}
1088 
1089 		/* Flush the base node */
1090 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1091 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1092 	}
1093 #endif /* DHCHAP_SUPPORT */
1094 	else {
1095 		emlxs_timer_cancel_clean_address(port);
1096 	}
1097 
1098 	if (port->mode == MODE_TARGET) {
1099 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1100 			/* Set the node tags */
1101 			emlxs_fcp_tag_nodes(port);
1102 			unreg_vpi = 0;
1103 			while ((nlp = emlxs_find_tagged_node(port))) {
1104 				(void) emlxs_rpi_pause_notify(port,
1105 				    nlp->rpip);
1106 				/*
1107 				 * In port_online we need to resume
1108 				 * these RPIs before we can use them.
1109 				 */
1110 			}
1111 		}
1112 		goto done;
1113 	}
1114 
1115 	/* Set the node tags */
1116 	emlxs_fcp_tag_nodes(port);
1117 
1118 	if (!clear_all && (hba->flag & FC_ONLINE_MODE)) {
1119 		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
1120 	} else {
1121 		adisc_support = 0;
1122 	}
1123 
1124 	/* Check ADISC support level */
1125 	switch (adisc_support) {
1126 	case 0:	/* No support - Flush all IO to all matching nodes */
1127 
1128 		for (;;) {
1129 			/*
1130 			 * We need to hold the locks this way because
1131 			 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1132 			 * same locks. Also, when we release the lock the list
1133 			 * can change out from under us.
1134 			 */
1135 
1136 			/* Find first node */
1137 			rw_enter(&port->node_rwlock, RW_READER);
1138 			action = 0;
1139 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1140 				nlp = port->node_table[i];
1141 				while (nlp != NULL) {
1142 					if (!nlp->nlp_tag) {
1143 						nlp = nlp->nlp_list_next;
1144 						continue;
1145 					}
1146 					nlp->nlp_tag = 0;
1147 
1148 					/*
1149 					 * Check for any device that matches
1150 					 * our mask
1151 					 */
1152 					if ((nlp->nlp_DID & mask) == aff_d_id) {
1153 						if (linkdown) {
1154 							action = 1;
1155 							break;
1156 						} else { /* Must be an RSCN */
1157 
1158 							action = 2;
1159 							break;
1160 						}
1161 					}
1162 					nlp = nlp->nlp_list_next;
1163 				}
1164 
1165 				if (action) {
1166 					break;
1167 				}
1168 			}
1169 			rw_exit(&port->node_rwlock);
1170 
1171 
1172 			/* Check if nothing was found */
1173 			if (action == 0) {
1174 				break;
1175 			} else if (action == 1) {
1176 				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
1177 				    NULL, NULL, NULL);
1178 			} else if (action == 2) {
1179 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1180 
1181 #ifdef DHCHAP_SUPPORT
1182 				emlxs_dhc_auth_stop(port, nlp);
1183 #endif /* DHCHAP_SUPPORT */
1184 
1185 				/*
1186 				 * Close the node for any further normal IO
1187 				 * A PLOGI with reopen the node
1188 				 * A PLOGI will reopen the node
1189 				emlxs_node_close(port, nlp,
1190 				    hba->channel_fcp, 60);
1191 				emlxs_node_close(port, nlp,
1192 				    hba->channel_ip, 60);
1193 
1194 				/* Flush tx queue */
1195 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1196 
1197 				/* Flush chip queue */
1198 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1199 			}
1200 
1201 		}
1202 
1203 		break;
1204 
1205 	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */
1206 
1207 		for (;;) {
1208 
1209 			/*
1210 			 * We need to hold the locks this way because
1211 			 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1212 			 * same locks. Also, when we release the lock the list
1213 			 * can change out from under us.
1214 			 */
1215 			rw_enter(&port->node_rwlock, RW_READER);
1216 			action = 0;
1217 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1218 				nlp = port->node_table[i];
1219 				while (nlp != NULL) {
1220 					if (!nlp->nlp_tag) {
1221 						nlp = nlp->nlp_list_next;
1222 						continue;
1223 					}
1224 					nlp->nlp_tag = 0;
1225 
1226 					/*
1227 					 * Check for special FCP2 target device
1228 					 * that matches our mask
1229 					 */
1230 					if ((nlp->nlp_fcp_info &
1231 					    NLP_FCP_TGT_DEVICE) &&
1232 					    (nlp->nlp_fcp_info &
1233 					    NLP_FCP_2_DEVICE) &&
1234 					    (nlp->nlp_DID & mask) ==
1235 					    aff_d_id) {
1236 						action = 3;
1237 						break;
1238 					}
1239 
1240 					/*
1241 					 * Check for any other device that
1242 					 * matches our mask
1243 					 */
1244 					else if ((nlp->nlp_DID & mask) ==
1245 					    aff_d_id) {
1246 						if (linkdown) {
1247 							action = 1;
1248 							break;
1249 						} else { /* Must be an RSCN */
1250 
1251 							action = 2;
1252 							break;
1253 						}
1254 					}
1255 
1256 					nlp = nlp->nlp_list_next;
1257 				}
1258 
1259 				if (action) {
1260 					break;
1261 				}
1262 			}
1263 			rw_exit(&port->node_rwlock);
1264 
1265 			/* Check if nothing was found */
1266 			if (action == 0) {
1267 				break;
1268 			} else if (action == 1) {
1269 				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
1270 				    NULL, NULL, NULL);
1271 			} else if (action == 2) {
1272 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1273 
1274 #ifdef DHCHAP_SUPPORT
1275 				emlxs_dhc_auth_stop(port, nlp);
1276 #endif /* DHCHAP_SUPPORT */
1277 
1278 				/*
1279 				 * Close the node for any further normal IO
1280 				 * A PLOGI will reopen the node
1281 				 */
1282 				emlxs_node_close(port, nlp,
1283 				    hba->channel_fcp, 60);
1284 				emlxs_node_close(port, nlp,
1285 				    hba->channel_ip, 60);
1286 
1287 				/* Flush tx queue */
1288 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1289 
1290 				/* Flush chip queue */
1291 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1292 
1293 			} else if (action == 3) {	/* FCP2 devices */
1294 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1295 
1296 				unreg_vpi = 0;
1297 
1298 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1299 					(void) emlxs_rpi_pause_notify(port,
1300 					    nlp->rpip);
1301 				}
1302 
1303 #ifdef DHCHAP_SUPPORT
1304 				emlxs_dhc_auth_stop(port, nlp);
1305 #endif /* DHCHAP_SUPPORT */
1306 
1307 				/*
1308 				 * Close the node for any further normal IO
1309 				 * An ADISC or a PLOGI will reopen the node
1310 				 */
1311 				emlxs_node_close(port, nlp,
1312 				    hba->channel_fcp, -1);
1313 				emlxs_node_close(port, nlp, hba->channel_ip,
1314 				    ((linkdown) ? 0 : 60));
1315 
1316 				/* Flush tx queues except for FCP ring */
1317 				(void) emlxs_tx_node_flush(port, nlp,
1318 				    &hba->chan[hba->channel_ct], 0, 0);
1319 				(void) emlxs_tx_node_flush(port, nlp,
1320 				    &hba->chan[hba->channel_els], 0, 0);
1321 				(void) emlxs_tx_node_flush(port, nlp,
1322 				    &hba->chan[hba->channel_ip], 0, 0);
1323 
1324 				/* Flush chip queues except for FCP ring */
1325 				(void) emlxs_chipq_node_flush(port,
1326 				    &hba->chan[hba->channel_ct], nlp, 0);
1327 				(void) emlxs_chipq_node_flush(port,
1328 				    &hba->chan[hba->channel_els], nlp, 0);
1329 				(void) emlxs_chipq_node_flush(port,
1330 				    &hba->chan[hba->channel_ip], nlp, 0);
1331 			}
1332 		}
1333 		break;
1334 
1335 	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */
1336 
1337 		if (!linkdown && !vlinkdown) {
1338 			break;
1339 		}
1340 
1341 		for (;;) {
1342 			/*
1343 			 * We need to hold the locks this way because
1344 			 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1345 			 * same locks. Also, when we release the lock the list
1346 			 * can change out from under us.
1347 			 */
1348 			rw_enter(&port->node_rwlock, RW_READER);
1349 			action = 0;
1350 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1351 				nlp = port->node_table[i];
1352 				while (nlp != NULL) {
1353 					if (!nlp->nlp_tag) {
1354 						nlp = nlp->nlp_list_next;
1355 						continue;
1356 					}
1357 					nlp->nlp_tag = 0;
1358 
1359 					/*
1360 					 * Check for FCP target device that
1361 					 * matches our mask
1362 					 */
1363 					if ((nlp->nlp_fcp_info &
1364 					    NLP_FCP_TGT_DEVICE) &&
1365 					    (nlp->nlp_DID & mask) ==
1366 					    aff_d_id) {
1367 						action = 3;
1368 						break;
1369 					}
1370 
1371 					/*
1372 					 * Check for any other device that
1373 					 * matches our mask
1374 					 */
1375 					else if ((nlp->nlp_DID & mask) ==
1376 					    aff_d_id) {
1377 						if (linkdown) {
1378 							action = 1;
1379 							break;
1380 						} else { /* Must be an RSCN */
1381 
1382 							action = 2;
1383 							break;
1384 						}
1385 					}
1386 
1387 					nlp = nlp->nlp_list_next;
1388 				}
1389 				if (action) {
1390 					break;
1391 				}
1392 			}
1393 			rw_exit(&port->node_rwlock);
1394 
1395 			/* Check if nothing was found */
1396 			if (action == 0) {
1397 				break;
1398 			} else if (action == 1) {
1399 				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
1400 				    NULL, NULL, NULL);
1401 			} else if (action == 2) {
1402 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1403 
1404 				/*
1405 				 * Close the node for any further normal IO
1406 				 * A PLOGI will reopen the node
1407 				 */
1408 				emlxs_node_close(port, nlp,
1409 				    hba->channel_fcp, 60);
1410 				emlxs_node_close(port, nlp,
1411 				    hba->channel_ip, 60);
1412 
1413 				/* Flush tx queue */
1414 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1415 
1416 				/* Flush chip queue */
1417 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1418 
1419 			} else if (action == 3) {	/* FCP2 devices */
1420 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1421 
1422 				unreg_vpi = 0;
1423 
1424 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1425 					(void) emlxs_rpi_pause_notify(port,
1426 					    nlp->rpip);
1427 				}
1428 
1429 				/*
1430 				 * Close the node for any further normal IO
1431 				 * An ADISC or a PLOGI will reopen the node
1432 				 */
1433 				emlxs_node_close(port, nlp,
1434 				    hba->channel_fcp, -1);
1435 				emlxs_node_close(port, nlp, hba->channel_ip,
1436 				    ((linkdown) ? 0 : 60));
1437 
1438 				/* Flush tx queues except for FCP ring */
1439 				(void) emlxs_tx_node_flush(port, nlp,
1440 				    &hba->chan[hba->channel_ct], 0, 0);
1441 				(void) emlxs_tx_node_flush(port, nlp,
1442 				    &hba->chan[hba->channel_els], 0, 0);
1443 				(void) emlxs_tx_node_flush(port, nlp,
1444 				    &hba->chan[hba->channel_ip], 0, 0);
1445 
1446 				/* Flush chip queues except for FCP ring */
1447 				(void) emlxs_chipq_node_flush(port,
1448 				    &hba->chan[hba->channel_ct], nlp, 0);
1449 				(void) emlxs_chipq_node_flush(port,
1450 				    &hba->chan[hba->channel_els], nlp, 0);
1451 				(void) emlxs_chipq_node_flush(port,
1452 				    &hba->chan[hba->channel_ip], nlp, 0);
1453 			}
1454 		}
1455 
1456 		break;
1457 
1458 	}	/* switch() */
1459 
1460 done:
1461 
1462 	if (unreg_vpi) {
1463 		(void) emlxs_mb_unreg_vpi(port);
1464 	}
1465 
1466 	return (0);
1467 
1468 } /* emlxs_port_offline() */
1469 
1470 
1471 extern void
1472 emlxs_port_online(emlxs_port_t *vport)
1473 {
1474 	emlxs_hba_t *hba = vport->hba;
1475 	emlxs_port_t *port = &PPORT;
1476 	NODELIST *nlp;
1477 	uint32_t state;
1478 	uint32_t update;
1479 	uint32_t npiv_linkup;
1480 	char topology[32];
1481 	char linkspeed[32];
1482 	char mode[32];
1483 
1484 	/*
1485 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1486 	 *    "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1487 	 */
1488 
1489 	if ((vport->vpi > 0) &&
1490 	    (!(hba->flag & FC_NPIV_ENABLED) ||
1491 	    !(hba->flag & FC_NPIV_SUPPORTED))) {
1492 		return;
1493 	}
1494 
1495 	if (!(vport->flag & EMLXS_PORT_BOUND) ||
1496 	    !(vport->flag & EMLXS_PORT_ENABLED)) {
1497 		return;
1498 	}
1499 
1500 	/* Check for mode */
1501 	if (port->mode == MODE_TARGET) {
1502 		(void) strlcpy(mode, ", target", sizeof (mode));
1503 
1504 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1505 			/* Set the node tags */
1506 			emlxs_fcp_tag_nodes(vport);
1507 			while ((nlp = emlxs_find_tagged_node(vport))) {
1508 				/* The RPI was paused in port_offline */
1509 				(void) emlxs_rpi_resume_notify(vport,
1510 				    nlp->rpip, 0);
1511 			}
1512 		}
1513 	} else if (port->mode == MODE_INITIATOR) {
1514 		(void) strlcpy(mode, ", initiator", sizeof (mode));
1515 	} else {
1516 		(void) strlcpy(mode, ", unknown", sizeof (mode));
1517 	}
1518 	mutex_enter(&EMLXS_PORT_LOCK);
1519 
1520 	/* Check for loop topology */
1521 	if (hba->topology == TOPOLOGY_LOOP) {
1522 		state = FC_STATE_LOOP;
1523 		(void) strlcpy(topology, ", loop", sizeof (topology));
1524 	} else {
1525 		state = FC_STATE_ONLINE;
1526 		(void) strlcpy(topology, ", fabric", sizeof (topology));
1527 	}
1528 
1529 	/* Set the link speed */
1530 	switch (hba->linkspeed) {
1531 	case 0:
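		/* Speed not reported; default to a 1Gb state */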
1532 		(void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1533 		state |= FC_STATE_1GBIT_SPEED;
1534 		break;
1535 
1536 	case LA_1GHZ_LINK:
1537 		(void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1538 		state |= FC_STATE_1GBIT_SPEED;
1539 		break;
1540 	case LA_2GHZ_LINK:
1541 		(void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1542 		state |= FC_STATE_2GBIT_SPEED;
1543 		break;
1544 	case LA_4GHZ_LINK:
1545 		(void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1546 		state |= FC_STATE_4GBIT_SPEED;
1547 		break;
1548 	case LA_8GHZ_LINK:
1549 		(void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1550 		state |= FC_STATE_8GBIT_SPEED;
1551 		break;
1552 	case LA_10GHZ_LINK:
1553 		(void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1554 		state |= FC_STATE_10GBIT_SPEED;
1555 		break;
1556 	case LA_16GHZ_LINK:
1557 		(void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1558 		state |= FC_STATE_16GBIT_SPEED;
1559 		break;
1560 	case LA_32GHZ_LINK:
1561 		(void) strlcpy(linkspeed, "32Gb", sizeof (linkspeed));
1562 		state |= FC_STATE_32GBIT_SPEED;
1563 		break;
1564 	default:
1565 		(void) snprintf(linkspeed, sizeof (linkspeed), "unknown(0x%x)",
1566 		    hba->linkspeed);
1567 		break;
1568 	}
1569 
1570 	npiv_linkup = 0;
1571 	update = 0;
1572 
1573 	if ((hba->state >= FC_LINK_UP) &&
1574 	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1575 		update = 1;
1576 		vport->ulp_statec = state;
1577 
1578 		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1579 			hba->flag |= FC_NPIV_LINKUP;
1580 			npiv_linkup = 1;
1581 		}
1582 	}
1583 
1584 	mutex_exit(&EMLXS_PORT_LOCK);
1585 
1586 	if (update) {
1587 		if (vport->flag & EMLXS_PORT_BOUND) {
1588 			if (vport->vpi == 0) {
1589 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1590 				    "%s%s%s", linkspeed, topology, mode);
1591 
1592 			} else if (npiv_linkup) {
1593 				EMLXS_MSGF(EMLXS_CONTEXT,
1594 				    &emlxs_npiv_link_up_msg, "%s%s%s",
1595 				    linkspeed, topology, mode);
1596 			}
1597 
1598 			if (vport->mode == MODE_INITIATOR) {
1599 				emlxs_fca_link_up(vport);
1600 			}
1601 #ifdef SFCT_SUPPORT
1602 			else if (vport->mode == MODE_TARGET) {
1603 				emlxs_fct_link_up(vport);
1604 			}
1605 #endif /* SFCT_SUPPORT */
1606 		} else {
1607 			if (vport->vpi == 0) {
1608 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1609 				    "%s%s%s *", linkspeed, topology, mode);
1610 
1611 			} else if (npiv_linkup) {
1612 				EMLXS_MSGF(EMLXS_CONTEXT,
1613 				    &emlxs_npiv_link_up_msg, "%s%s%s *",
1614 				    linkspeed, topology, mode);
1615 			}
1616 		}
1617 
1618 		/* Check for waiting threads */
1619 		if (vport->vpi == 0) {
1620 			mutex_enter(&EMLXS_LINKUP_LOCK);
1621 			if (hba->linkup_wait_flag == TRUE) {
1622 				hba->linkup_wait_flag = FALSE;
1623 				cv_broadcast(&EMLXS_LINKUP_CV);
1624 			}
1625 			mutex_exit(&EMLXS_LINKUP_LOCK);
1626 		}
1627 
1628 		/* Flush any pending ub buffers */
1629 		emlxs_ub_flush(vport);
1630 	}
1631 
1632 	hba->flag |= FC_GPIO_LINK_UP;
1633 
1634 	return;
1635 
1636 } /* emlxs_port_online() */
1637 
1638 
1639 /* SLI3 */
1640 extern void
1641 emlxs_linkdown(emlxs_hba_t *hba)
1642 {
1643 	emlxs_port_t *port = &PPORT;
1644 	int i;
1645 	uint32_t scope;
1646 
1647 	mutex_enter(&EMLXS_PORT_LOCK);
1648 
1649 	if (hba->state > FC_LINK_DOWN) {
1650 		HBASTATS.LinkDown++;
1651 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
1652 	}
1653 
1654 	/* Set scope: new fabric clears all nodes, else plain link down */
1655 	scope = (hba->flag & FC_NEW_FABRIC)? 0xFDFFFFFF:0xFFFFFFFF;
1656 
1657 	/* Filter hba flags */
1658 	hba->flag &= FC_LINKDOWN_MASK;
1659 	hba->discovery_timer = 0;
1660 	hba->linkup_timer = 0;
1661 
1662 	mutex_exit(&EMLXS_PORT_LOCK);
1663 
1664 	for (i = 0; i < MAX_VPORTS; i++) {
1665 		port = &VPORT(i);
1666 
1667 		if (!(port->flag & EMLXS_PORT_BOUND)) {
1668 			continue;
1669 		}
1670 
1671 		(void) emlxs_port_offline(port, scope);
1672 
1673 	}
1674 
1675 	emlxs_log_link_event(port);
1676 
1677 	return;
1678 
1679 } /* emlxs_linkdown() */
1680 
1681 
1682 /* SLI3 */
1683 extern void
1684 emlxs_linkup(emlxs_hba_t *hba)
1685 {
1686 	emlxs_port_t *port = &PPORT;
1687 	emlxs_config_t *cfg = &CFG;
1688 
1689 	mutex_enter(&EMLXS_PORT_LOCK);
1690 
1691 	/* Check for any mode changes */
1692 	emlxs_mode_set(hba);
1693 
1694 	HBASTATS.LinkUp++;
1695 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);
1696 
1697 #ifdef MENLO_SUPPORT
1698 	if (hba->flag & FC_MENLO_MODE) {
1699 		mutex_exit(&EMLXS_PORT_LOCK);
1700 
1701 		/*
1702 		 * Trigger linkup CV and don't start linkup & discovery
1703 		 * timers
1704 		 */
1705 		mutex_enter(&EMLXS_LINKUP_LOCK);
1706 		cv_broadcast(&EMLXS_LINKUP_CV);
1707 		mutex_exit(&EMLXS_LINKUP_LOCK);
1708 
1709 		emlxs_log_link_event(port);
1710 
1711 		return;
1712 	}
1713 #endif /* MENLO_SUPPORT */
1714 
1715 	/* Set the linkup & discovery timers */
1716 	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1717 	hba->discovery_timer =
1718 	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1719 	    cfg[CFG_DISC_TIMEOUT].current;
1720 
1721 	mutex_exit(&EMLXS_PORT_LOCK);
1722 
1723 	emlxs_log_link_event(port);
1724 
1725 	return;
1726 
1727 } /* emlxs_linkup() */
1728 
1729 
1730 /*
1731  *  emlxs_reset_link
1732  *
1733  *  Description:
1734  *  Called to reset the link with an init_link
1735  *
1736  *    Returns:
1737  *
1738  */
1739 extern int
1740 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
1741 {
1742 	emlxs_port_t *port = &PPORT;
1743 	emlxs_config_t *cfg;
1744 	MAILBOXQ *mbq = NULL;
1745 	MAILBOX *mb = NULL;
1746 	int rval = 0;
1747 	int tmo;
1748 	int rc;
1749 
1750 	/*
1751 	 * Get a buffer to use for the mailbox command
1752 	 */
1753 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
1754 	    == NULL) {
1755 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1756 		    "Unable to allocate mailbox buffer.");
1757 		rval = 1;
1758 		goto reset_link_fail;
1759 	}
1760 
1761 	if (linkup) {
1762 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1763 		    "Resetting link...");
1764 	} else {
1765 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1766 		    "Disabling link...");
1767 	}
1768 
1769 	mb = (MAILBOX *)mbq;
1770 
1771 	/* Bring link down first */
1772 	emlxs_mb_down_link(hba, mbq);
1773 
1774 #define	MBXERR_LINK_DOWN	0x33
1775 
1776 	if (wait) {
1777 		wait = MBX_WAIT;
1778 	} else {
1779 		wait = MBX_NOWAIT;
1780 	}
1781 	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
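	/*
	 * MBXERR_LINK_DOWN is tolerated here; presumably the link was
	 * already down.
	 */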
1782 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
1783 	    (rc != MBXERR_LINK_DOWN)) {
1784 		rval = 1;
1785 		goto reset_link_fail;
1786 	}
1787 
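	/* Wait up to 60 seconds (120 x 500ms) for the link to come down */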
1788 	tmo = 120;
1789 	do {
1790 		delay(drv_usectohz(500000));
1791 		tmo--;
1792 
1793 		if (!tmo) {
1794 			rval = 1;
1795 
1796 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1797 			    "Linkdown timeout.");
1798 
1799 			goto reset_link_fail;
1800 		}
1801 	} while ((hba->state >= FC_LINK_UP) && (hba->state != FC_ERROR));
1802 
1803 	if (linkup) {
1804 		/*
1805 		 * Setup and issue mailbox INITIALIZE LINK command
1806 		 */
1807 
1808 		if (wait == MBX_NOWAIT) {
1809 			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
1810 			    == NULL) {
1811 				EMLXS_MSGF(EMLXS_CONTEXT,
1812 				    &emlxs_link_reset_failed_msg,
1813 				    "Unable to allocate mailbox buffer.");
1814 				rval = 1;
1815 				goto reset_link_fail;
1816 			}
1817 			mb = (MAILBOX *)mbq;
1818 		} else {
1819 			/* Reuse mbq from previous mbox */
1820 			mb = (MAILBOX *)mbq;
1821 		}
1822 		cfg = &CFG;
1823 
1824 		emlxs_mb_init_link(hba, mbq,
1825 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1826 
1827 		mb->un.varInitLnk.lipsr_AL_PA = 0;
1828 
1829 		/* Clear the loopback mode */
1830 		mutex_enter(&EMLXS_PORT_LOCK);
1831 		hba->flag &= ~FC_LOOPBACK_MODE;
1832 		hba->loopback_tics = 0;
1833 		mutex_exit(&EMLXS_PORT_LOCK);
1834 
1835 		rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1836 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1837 			rval = 1;
1838 			goto reset_link_fail;
1839 		}
1840 
1841 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1842 	}
1843 
1844 reset_link_fail:
1845 
1846 	if ((wait == MBX_WAIT) && mbq) {
1847 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1848 	}
1849 
1850 	return (rval);
1851 } /* emlxs_reset_link() */
1852 
1853 
1854 extern int
1855 emlxs_online(emlxs_hba_t *hba)
1856 {
1857 	emlxs_port_t *port = &PPORT;
1858 	int32_t rval = 0;
1859 	uint32_t i = 0;
1860 
1861 	/* Make sure adapter is offline or exit trying (30 seconds) */
1862 	while (i++ < 30) {
1863 		/* Check if adapter is already going online */
1864 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1865 			return (0);
1866 		}
1867 
1868 		mutex_enter(&EMLXS_PORT_LOCK);
1869 
1870 		/* Check again */
1871 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1872 			mutex_exit(&EMLXS_PORT_LOCK);
1873 			return (0);
1874 		}
1875 
1876 		/* Check if adapter is offline */
1877 		if (hba->flag & FC_OFFLINE_MODE) {
1878 			/* Mark it going online */
1879 			hba->flag &= ~FC_OFFLINE_MODE;
1880 			hba->flag |= FC_ONLINING_MODE;
1881 
1882 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1883 			mutex_exit(&EMLXS_PORT_LOCK);
1884 			break;
1885 		}
1886 
1887 		mutex_exit(&EMLXS_PORT_LOCK);
1888 
1889 		BUSYWAIT_MS(1000);
1890 	}
1891 
1892 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1893 	    "Going online...");
1894 
1895 	if (rval = EMLXS_SLI_ONLINE(hba)) {
1896 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
1897 		    rval);
1898 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1899 
1900 		/* Set FC_OFFLINE_MODE */
1901 		mutex_enter(&EMLXS_PORT_LOCK);
1902 		hba->flag |= FC_OFFLINE_MODE;
1903 		hba->flag &= ~FC_ONLINING_MODE;
1904 		mutex_exit(&EMLXS_PORT_LOCK);
1905 
1906 		return (rval);
1907 	}
1908 
1909 	/* Start the timer */
1910 	emlxs_timer_start(hba);
1911 
1912 	/* Set FC_ONLINE_MODE */
1913 	mutex_enter(&EMLXS_PORT_LOCK);
1914 	hba->flag |= FC_ONLINE_MODE;
1915 	hba->flag &= ~FC_ONLINING_MODE;
1916 	mutex_exit(&EMLXS_PORT_LOCK);
1917 
1918 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1919 
1920 #ifdef SFCT_SUPPORT
1921 	if (port->flag & EMLXS_TGT_ENABLED) {
1922 		(void) emlxs_fct_port_initialize(port);
1923 	}
1924 #endif /* SFCT_SUPPORT */
1925 
1926 	return (rval);
1927 
1928 } /* emlxs_online() */
1929 
1930 
1931 extern int
1932 emlxs_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1933 {
1934 	emlxs_port_t *port = &PPORT;
1935 	uint32_t i = 0;
1936 	int rval = 1;
1937 
1938 	/* Make sure adapter is online or exit trying (30 seconds) */
1939 	while (i++ < 30) {
1940 		/* Check if adapter is already going offline */
1941 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1942 			return (0);
1943 		}
1944 
1945 		mutex_enter(&EMLXS_PORT_LOCK);
1946 
1947 		/* Check again */
1948 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1949 			mutex_exit(&EMLXS_PORT_LOCK);
1950 			return (0);
1951 		}
1952 
1953 		/* Check if adapter is online */
1954 		if (hba->flag & FC_ONLINE_MODE) {
1955 			/* Mark it going offline */
1956 			hba->flag &= ~FC_ONLINE_MODE;
1957 			hba->flag |= FC_OFFLINING_MODE;
1958 
1959 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1960 			mutex_exit(&EMLXS_PORT_LOCK);
1961 			break;
1962 		}
1963 
1964 		mutex_exit(&EMLXS_PORT_LOCK);
1965 
1966 		BUSYWAIT_MS(1000);
1967 	}
1968 
1969 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1970 	    "Going offline...");
1971 
1972 	/* Declare link down */
1973 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1974 		(void) emlxs_fcf_shutdown_notify(port, 1);
1975 	} else {
1976 		emlxs_linkdown(hba);
1977 	}
1978 
1979 #ifdef SFCT_SUPPORT
1980 	if (port->flag & EMLXS_TGT_ENABLED) {
1981 		(void) emlxs_fct_port_shutdown(port);
1982 	}
1983 #endif /* SFCT_SUPPORT */
1984 
1985 	/* Check if adapter was shutdown */
1986 	if (hba->flag & FC_HARDWARE_ERROR) {
1987 		/*
1988 		 * Force mailbox cleanup
1989 		 * This will wake any sleeping or polling threads
1990 		 */
1991 		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1992 	}
1993 
1994 	/* Pause here for the IO to settle */
1995 	delay(drv_usectohz(1000000));	/* 1 sec */
1996 
1997 	/* Unregister all nodes */
1998 	emlxs_ffcleanup(hba);
1999 
2000 	if (hba->bus_type == SBUS_FC) {
2001 		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
2002 #ifdef FMA_SUPPORT
2003 		/* Access handle validation */
2004 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
2005 #endif  /* FMA_SUPPORT */
2006 	}
2007 
2008 	/* Stop the timer */
2009 	emlxs_timer_stop(hba);
2010 
2011 	/* For safety flush every iotag list */
2012 	if (emlxs_iotag_flush(hba)) {
2013 		/* Pause here for the IO to flush */
2014 		delay(drv_usectohz(1000));
2015 	}
2016 
2017 	/* Wait for poll command request to settle */
2018 	while (hba->io_poll_count > 0) {
2019 		delay(drv_usectohz(2000000));   /* 2 sec */
2020 	}
2021 
2022 	/* Shutdown the adapter interface */
2023 	EMLXS_SLI_OFFLINE(hba, reset_requested);
2024 
2025 	mutex_enter(&EMLXS_PORT_LOCK);
2026 	hba->flag |= FC_OFFLINE_MODE;
2027 	hba->flag &= ~FC_OFFLINING_MODE;
2028 	mutex_exit(&EMLXS_PORT_LOCK);
2029 
2030 	rval = 0;
2031 
2032 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
2033 
2034 done:
2035 
2036 	return (rval);
2037 
2038 } /* emlxs_offline() */
2039 
2040 
2041 
2042 extern int
2043 emlxs_power_down(emlxs_hba_t *hba)
2044 {
2045 #ifdef FMA_SUPPORT
2046 	emlxs_port_t *port = &PPORT;
2047 #endif  /* FMA_SUPPORT */
2048 	int32_t rval = 0;
2049 
2050 	if ((rval = emlxs_offline(hba, 0))) {
2051 		return (rval);
2052 	}
2053 	EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);
2054 
2055 
2056 #ifdef FMA_SUPPORT
2057 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2058 	    != DDI_FM_OK) {
2059 		EMLXS_MSGF(EMLXS_CONTEXT,
2060 		    &emlxs_invalid_access_handle_msg, NULL);
2061 		return (1);
2062 	}
2063 #endif  /* FMA_SUPPORT */
2064 
2065 	return (0);
2066 
2067 } /* End emlxs_power_down */
2068 
2069 
2070 extern int
2071 emlxs_power_up(emlxs_hba_t *hba)
2072 {
2073 #ifdef FMA_SUPPORT
2074 	emlxs_port_t *port = &PPORT;
2075 #endif  /* FMA_SUPPORT */
2076 	int32_t rval = 0;
2077 
2078 
2079 #ifdef FMA_SUPPORT
2080 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2081 	    != DDI_FM_OK) {
2082 		EMLXS_MSGF(EMLXS_CONTEXT,
2083 		    &emlxs_invalid_access_handle_msg, NULL);
2084 		return (1);
2085 	}
2086 #endif  /* FMA_SUPPORT */
2087 
2088 	/* Bring adapter online */
2089 	if ((rval = emlxs_online(hba))) {
2090 		if (hba->pci_cap_offset[PCI_CAP_ID_PM]) {
2091 			/* Put chip in D3 state */
2092 			(void) ddi_put8(hba->pci_acc_handle,
2093 			    (uint8_t *)(hba->pci_addr +
2094 			    hba->pci_cap_offset[PCI_CAP_ID_PM] +
2095 			    PCI_PMCSR),
2096 			    (uint8_t)PCI_PMCSR_D3HOT);
2097 		}
2098 		return (rval);
2099 	}
2100 
2101 	return (rval);
2102 
2103 } /* emlxs_power_up() */
2104 
2105 
2106 /*
2107  *
2108  * NAME:     emlxs_ffcleanup
2109  *
2110  * FUNCTION: Clean up all the Firefly resources used in configuring the adapter
2111  *
2112  * EXECUTION ENVIRONMENT: process only
2113  *
2114  * CALLED FROM: CFG_TERM
2115  *
2116  * INPUT: hba       - pointer to the dev_ctl area.
2117  *
2118  * RETURNS: none
2119  */
2120 extern void
2121 emlxs_ffcleanup(emlxs_hba_t *hba)
2122 {
2123 	emlxs_port_t *port = &PPORT;
2124 	uint32_t i;
2125 
2126 	/* Disable all but the mailbox interrupt */
2127 	EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);
2128 
2129 	/* Make sure all port nodes are destroyed */
2130 	for (i = 0; i < MAX_VPORTS; i++) {
2131 		port = &VPORT(i);
2132 
2133 		if (port->node_count) {
2134 			(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2135 		}
2136 	}
2137 
2138 	/* Clear all interrupt enable conditions */
2139 	EMLXS_SLI_DISABLE_INTR(hba, 0);
2140 
2141 	return;
2142 
2143 } /* emlxs_ffcleanup() */
2144 
2145 
2146 extern uint16_t
2147 emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
2148 {
2149 	emlxs_hba_t *hba;
2150 	emlxs_port_t *port;
2151 	uint16_t iotag;
2152 	uint32_t i;
2153 
2154 	hba = cp->hba;
2155 
2156 	mutex_enter(&EMLXS_FCTAB_LOCK);
2157 
2158 	if (sbp->iotag != 0) {
2159 		port = &PPORT;
2160 
2161 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2162 		    "Pkt already registered! channel=%d iotag=%d sbp=%p",
2163 		    sbp->channel, sbp->iotag, sbp);
2164 	}
2165 
2166 	iotag = 0;
2167 	for (i = 0; i < hba->max_iotag; i++) {
2168 		if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
2169 			hba->fc_iotag = 1;
2170 		}
2171 		iotag = hba->fc_iotag++;
2172 
2173 		if (hba->fc_table[iotag] == 0 ||
2174 		    hba->fc_table[iotag] == STALE_PACKET) {
2175 			hba->io_count++;
2176 			hba->fc_table[iotag] = sbp;
2177 
2178 			sbp->iotag = iotag;
2179 			sbp->channel = cp;
2180 
2181 			break;
2182 		}
2183 		iotag = 0;
2184 	}
2185 
2186 	mutex_exit(&EMLXS_FCTAB_LOCK);
2187 
2188 	/*
2189 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2190 	 *    "register_pkt: channel=%d iotag=%d sbp=%p",
2191 	 *    cp->channelno, iotag, sbp);
2192 	 */
2193 
2194 	return (iotag);
2195 
2196 } /* emlxs_register_pkt() */
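
#if 0	/* Editor's sketch -- illustrative only, not part of the driver */
/*
 * emlxs_register_pkt() above is a classic "rotor" allocator: a cursor
 * sweeps a fixed table, wrapping at the end, and claims the first free
 * (NULL or stale) slot. A self-contained sketch; the sizes and names
 * are illustrative, and the caller is assumed to hold the table lock
 * (EMLXS_FCTAB_LOCK in the driver).
 */
#include <stddef.h>

#define MAX_TAG		4096
#define STALE		((void *)-1)	/* poisoned-slot sentinel */

static void *tag_table[MAX_TAG];
static unsigned tag_rotor = 1;		/* tag 0 means "no tag" */

static unsigned
alloc_tag(void *owner)
{
	unsigned i, tag;

	for (i = 0; i < MAX_TAG; i++) {
		if (tag_rotor == 0 || tag_rotor >= MAX_TAG)
			tag_rotor = 1;		/* wrap, skipping tag 0 */
		tag = tag_rotor++;

		if (tag_table[tag] == NULL || tag_table[tag] == STALE) {
			tag_table[tag] = owner;	/* claim the slot */
			return (tag);
		}
	}
	return (0);	/* table full */
}
#endif	/* sketch */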
2197 
2198 
2199 
2200 extern emlxs_buf_t *
2201 emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
2202 {
2203 	emlxs_hba_t *hba;
2204 	emlxs_buf_t *sbp;
2205 
2206 	sbp = NULL;
2207 	hba = cp->hba;
2208 
2209 	/* Check the iotag range */
2210 	if ((iotag == 0) || (iotag >= hba->max_iotag)) {
2211 		return (NULL);
2212 	}
2213 
2214 	/* Remove the sbp from the table */
2215 	mutex_enter(&EMLXS_FCTAB_LOCK);
2216 	sbp = hba->fc_table[iotag];
2217 
2218 	if (!sbp || (sbp == STALE_PACKET)) {
2219 		mutex_exit(&EMLXS_FCTAB_LOCK);
2220 		return (sbp);
2221 	}
2222 
2223 	hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
2224 	hba->io_count--;
2225 	sbp->iotag = 0;
2226 
2227 	mutex_exit(&EMLXS_FCTAB_LOCK);
2228 
2229 
2230 	/* Clean up the sbp */
2231 	mutex_enter(&sbp->mtx);
2232 
2233 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
2234 		sbp->pkt_flags &= ~PACKET_IN_TXQ;
2235 		hba->channel_tx_count--;
2236 	}
2237 
2238 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
2239 		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
2240 	}
2241 
2242 	if (sbp->bmp) {
2243 		emlxs_mem_put(hba, MEM_BPL, (void *)sbp->bmp);
2244 		sbp->bmp = 0;
2245 	}
2246 
2247 	mutex_exit(&sbp->mtx);
2248 
2249 	return (sbp);
2250 
2251 } /* emlxs_unregister_pkt() */
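
#if 0	/* Editor's sketch -- illustrative only, not part of the driver */
/*
 * emlxs_unregister_pkt() shows the other half of the table protocol: a
 * "forced" release poisons the slot with a sentinel instead of clearing
 * it, so a late hardware completion for that tag can be recognized and
 * dropped rather than dereferencing freed memory. Names and sizes are
 * illustrative; the table lock is assumed held by the caller.
 */
#include <stddef.h>

#define N_TAGS		4096
#define STALE_ENTRY	((void *)-1)

static void *entry_table[N_TAGS];

static void *
free_tag(unsigned tag, int forced)
{
	void *owner;

	if (tag == 0 || tag >= N_TAGS)
		return (NULL);			/* out of range */

	owner = entry_table[tag];
	if (owner == NULL || owner == STALE_ENTRY)
		return (owner);			/* nothing live to release */

	/* Poison on forced release; a late completion sees STALE_ENTRY */
	entry_table[tag] = forced ? STALE_ENTRY : NULL;
	return (owner);
}
#endif	/* sketch */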
2252 
2253 
2254 
2255 /* Flush all IO's to all nodes for a given IO Channel */
2256 extern uint32_t
2257 emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
2258 {
2259 	emlxs_port_t *port = &PPORT;
2260 	emlxs_buf_t *sbp;
2261 	IOCBQ *iocbq;
2262 	IOCBQ *next;
2263 	IOCB *iocb;
2264 	uint32_t channelno;
2265 	Q abort;
2266 	NODELIST *ndlp;
2267 	IOCB *icmd;
2268 	MATCHMAP *mp;
2269 	uint32_t i;
2270 	uint8_t flag[MAX_CHANNEL];
2271 
2272 	channelno = cp->channelno;
2273 	bzero((void *)&abort, sizeof (Q));
2274 	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));
2275 
2276 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2277 
2278 	/* While a node needs servicing */
2279 	while (cp->nodeq.q_first) {
2280 		ndlp = (NODELIST *) cp->nodeq.q_first;
2281 
2282 		/* Check if priority queue is not empty */
2283 		if (ndlp->nlp_ptx[channelno].q_first) {
2284 			/* Transfer all iocb's to local queue */
2285 			if (abort.q_first == 0) {
2286 				abort.q_first =
2287 				    ndlp->nlp_ptx[channelno].q_first;
2288 			} else {
2289 				((IOCBQ *)abort.q_last)->next =
2290 				    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
2291 			}
2292 			flag[channelno] = 1;
2293 
2294 			abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2295 			abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2296 		}
2297 
2298 		/* Check if tx queue is not empty */
2299 		if (ndlp->nlp_tx[channelno].q_first) {
2300 			/* Transfer all iocb's to local queue */
2301 			if (abort.q_first == 0) {
2302 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2303 			} else {
2304 				((IOCBQ *)abort.q_last)->next =
2305 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2306 			}
2307 
2308 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2309 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
			flag[channelno] = 1;
2310 		}
2311 
2312 		/* Clear the queue pointers */
2313 		ndlp->nlp_ptx[channelno].q_first = NULL;
2314 		ndlp->nlp_ptx[channelno].q_last = NULL;
2315 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2316 
2317 		ndlp->nlp_tx[channelno].q_first = NULL;
2318 		ndlp->nlp_tx[channelno].q_last = NULL;
2319 		ndlp->nlp_tx[channelno].q_cnt = 0;
2320 
2321 		/* Remove node from service queue */
2322 
2323 		/* If this is the last node on list */
2324 		if (cp->nodeq.q_last == (void *)ndlp) {
2325 			cp->nodeq.q_last = NULL;
2326 			cp->nodeq.q_first = NULL;
2327 			cp->nodeq.q_cnt = 0;
2328 		} else {
2329 			/* Remove node from head */
2330 			cp->nodeq.q_first = ndlp->nlp_next[channelno];
2331 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2332 			    cp->nodeq.q_first;
2333 			cp->nodeq.q_cnt--;
2334 		}
2335 
2336 		/* Clear node */
2337 		ndlp->nlp_next[channelno] = NULL;
2338 	}
2339 
2340 	/* First cleanup the iocb's while still holding the lock */
2341 	iocbq = (IOCBQ *) abort.q_first;
2342 	while (iocbq) {
2343 		/* Free the IoTag and the bmp */
2344 		iocb = &iocbq->iocb;
2345 
2346 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2347 			sbp = iocbq->sbp;
2348 			if (sbp) {
2349 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2350 			}
2351 		} else {
2352 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2353 			    iocb->ULPIOTAG, 0);
2354 		}
2355 
2356 		if (sbp && (sbp != STALE_PACKET)) {
2357 			mutex_enter(&sbp->mtx);
2358 
2359 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2360 			/*
2361 			 * If the fpkt is already set, then we will leave it
2362 			 * alone. This ensures that this pkt is only accounted
2363 			 * for on one fpkt->flush_count
2364 			 */
2365 			if (!sbp->fpkt && fpkt) {
2366 				mutex_enter(&fpkt->mtx);
2367 				sbp->fpkt = fpkt;
2368 				fpkt->flush_count++;
2369 				mutex_exit(&fpkt->mtx);
2370 			}
2371 
2372 			mutex_exit(&sbp->mtx);
2373 		}
2374 
2375 		iocbq = (IOCBQ *)iocbq->next;
2376 	}	/* end of while */
2377 
2378 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2379 
2380 	/* Now abort the iocb's */
2381 	iocbq = (IOCBQ *)abort.q_first;
2382 	while (iocbq) {
2383 		/* Save the next iocbq for now */
2384 		next = (IOCBQ *)iocbq->next;
2385 
2386 		/* Unlink this iocbq */
2387 		iocbq->next = NULL;
2388 
2389 		/* Get the pkt */
2390 		sbp = (emlxs_buf_t *)iocbq->sbp;
2391 
2392 		if (sbp) {
2393 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2394 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2395 
2396 			if (hba->state >= FC_LINK_UP) {
2397 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2398 				    IOERR_ABORT_REQUESTED, 1);
2399 			} else {
2400 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2401 				    IOERR_LINK_DOWN, 1);
2402 			}
2403 
2404 		}
2405 		/* Free the iocb and its associated buffers */
2406 		else {
2407 			icmd = &iocbq->iocb;
2408 
2409 			/* SLI3 */
2410 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2411 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2412 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2413 				if ((hba->flag &
2414 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2415 					/* HBA is detaching or offlining */
2416 					if (icmd->ULPCOMMAND !=
2417 					    CMD_QUE_RING_LIST64_CN) {
2418 						void	*tmp;
2419 						RING *rp;
2420 
2421 						rp = &hba->sli.sli3.
2422 						    ring[channelno];
2423 						for (i = 0;
2424 						    i < icmd->ULPBDECOUNT;
2425 						    i++) {
2426 							mp = EMLXS_GET_VADDR(
2427 							    hba, rp, icmd);
2428 
2429 							tmp = (void *)mp;
2430 							if (mp) {
2431 							emlxs_mem_put(
2432 							    hba, MEM_BUF, tmp);
2433 							}
2434 						}
2435 					}
2436 
2437 					emlxs_mem_put(hba, MEM_IOCB,
2438 					    (void *)iocbq);
2439 				} else {
2440 					/* repost the unsolicited buffer */
2441 					EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
2442 					    iocbq);
2443 				}
2444 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2445 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2446 
2447 				emlxs_tx_put(iocbq, 1);
2448 			}
2449 		}
2450 
2451 		iocbq = next;
2452 
2453 	}	/* end of while */
2454 
2455 	/* Now trigger channel service */
2456 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2457 		if (!flag[channelno]) {
2458 			continue;
2459 		}
2460 
2461 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2462 	}
2463 
2464 	return (abort.q_cnt);
2465 
2466 } /* emlxs_tx_channel_flush() */
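
#if 0	/* Editor's sketch -- illustrative only, not part of the driver */
/*
 * All of the tx flush routines in this file share one shape: splice
 * every pending entry onto a private list while holding the queue lock,
 * then complete the entries with the lock dropped, since completion
 * callbacks may block or re-enter the driver. A minimal model, with
 * pthreads standing in for the kernel mutex and an illustrative stub
 * in place of emlxs_pkt_complete():
 */
#include <pthread.h>
#include <stddef.h>

struct io {
	struct io *next;
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct io *q_head;

static void
complete_io(struct io *io, int error)
{
	(void) io;
	(void) error;		/* completion callback stub */
}

static unsigned
flush_all(int error)
{
	struct io *list, *io, *next;
	unsigned cnt = 0;

	/* Phase 1: detach the whole queue under the lock */
	pthread_mutex_lock(&q_lock);
	list = q_head;
	q_head = NULL;
	pthread_mutex_unlock(&q_lock);

	/* Phase 2: complete everything with the lock dropped */
	for (io = list; io != NULL; io = next) {
		next = io->next;
		io->next = NULL;
		complete_io(io, error);
		cnt++;
	}
	return (cnt);
}
#endif	/* sketch */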
2467 
2468 
2469 /* Flush all IO's on all or a given channel for a given node */
2470 extern uint32_t
2471 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
2472     uint32_t shutdown, emlxs_buf_t *fpkt)
2473 {
2474 	emlxs_hba_t *hba = HBA;
2475 	emlxs_buf_t *sbp;
2476 	uint32_t channelno;
2477 	CHANNEL *cp;
2478 	IOCB *icmd;
2479 	IOCBQ *iocbq;
2480 	NODELIST *prev;
2481 	IOCBQ *next;
2482 	IOCB *iocb;
2483 	Q abort;
2484 	uint32_t i;
2485 	MATCHMAP *mp;
2486 	uint8_t flag[MAX_CHANNEL];
2487 
2488 	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));
2489 
2490 	/* Flush all I/O's on tx queue to this target */
2491 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2492 
2493 	if (!ndlp->nlp_base && shutdown) {
2494 		ndlp->nlp_active = 0;
2495 	}
2496 
2497 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2498 		cp = &hba->chan[channelno];
2499 
2500 		if (chan && cp != chan) {
2501 			continue;
2502 		}
2503 
2504 		if (!ndlp->nlp_base || shutdown) {
2505 			/* Check if priority queue is not empty */
2506 			if (ndlp->nlp_ptx[channelno].q_first) {
2507 				/* Transfer all iocb's to local queue */
2508 				if (abort.q_first == 0) {
2509 					abort.q_first =
2510 					    ndlp->nlp_ptx[channelno].q_first;
2511 				} else {
2512 					((IOCBQ *)(abort.q_last))->next =
2513 					    (IOCBQ *)ndlp->nlp_ptx[channelno].
2514 					    q_first;
2515 				}
2516 
2517 				flag[channelno] = 1;
2518 
2519 				abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2520 				abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2521 			}
2522 		}
2523 
2524 		/* Check if tx queue is not empty */
2525 		if (ndlp->nlp_tx[channelno].q_first) {
2526 
2527 			/* Transfer all iocb's to local queue */
2528 			if (abort.q_first == 0) {
2529 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2530 			} else {
2531 				((IOCBQ *)abort.q_last)->next =
2532 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2533 			}
2534 
2535 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2536 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
			flag[channelno] = 1;
2537 		}
2538 
2539 		/* Clear the queue pointers */
2540 		ndlp->nlp_ptx[channelno].q_first = NULL;
2541 		ndlp->nlp_ptx[channelno].q_last = NULL;
2542 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2543 
2544 		ndlp->nlp_tx[channelno].q_first = NULL;
2545 		ndlp->nlp_tx[channelno].q_last = NULL;
2546 		ndlp->nlp_tx[channelno].q_cnt = 0;
2547 
2548 		/* If this node was on the channel queue, remove it */
2549 		if (ndlp->nlp_next[channelno]) {
2550 			/* If this is the only node on list */
2551 			if (cp->nodeq.q_first == (void *)ndlp &&
2552 			    cp->nodeq.q_last == (void *)ndlp) {
2553 				cp->nodeq.q_last = NULL;
2554 				cp->nodeq.q_first = NULL;
2555 				cp->nodeq.q_cnt = 0;
2556 			} else if (cp->nodeq.q_first == (void *)ndlp) {
2557 				cp->nodeq.q_first = ndlp->nlp_next[channelno];
2558 				((NODELIST *) cp->nodeq.q_last)->
2559 				    nlp_next[channelno] = cp->nodeq.q_first;
2560 				cp->nodeq.q_cnt--;
2561 			} else {
2562 				/*
2563 				 * This is a little more difficult: find the
2564 				 * previous node in the circular channel queue
2565 				 */
2566 				prev = ndlp;
2567 				while (prev->nlp_next[channelno] != ndlp) {
2568 					prev = prev->nlp_next[channelno];
2569 				}
2570 
2571 				prev->nlp_next[channelno] =
2572 				    ndlp->nlp_next[channelno];
2573 
2574 				if (cp->nodeq.q_last == (void *)ndlp) {
2575 					cp->nodeq.q_last = (void *)prev;
2576 				}
2577 				cp->nodeq.q_cnt--;
2578 
2579 			}
2580 
2581 			/* Clear node */
2582 			ndlp->nlp_next[channelno] = NULL;
2583 		}
2584 
2585 	}
2586 
2587 	/* First cleanup the iocb's while still holding the lock */
2588 	iocbq = (IOCBQ *) abort.q_first;
2589 	while (iocbq) {
2590 		/* Free the IoTag and the bmp */
2591 		iocb = &iocbq->iocb;
2592 
2593 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2594 			sbp = iocbq->sbp;
2595 			if (sbp) {
2596 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2597 			}
2598 		} else {
2599 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2600 			    iocb->ULPIOTAG, 0);
2601 		}
2602 
2603 		if (sbp && (sbp != STALE_PACKET)) {
2604 			mutex_enter(&sbp->mtx);
2605 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2606 			/*
2607 			 * If the fpkt is already set, then we will leave it
2608 			 * alone. This ensures that this pkt is only accounted
2609 			 * for on one fpkt->flush_count
2610 			 */
2611 			if (!sbp->fpkt && fpkt) {
2612 				mutex_enter(&fpkt->mtx);
2613 				sbp->fpkt = fpkt;
2614 				fpkt->flush_count++;
2615 				mutex_exit(&fpkt->mtx);
2616 			}
2617 
2618 			mutex_exit(&sbp->mtx);
2619 		}
2620 
2621 		iocbq = (IOCBQ *) iocbq->next;
2622 
2623 	}	/* end of while */
2624 
2625 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2626 
2627 	/* Now abort the iocb's outside the locks */
2628 	iocbq = (IOCBQ *)abort.q_first;
2629 	while (iocbq) {
2630 		/* Save the next iocbq for now */
2631 		next = (IOCBQ *)iocbq->next;
2632 
2633 		/* Unlink this iocbq */
2634 		iocbq->next = NULL;
2635 
2636 		/* Get the pkt */
2637 		sbp = (emlxs_buf_t *)iocbq->sbp;
2638 
2639 		if (sbp) {
2640 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2641 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2642 
2643 			if (hba->state >= FC_LINK_UP) {
2644 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2645 				    IOERR_ABORT_REQUESTED, 1);
2646 			} else {
2647 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2648 				    IOERR_LINK_DOWN, 1);
2649 			}
2650 
2651 		}
2652 		/* Free the iocb and its associated buffers */
2653 		else {
2654 			/* CMD_CLOSE_XRI_CN should also free the memory */
2655 			icmd = &iocbq->iocb;
2656 
2657 			/* SLI3 */
2658 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2659 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2660 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2661 				if ((hba->flag &
2662 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2663 					/* HBA is detaching or offlining */
2664 					if (icmd->ULPCOMMAND !=
2665 					    CMD_QUE_RING_LIST64_CN) {
2666 						void	*tmp;
2667 						RING *rp;
2668 						int ch;
2669 
2670 						ch = ((CHANNEL *)
2671 						    iocbq->channel)->channelno;
2672 						rp = &hba->sli.sli3.ring[ch];
2673 						for (i = 0;
2674 						    i < icmd->ULPBDECOUNT;
2675 						    i++) {
2676 							mp = EMLXS_GET_VADDR(
2677 							    hba, rp, icmd);
2678 
2679 							tmp = (void *)mp;
2680 							if (mp) {
2681 							emlxs_mem_put(
2682 							    hba, MEM_BUF, tmp);
2683 							}
2684 						}
2685 					}
2686 
2687 					emlxs_mem_put(hba, MEM_IOCB,
2688 					    (void *)iocbq);
2689 				} else {
2690 					/* repost the unsolicited buffer */
2691 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2692 					    (CHANNEL *)iocbq->channel, iocbq);
2693 				}
2694 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2695 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2696 				/*
2697 				 * Resend the abort iocbq if any
2698 				 */
2699 				emlxs_tx_put(iocbq, 1);
2700 			}
2701 		}
2702 
2703 		iocbq = next;
2704 
2705 	}	/* end of while */
2706 
2707 	/* Now trigger channel service */
2708 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2709 		if (!flag[channelno]) {
2710 			continue;
2711 		}
2712 
2713 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2714 	}
2715 
2716 	return (abort.q_cnt);
2717 
2718 } /* emlxs_tx_node_flush() */
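
#if 0	/* Editor's sketch -- illustrative only, not part of the driver */
/*
 * The channel nodeq above is a circular, singly linked ring
 * (q_last->next always points back to q_first), so removing an
 * arbitrary node means walking to its predecessor first, exactly as
 * the "little more difficult" case above does. Illustrative types:
 */
#include <stddef.h>

struct rnode {
	struct rnode *next;
};

struct ring {
	struct rnode *first;
	struct rnode *last;	/* last->next == first when non-empty */
	unsigned cnt;
};

static void
ring_remove(struct ring *q, struct rnode *n)
{
	struct rnode *prev;

	if (q->first == n && q->last == n) {	/* only node on the ring */
		q->first = q->last = NULL;
		q->cnt = 0;
	} else if (q->first == n) {		/* removing the head */
		q->first = n->next;
		q->last->next = q->first;	/* keep the ring closed */
		q->cnt--;
	} else {
		/* Walk around the ring to the predecessor, then bypass n */
		prev = n;
		while (prev->next != n)
			prev = prev->next;
		prev->next = n->next;
		if (q->last == n)
			q->last = prev;
		q->cnt--;
	}
	n->next = NULL;
}
#endif	/* sketch */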
2719 
2720 
2721 /* Check for IO's on all or a given channel for a given node */
2722 extern uint32_t
2723 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
2724 {
2725 	emlxs_hba_t *hba = HBA;
2726 	uint32_t channelno;
2727 	CHANNEL *cp;
2728 	uint32_t count;
2729 
2730 	count = 0;
2731 
2732 	/* Flush all I/O's on tx queue to this target */
2733 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2734 
2735 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2736 		cp = &hba->chan[channelno];
2737 
2738 		if (chan && cp != chan) {
2739 			continue;
2740 		}
2741 
2742 		/* Check if priority queue is not empty */
2743 		if (ndlp->nlp_ptx[channelno].q_first) {
2744 			count += ndlp->nlp_ptx[channelno].q_cnt;
2745 		}
2746 
2747 		/* Check if tx queue is not empty */
2748 		if (ndlp->nlp_tx[channelno].q_first) {
2749 			count += ndlp->nlp_tx[channelno].q_cnt;
2750 		}
2751 
2752 	}
2753 
2754 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2755 
2756 	return (count);
2757 
2758 } /* emlxs_tx_node_check() */
2759 
2760 
2761 
2762 /* Flush all IO's on any channel for a given node's lun */
2763 extern uint32_t
2764 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2765     emlxs_buf_t *fpkt)
2766 {
2767 	emlxs_hba_t *hba = HBA;
2768 	emlxs_buf_t *sbp;
2769 	uint32_t channelno;
2770 	IOCBQ *iocbq;
2771 	IOCBQ *prev;
2772 	IOCBQ *next;
2773 	IOCB *iocb;
2774 	IOCB *icmd;
2775 	Q abort;
2776 	uint32_t i;
2777 	MATCHMAP *mp;
2778 	uint8_t flag[MAX_CHANNEL];
2779 
2780 	if (lun == EMLXS_LUN_NONE) {
2781 		return (0);
2782 	}
2783 
2784 	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));
2785 
2786 	/* Flush I/O's on txQ to this target's lun */
2787 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2788 
2789 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2790 
2791 		/* Scan the priority queue first */
2792 		prev = NULL;
2793 		iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;
2794 
2795 		while (iocbq) {
2796 			next = (IOCBQ *)iocbq->next;
2797 			iocb = &iocbq->iocb;
2798 			sbp = (emlxs_buf_t *)iocbq->sbp;
2799 
2800 			/* Check if this IO is for our lun */
2801 			if (sbp && (sbp->lun == lun)) {
2802 				/* Remove iocb from the node's ptx queue */
2803 				if (next == 0) {
2804 					ndlp->nlp_ptx[channelno].q_last =
2805 					    (uint8_t *)prev;
2806 				}
2807 
2808 				if (prev == 0) {
2809 					ndlp->nlp_ptx[channelno].q_first =
2810 					    (uint8_t *)next;
2811 				} else {
2812 					prev->next = next;
2813 				}
2814 
2815 				iocbq->next = NULL;
2816 				ndlp->nlp_ptx[channelno].q_cnt--;
2817 
2818 				/*
2819 				 * Add this iocb to our local abort Q
2820 				 */
2821 				if (abort.q_first) {
2822 					((IOCBQ *)abort.q_last)->next = iocbq;
2823 					abort.q_last = (uint8_t *)iocbq;
2824 					abort.q_cnt++;
2825 				} else {
2826 					abort.q_first = (uint8_t *)iocbq;
2827 					abort.q_last = (uint8_t *)iocbq;
2828 					abort.q_cnt = 1;
2829 				}
2830 				iocbq->next = NULL;
2831 				flag[channelno] = 1;
2832 
2833 			} else {
2834 				prev = iocbq;
2835 			}
2836 
2837 			iocbq = next;
2838 
2839 		}	/* while (iocbq) */
2840 
2841 
2842 		/* Scan the regular queue */
2843 		prev = NULL;
2844 		iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2845 
2846 		while (iocbq) {
2847 			next = (IOCBQ *)iocbq->next;
2848 			iocb = &iocbq->iocb;
2849 			sbp = (emlxs_buf_t *)iocbq->sbp;
2850 
2851 			/* Check if this IO is for our lun */
2852 			if (sbp && (sbp->lun == lun)) {
2853 				/* Remove iocb from the node's tx queue */
2854 				if (next == 0) {
2855 					ndlp->nlp_tx[channelno].q_last =
2856 					    (uint8_t *)prev;
2857 				}
2858 
2859 				if (prev == 0) {
2860 					ndlp->nlp_tx[channelno].q_first =
2861 					    (uint8_t *)next;
2862 				} else {
2863 					prev->next = next;
2864 				}
2865 
2866 				iocbq->next = NULL;
2867 				ndlp->nlp_tx[channelno].q_cnt--;
2868 
2869 				/*
2870 				 * Add this iocb to our local abort Q
2871 				 */
2872 				if (abort.q_first) {
2873 					((IOCBQ *) abort.q_last)->next = iocbq;
2874 					abort.q_last = (uint8_t *)iocbq;
2875 					abort.q_cnt++;
2876 				} else {
2877 					abort.q_first = (uint8_t *)iocbq;
2878 					abort.q_last = (uint8_t *)iocbq;
2879 					abort.q_cnt = 1;
2880 				}
2881 				iocbq->next = NULL;
				flag[channelno] = 1;
2882 			} else {
2883 				prev = iocbq;
2884 			}
2885 
2886 			iocbq = next;
2887 
2888 		}	/* while (iocbq) */
2889 	}	/* for loop */
2890 
2891 	/* First cleanup the iocb's while still holding the lock */
2892 	iocbq = (IOCBQ *)abort.q_first;
2893 	while (iocbq) {
2894 		/* Free the IoTag and the bmp */
2895 		iocb = &iocbq->iocb;
2896 
2897 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2898 			sbp = iocbq->sbp;
2899 			if (sbp) {
2900 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2901 			}
2902 		} else {
2903 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2904 			    iocb->ULPIOTAG, 0);
2905 		}
2906 
2907 		if (sbp && (sbp != STALE_PACKET)) {
2908 			mutex_enter(&sbp->mtx);
2909 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2910 			/*
2911 			 * If the fpkt is already set, then we will leave it
2912 			 * alone. This ensures that this pkt is only accounted
2913 			 * for on one fpkt->flush_count
2914 			 */
2915 			if (!sbp->fpkt && fpkt) {
2916 				mutex_enter(&fpkt->mtx);
2917 				sbp->fpkt = fpkt;
2918 				fpkt->flush_count++;
2919 				mutex_exit(&fpkt->mtx);
2920 			}
2921 
2922 			mutex_exit(&sbp->mtx);
2923 		}
2924 
2925 		iocbq = (IOCBQ *) iocbq->next;
2926 
2927 	}	/* end of while */
2928 
2929 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2930 
2931 	/* Now abort the iocb's outside the locks */
2932 	iocbq = (IOCBQ *)abort.q_first;
2933 	while (iocbq) {
2934 		/* Save the next iocbq for now */
2935 		next = (IOCBQ *)iocbq->next;
2936 
2937 		/* Unlink this iocbq */
2938 		iocbq->next = NULL;
2939 
2940 		/* Get the pkt */
2941 		sbp = (emlxs_buf_t *)iocbq->sbp;
2942 
2943 		if (sbp) {
2944 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2945 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2946 
2947 			if (hba->state >= FC_LINK_UP) {
2948 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2949 				    IOERR_ABORT_REQUESTED, 1);
2950 			} else {
2951 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2952 				    IOERR_LINK_DOWN, 1);
2953 			}
2954 		}
2955 
2956 		/* Free the iocb and its associated buffers */
2957 		else {
2958 			/* Should never happen! */
2959 			icmd = &iocbq->iocb;
2960 
2961 			/* SLI3 */
2962 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2963 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2964 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2965 				if ((hba->flag &
2966 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2967 					/* HBA is detaching or offlining */
2968 					if (icmd->ULPCOMMAND !=
2969 					    CMD_QUE_RING_LIST64_CN) {
2970 						void	*tmp;
2971 						RING *rp;
2972 						int ch;
2973 
2974 						ch = ((CHANNEL *)
2975 						    iocbq->channel)->channelno;
2976 						rp = &hba->sli.sli3.ring[ch];
2977 						for (i = 0;
2978 						    i < icmd->ULPBDECOUNT;
2979 						    i++) {
2980 							mp = EMLXS_GET_VADDR(
2981 							    hba, rp, icmd);
2982 
2983 							tmp = (void *)mp;
2984 							if (mp) {
2985 							emlxs_mem_put(
2986 							    hba, MEM_BUF, tmp);
2987 							}
2988 						}
2989 					}
2990 
2991 					emlxs_mem_put(hba, MEM_IOCB,
2992 					    (void *)iocbq);
2993 				} else {
2994 					/* repost the unsolicited buffer */
2995 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2996 					    (CHANNEL *)iocbq->channel, iocbq);
2997 				}
2998 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2999 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
3000 				/*
3001 				 * Resend the abort iocbq if any
3002 				 */
3003 				emlxs_tx_put(iocbq, 1);
3004 			}
3005 		}
3006 
3007 		iocbq = next;
3008 
3009 	}	/* end of while */
3010 
3011 	/* Now trigger channel service */
3012 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3013 		if (!flag[channelno]) {
3014 			continue;
3015 		}
3016 
3017 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3018 	}
3019 
3020 	return (abort.q_cnt);
3021 
3022 } /* emlxs_tx_lun_flush() */
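
#if 0	/* Editor's sketch -- illustrative only, not part of the driver */
/*
 * Unlike the channel and node flushes, emlxs_tx_lun_flush() removes
 * only matching entries, so it walks each singly linked queue with a
 * trailing "prev" pointer and repairs q_first/q_last as it unlinks.
 * A generic model of that selective unlink (illustrative types):
 */
#include <stddef.h>

struct ent {
	struct ent *next;
	unsigned lun;
};

struct queue {
	struct ent *first;
	struct ent *last;
	unsigned cnt;
};

/* Move every entry with a matching lun from q onto out, in FIFO order */
static void
steal_matching(struct queue *q, struct queue *out, unsigned lun)
{
	struct ent *prev = NULL, *e = q->first, *next;

	while (e != NULL) {
		next = e->next;
		if (e->lun == lun) {
			/* Unlink e, repairing first/last as needed */
			if (next == NULL)
				q->last = prev;
			if (prev == NULL)
				q->first = next;
			else
				prev->next = next;
			q->cnt--;

			/* Append e to the local abort queue */
			e->next = NULL;
			if (out->first != NULL) {
				out->last->next = e;
				out->last = e;
			} else {
				out->first = out->last = e;
			}
			out->cnt++;
		} else {
			prev = e;	/* e stays; it becomes the new prev */
		}
		e = next;
	}
}
#endif	/* sketch */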
3023 
3024 
3025 extern void
3026 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
3027 {
3028 	emlxs_hba_t *hba;
3029 	emlxs_port_t *port;
3030 	uint32_t channelno;
3031 	NODELIST *nlp;
3032 	CHANNEL *cp;
3033 	emlxs_buf_t *sbp;
3034 
3035 	port = (emlxs_port_t *)iocbq->port;
3036 	hba = HBA;
3037 	cp = (CHANNEL *)iocbq->channel;
3038 	nlp = (NODELIST *)iocbq->node;
3039 	channelno = cp->channelno;
3040 	sbp = (emlxs_buf_t *)iocbq->sbp;
3041 
3042 	if (nlp == NULL) {
3043 		/* Set node to base node by default */
3044 		nlp = &port->node_base;
3045 
3046 		iocbq->node = (void *)nlp;
3047 
3048 		if (sbp) {
3049 			sbp->node = (void *)nlp;
3050 		}
3051 	}
3052 
3053 	if (lock) {
3054 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3055 	}
3056 
3057 	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
3058 		if (sbp) {
3059 			mutex_enter(&sbp->mtx);
3060 			sbp->pkt_flags |= PACKET_IN_FLUSH;
3061 			mutex_exit(&sbp->mtx);
3062 
3063 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3064 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3065 			} else {
3066 				(void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
3067 			}
3068 
3069 			if (lock) {
3070 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3071 			}
3072 
3073 			if (hba->state >= FC_LINK_UP) {
3074 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3075 				    IOERR_ABORT_REQUESTED, 1);
3076 			} else {
3077 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3078 				    IOERR_LINK_DOWN, 1);
3079 			}
3080 			return;
3081 		} else {
3082 			if (lock) {
3083 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3084 			}
3085 
3086 			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
3087 		}
3088 
3089 		return;
3090 	}
3091 
3092 	if (sbp) {
3093 
3094 		mutex_enter(&sbp->mtx);
3095 
3096 		if (sbp->pkt_flags &
3097 		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
3098 			mutex_exit(&sbp->mtx);
3099 			if (lock) {
3100 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3101 			}
3102 			return;
3103 		}
3104 
3105 		sbp->pkt_flags |= PACKET_IN_TXQ;
3106 		hba->channel_tx_count++;
3107 
3108 		mutex_exit(&sbp->mtx);
3109 	}
3110 
3111 
3112 	/* Check iocbq priority */
3113 	/* Some IOCBs, such as reset/close XRI, have high priority */
3114 	if (iocbq->flag & IOCB_PRIORITY) {
3115 		/* Add the iocb to the bottom of the node's ptx queue */
3116 		if (nlp->nlp_ptx[channelno].q_first) {
3117 			((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
3118 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3119 			nlp->nlp_ptx[channelno].q_cnt++;
3120 		} else {
3121 			nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
3122 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3123 			nlp->nlp_ptx[channelno].q_cnt = 1;
3124 		}
3125 
3126 		iocbq->next = NULL;
3127 	} else {	/* Normal priority */
3128 
3129 
3130 		/* Add the iocb to the bottom of the node's tx queue */
3131 		if (nlp->nlp_tx[channelno].q_first) {
3132 			((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
3133 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3134 			nlp->nlp_tx[channelno].q_cnt++;
3135 		} else {
3136 			nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
3137 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3138 			nlp->nlp_tx[channelno].q_cnt = 1;
3139 		}
3140 
3141 		iocbq->next = NULL;
3142 	}
3143 
3144 
3145 	/*
3146 	 * Check that the node is not already on the channel queue and
3147 	 * is either not closed or carrying a priority request
3148 	 */
3149 	if (!nlp->nlp_next[channelno] &&
3150 	    (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
3151 	    (iocbq->flag & IOCB_PRIORITY))) {
3152 		/* If so, then add it to the channel queue */
3153 		if (cp->nodeq.q_first) {
3154 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
3155 			    (uint8_t *)nlp;
3156 			nlp->nlp_next[channelno] = cp->nodeq.q_first;
3157 
3158 			/*
3159 			 * If this is not the base node then add it
3160 			 * to the tail
3161 			 */
3162 			if (!nlp->nlp_base) {
3163 				cp->nodeq.q_last = (uint8_t *)nlp;
3164 			} else {	/* Otherwise, add it to the head */
3165 
3166 				/* The command node always gets priority */
3167 				cp->nodeq.q_first = (uint8_t *)nlp;
3168 			}
3169 
3170 			cp->nodeq.q_cnt++;
3171 		} else {
3172 			cp->nodeq.q_first = (uint8_t *)nlp;
3173 			cp->nodeq.q_last = (uint8_t *)nlp;
3174 			nlp->nlp_next[channelno] = nlp;
3175 			cp->nodeq.q_cnt = 1;
3176 		}
3177 	}
3178 
3179 	HBASTATS.IocbTxPut[channelno]++;
3180 
3181 	/* Adjust the channel timeout timer */
3182 	cp->timeout = hba->timer_tics + 5;
3183 
3184 	if (lock) {
3185 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3186 	}
3187 
3188 	return;
3189 
3190 } /* emlxs_tx_put() */
3191 
3192 
3193 extern IOCBQ *
3194 emlxs_tx_get(CHANNEL *cp, uint32_t lock)
3195 {
3196 	emlxs_hba_t *hba;
3197 	uint32_t channelno;
3198 	IOCBQ *iocbq;
3199 	NODELIST *nlp;
3200 	emlxs_buf_t *sbp;
3201 
3202 	hba = cp->hba;
3203 	channelno = cp->channelno;
3204 
3205 	if (lock) {
3206 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3207 	}
3208 
3209 begin:
3210 
3211 	iocbq = NULL;
3212 
3213 	/* Check if a node needs servicing */
3214 	if (cp->nodeq.q_first) {
3215 		nlp = (NODELIST *)cp->nodeq.q_first;
3216 
3217 		/* Get next iocb from node's priority queue */
3218 
3219 		if (nlp->nlp_ptx[channelno].q_first) {
3220 			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
3221 
3222 			/* Check if this is last entry */
3223 			if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
3224 				nlp->nlp_ptx[channelno].q_first = NULL;
3225 				nlp->nlp_ptx[channelno].q_last = NULL;
3226 				nlp->nlp_ptx[channelno].q_cnt = 0;
3227 			} else {
3228 				/* Remove iocb from head */
3229 				nlp->nlp_ptx[channelno].q_first =
3230 				    (void *)iocbq->next;
3231 				nlp->nlp_ptx[channelno].q_cnt--;
3232 			}
3233 
3234 			iocbq->next = NULL;
3235 		}
3236 
3237 		/* Get next iocb from node tx queue if node not closed */
3238 		else if (nlp->nlp_tx[channelno].q_first &&
3239 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
3240 			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
3241 
3242 			/* Check if this is last entry */
3243 			if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
3244 				nlp->nlp_tx[channelno].q_first = NULL;
3245 				nlp->nlp_tx[channelno].q_last = NULL;
3246 				nlp->nlp_tx[channelno].q_cnt = 0;
3247 			} else {
3248 				/* Remove iocb from head */
3249 				nlp->nlp_tx[channelno].q_first =
3250 				    (void *)iocbq->next;
3251 				nlp->nlp_tx[channelno].q_cnt--;
3252 			}
3253 
3254 			iocbq->next = NULL;
3255 		}
3256 
3257 		/* Now deal with node itself */
3258 
3259 		/* Check if node still needs servicing */
3260 		if ((nlp->nlp_ptx[channelno].q_first) ||
3261 		    (nlp->nlp_tx[channelno].q_first &&
3262 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3263 
3264 			/*
3265 			 * If this is the base node, then don't shift the
3266 			 * pointers. We want to drain the base node before
3267 			 * moving on
3268 			 */
3269 			if (!nlp->nlp_base) {
3270 				/*
3271 				 * Just shift channel queue pointers to next
3272 				 * node
3273 				 */
3274 				cp->nodeq.q_last = (void *)nlp;
3275 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3276 			}
3277 		} else {
3278 			/* Remove node from channel queue */
3279 
3280 			/* If this is the last node on list */
3281 			if (cp->nodeq.q_last == (void *)nlp) {
3282 				cp->nodeq.q_last = NULL;
3283 				cp->nodeq.q_first = NULL;
3284 				cp->nodeq.q_cnt = 0;
3285 			} else {
3286 				/* Remove node from head */
3287 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3288 				((NODELIST *)cp->nodeq.q_last)->
3289 				    nlp_next[channelno] = cp->nodeq.q_first;
3290 				cp->nodeq.q_cnt--;
3291 
3292 			}
3293 
3294 			/* Clear node */
3295 			nlp->nlp_next[channelno] = NULL;
3296 		}
3297 
3298 		/*
3299 		 * If no iocbq was found on this node, then it will have
3300 		 * been removed. So try again.
3301 		 */
3302 		if (!iocbq) {
3303 			goto begin;
3304 		}
3305 
3306 		sbp = (emlxs_buf_t *)iocbq->sbp;
3307 
3308 		if (sbp) {
3309 			/*
3310 			 * Check flags before we enter mutex in case this
3311 			 * has been flushed and destroyed
3312 			 */
3313 			if ((sbp->pkt_flags &
3314 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3315 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3316 				goto begin;
3317 			}
3318 
3319 			mutex_enter(&sbp->mtx);
3320 
3321 			if ((sbp->pkt_flags &
3322 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3323 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3324 				mutex_exit(&sbp->mtx);
3325 				goto begin;
3326 			}
3327 
3328 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
3329 			hba->channel_tx_count--;
3330 
3331 			mutex_exit(&sbp->mtx);
3332 		}
3333 	}
3334 
3335 	if (iocbq) {
3336 		HBASTATS.IocbTxGet[channelno]++;
3337 	}
3338 
3339 	/* Adjust the ring timeout timer */
3340 	cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3341 
3342 	if (lock) {
3343 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3344 	}
3345 
3346 	return (iocbq);
3347 
3348 } /* emlxs_tx_get() */
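
#if 0	/* Editor's sketch -- illustrative only, not part of the driver */
/*
 * emlxs_tx_get() services the node at the head of the circular nodeq
 * and then, unless it is the base node, rotates the ring one step so
 * every node gets a fair share of the channel. Because the ring is
 * closed (last->next == first), the rotation is just a pointer shift:
 */
#include <stddef.h>

struct qnode {
	struct qnode *next;
};

struct nodeq {
	struct qnode *first;
	struct qnode *last;	/* last->next == first when non-empty */
	unsigned cnt;
};

/* Rotate one step: the current head becomes the tail */
static void
nodeq_rotate(struct nodeq *q)
{
	if (q->first == NULL || q->first == q->last)
		return;			/* empty ring or single entry */

	q->last = q->first;
	q->first = q->first->next;	/* ring stays closed by definition */
}
#endif	/* sketch */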
3349 
3350 
3351 /*
3352  * Move all FCP cmds for ndlp from from_chan's txq to to_chan's txq.
3353  * The old IoTag has to be released and a new one allocated; nothing
3354  * else changes.
3355  * The TX_CHANNEL lock is acquired here when 'lock' is set.
3356  */
3357 extern void
3358 emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
3359     uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
3360 {
3361 	emlxs_hba_t *hba;
3362 	emlxs_port_t *port;
3363 	uint32_t fchanno, tchanno, i;
3364 
3365 	IOCBQ *iocbq;
3366 	IOCBQ *prev;
3367 	IOCBQ *next;
3368 	IOCB *iocb, *icmd;
3369 	Q tbm;		/* To Be Moved Q */
3370 	MATCHMAP *mp;
3371 
3372 	NODELIST *nlp = ndlp;
3373 	emlxs_buf_t *sbp;
3374 
3375 	NODELIST *n_prev = NULL;
3376 	NODELIST *n_next = NULL;
3377 	uint16_t count = 0;
3378 
3379 	hba = from_chan->hba;
3380 	port = &PPORT;
3381 	cmd = cmd; /* To pass lint */
3382 
3383 	fchanno = from_chan->channelno;
3384 	tchanno = to_chan->channelno;
3385 
3386 	if (lock) {
3387 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3388 	}
3389 
3390 	bzero((void *)&tbm, sizeof (Q));
3391 
3392 	/* Scan the ndlp's fchanno txq to collect the FCP cmd iocbs */
3393 	prev = NULL;
3394 	iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;
3395 
3396 	while (iocbq) {
3397 		next = (IOCBQ *)iocbq->next;
3398 		/* Check if this iocb is fcp cmd */
3399 		iocb = &iocbq->iocb;
3400 
3401 		switch (iocb->ULPCOMMAND) {
3402 		/* FCP commands */
3403 		case CMD_FCP_ICMND_CR:
3404 		case CMD_FCP_ICMND_CX:
3405 		case CMD_FCP_IREAD_CR:
3406 		case CMD_FCP_IREAD_CX:
3407 		case CMD_FCP_IWRITE_CR:
3408 		case CMD_FCP_IWRITE_CX:
3409 		case CMD_FCP_ICMND64_CR:
3410 		case CMD_FCP_ICMND64_CX:
3411 		case CMD_FCP_IREAD64_CR:
3412 		case CMD_FCP_IREAD64_CX:
3413 		case CMD_FCP_IWRITE64_CR:
3414 		case CMD_FCP_IWRITE64_CX:
3415 			/* We found an FCP cmd */
3416 			break;
3417 		default:
3418 			/* not an FCP cmd; continue */
3419 			prev = iocbq;
3420 			iocbq = next;
3421 			continue;
3422 		}
3423 
3424 		/* Found an FCP cmd iocb in the fchanno txq; now dequeue it */
3425 		if (next == NULL) {
3426 			/* This is the last iocbq */
3427 			nlp->nlp_tx[fchanno].q_last =
3428 			    (uint8_t *)prev;
3429 		}
3430 
3431 		if (prev == NULL) {
3432 			/* This is the first one then remove it from head */
3433 			nlp->nlp_tx[fchanno].q_first =
3434 			    (uint8_t *)next;
3435 		} else {
3436 			prev->next = next;
3437 		}
3438 
3439 		iocbq->next = NULL;
3440 		nlp->nlp_tx[fchanno].q_cnt--;
3441 
3442 		/* Add this iocb to our local to-be-moved queue (tbm) */
3443 		/* so that we do not hold the TX_CHANNEL lock too long */
3444 
3445 		if (tbm.q_first) {
3446 			((IOCBQ *)tbm.q_last)->next = iocbq;
3447 			tbm.q_last = (uint8_t *)iocbq;
3448 			tbm.q_cnt++;
3449 		} else {
3450 			tbm.q_first = (uint8_t *)iocbq;
3451 			tbm.q_last = (uint8_t *)iocbq;
3452 			tbm.q_cnt = 1;
3453 		}
3454 
3455 		iocbq = next;
3456 
3457 	}	/* While (iocbq) */
3458 
3459 	if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {
3460 
3461 		/* from_chan->nodeq.q_first must be non-NULL */
3462 		if (from_chan->nodeq.q_first) {
3463 
3464 			/* nodeq is not empty, now deal with the node itself */
3465 			if ((nlp->nlp_tx[fchanno].q_first)) {
3466 
3467 				if (!nlp->nlp_base) {
3468 					from_chan->nodeq.q_last =
3469 					    (void *)nlp;
3470 					from_chan->nodeq.q_first =
3471 					    nlp->nlp_next[fchanno];
3472 				}
3473 
3474 			} else {
3475 				n_prev = (NODELIST *)from_chan->nodeq.q_first;
3476 				count = from_chan->nodeq.q_cnt;
3477 
3478 				if (n_prev == nlp) {
3479 
3480 					/* If this is the only node on list */
3481 					if (from_chan->nodeq.q_last ==
3482 					    (void *)nlp) {
3483 						from_chan->nodeq.q_last =
3484 						    NULL;
3485 						from_chan->nodeq.q_first =
3486 						    NULL;
3487 						from_chan->nodeq.q_cnt = 0;
3488 					} else {
3489 						from_chan->nodeq.q_first =
3490 						    nlp->nlp_next[fchanno];
3491 						((NODELIST *)from_chan->
3492 						    nodeq.q_last)->
3493 						    nlp_next[fchanno] =
3494 						    from_chan->nodeq.q_first;
3495 						from_chan->nodeq.q_cnt--;
3496 					}
3497 					/* Clear node */
3498 					nlp->nlp_next[fchanno] = NULL;
3499 				} else {
3500 					count--;
3501 					do {
3502 						n_next =
3503 						    n_prev->nlp_next[fchanno];
3504 						if (n_next == nlp) {
3505 							break;
3506 						}
3507 						n_prev = n_next;
3508 					} while (count--);
3509 
3510 					if (count != 0) {
3511 
3512 						if (n_next ==
3513 						    (NODELIST *)from_chan->
3514 						    nodeq.q_last) {
3515 							n_prev->
3516 							    nlp_next[fchanno]
3517 							    =
3518 							    ((NODELIST *)
3519 							    from_chan->
3520 							    nodeq.q_last)->
3521 							    nlp_next
3522 							    [fchanno];
3523 							from_chan->nodeq.q_last
3524 							    = (uint8_t *)n_prev;
3525 						} else {
3526 
3527 							n_prev->
3528 							    nlp_next[fchanno]
3529 							    =
3530 							    n_next->nlp_next
3531 							    [fchanno];
3532 						}
3533 						from_chan->nodeq.q_cnt--;
3534 						/* Clear node */
3535 						nlp->nlp_next[fchanno] =
3536 						    NULL;
3537 					}
3538 				}
3539 			}
3540 		}
3541 	}
3542 
3543 	/* Now cleanup the iocb's */
3544 	prev = NULL;
3545 	iocbq = (IOCBQ *)tbm.q_first;
3546 
3547 	while (iocbq) {
3548 
3549 		next = (IOCBQ *)iocbq->next;
3550 
3551 		/* Free the IoTag and the bmp */
3552 		iocb = &iocbq->iocb;
3553 
3554 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3555 			sbp = iocbq->sbp;
3556 			if (sbp) {
3557 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3558 			}
3559 		} else {
3560 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
3561 			    iocb->ULPIOTAG, 0);
3562 		}
3563 
3564 		if (sbp && (sbp != STALE_PACKET)) {
3565 			mutex_enter(&sbp->mtx);
3566 			sbp->pkt_flags |= PACKET_IN_FLUSH;
3567 
3568 			/*
3569 			 * If the fpkt is already set, then we will leave it
3570 			 * alone. This ensures that this pkt is only accounted
3571 			 * for on one fpkt->flush_count
3572 			 */
3573 			if (!sbp->fpkt && fpkt) {
3574 				mutex_enter(&fpkt->mtx);
3575 				sbp->fpkt = fpkt;
3576 				fpkt->flush_count++;
3577 				mutex_exit(&fpkt->mtx);
3578 			}
3579 			mutex_exit(&sbp->mtx);
3580 		}
3581 		iocbq = next;
3582 
3583 	}	/* end of while */
3584 
3585 	iocbq = (IOCBQ *)tbm.q_first;
3586 	while (iocbq) {
3587 		/* Save the next iocbq for now */
3588 		next = (IOCBQ *)iocbq->next;
3589 
3590 		/* Unlink this iocbq */
3591 		iocbq->next = NULL;
3592 
3593 		/* Get the pkt */
3594 		sbp = (emlxs_buf_t *)iocbq->sbp;
3595 
3596 		if (sbp) {
3597 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3598 			    "tx: sbp=%p node=%p", sbp, sbp->node);
3599 
3600 			if (hba->state >= FC_LINK_UP) {
3601 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3602 				    IOERR_ABORT_REQUESTED, 1);
3603 			} else {
3604 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3605 				    IOERR_LINK_DOWN, 1);
3606 			}
3607 
3608 		}
3609 		/* Free the iocb and its associated buffers */
3610 		else {
3611 			icmd = &iocbq->iocb;
3612 
3613 			/* SLI3 */
3614 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
3615 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
3616 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
3617 				if ((hba->flag &
3618 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
3619 					/* HBA is detaching or offlining */
3620 					if (icmd->ULPCOMMAND !=
3621 					    CMD_QUE_RING_LIST64_CN) {
3622 						void *tmp;
3623 						RING *rp;
3624 						int ch;
3625 
3626 						ch = from_chan->channelno;
3627 						rp = &hba->sli.sli3.ring[ch];
3628 
3629 						for (i = 0;
3630 						    i < icmd->ULPBDECOUNT;
3631 						    i++) {
3632 							mp = EMLXS_GET_VADDR(
3633 							    hba, rp, icmd);
3634 
3635 							tmp = (void *)mp;
3636 							if (mp) {
3637 							emlxs_mem_put(
3638 							    hba,
3639 							    MEM_BUF,
3640 							    tmp);
3641 							}
3642 						}
3643 
3644 					}
3645 
3646 					emlxs_mem_put(hba, MEM_IOCB,
3647 					    (void *)iocbq);
3648 				} else {
3649 					/* repost the unsolicited buffer */
3650 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
3651 					    from_chan, iocbq);
3652 				}
3653 			}
3654 		}
3655 
3656 		iocbq = next;
3657 
3658 	}	/* end of while */
3659 
3660 	/* Now flush the chipq if any */
3661 	if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {
3662 
3663 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3664 
3665 		(void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);
3666 
3667 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3668 	}
3669 
3670 	if (lock) {
3671 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3672 	}
3673 
3674 	return;
3675 
3676 } /* emlxs_tx_move */
3677 
3678 
3679 extern uint32_t
3680 emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3681     emlxs_buf_t *fpkt)
3682 {
3683 	emlxs_hba_t *hba = HBA;
3684 	emlxs_buf_t *sbp;
3685 	IOCBQ *iocbq;
3686 	IOCBQ *next;
3687 	Q abort;
3688 	CHANNEL *cp;
3689 	uint32_t channelno;
3690 	uint8_t flag[MAX_CHANNEL];
3691 	uint32_t iotag;
3692 
3693 	bzero((void *)&abort, sizeof (Q));
3694 	bzero((void *)flag, sizeof (flag));
3695 
3696 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3697 		cp = &hba->chan[channelno];
3698 
3699 		if (chan && cp != chan) {
3700 			continue;
3701 		}
3702 
3703 		mutex_enter(&EMLXS_FCTAB_LOCK);
3704 
3705 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3706 			sbp = hba->fc_table[iotag];
3707 
3708 			if (sbp && (sbp != STALE_PACKET) &&
3709 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3710 			    (sbp->node == ndlp) &&
3711 			    (sbp->channel == cp) &&
3712 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3713 				emlxs_sbp_abort_add(port, sbp, &abort, flag,
3714 				    fpkt);
3715 			}
3716 
3717 		}
3718 		mutex_exit(&EMLXS_FCTAB_LOCK);
3719 
3720 	}	/* for */
3721 
3722 	/* Now put the iocb's on the tx queue */
3723 	iocbq = (IOCBQ *)abort.q_first;
3724 	while (iocbq) {
3725 		/* Save the next iocbq for now */
3726 		next = (IOCBQ *)iocbq->next;
3727 
3728 		/* Unlink this iocbq */
3729 		iocbq->next = NULL;
3730 
3731 		/* Send this iocbq */
3732 		emlxs_tx_put(iocbq, 1);
3733 
3734 		iocbq = next;
3735 	}
3736 
3737 	/* Now trigger channel service */
3738 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3739 		if (!flag[channelno]) {
3740 			continue;
3741 		}
3742 
3743 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3744 	}
3745 
3746 	return (abort.q_cnt);
3747 
3748 } /* emlxs_chipq_node_flush() */
3749 
3750 
3751 /* Flush all IO's left on all iotag lists */
3752 extern uint32_t
emlxs_iotag_flush(emlxs_hba_t * hba)3753 emlxs_iotag_flush(emlxs_hba_t *hba)
3754 {
3755 	emlxs_port_t *port = &PPORT;
3756 	emlxs_buf_t *sbp;
3757 	IOCBQ *iocbq;
3758 	IOCB *iocb;
3759 	Q abort;
3760 	CHANNEL *cp;
3761 	uint32_t channelno;
3762 	uint32_t iotag;
3763 	uint32_t count;
3764 
3765 	count = 0;
3766 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3767 		cp = &hba->chan[channelno];
3768 
3769 		bzero((void *)&abort, sizeof (Q));
3770 
3771 		mutex_enter(&EMLXS_FCTAB_LOCK);
3772 
3773 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3774 			sbp = hba->fc_table[iotag];
3775 
3776 			/* Check if the slot is empty */
3777 			if (!sbp || (sbp == STALE_PACKET)) {
3778 				continue;
3779 			}
3780 
3781 			/* We are building an abort list per channel */
3782 			if (sbp->channel != cp) {
3783 				continue;
3784 			}
3785 
3786 			hba->fc_table[iotag] = STALE_PACKET;
3787 			hba->io_count--;
3788 
3789 			/* Check if IO is valid */
3790 			if (!(sbp->pkt_flags & PACKET_VALID) ||
3791 			    (sbp->pkt_flags & (PACKET_ULP_OWNED|
3792 			    PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
3793 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3794 				    "iotag_flush: Invalid IO found. iotag=%d",
3795 				    iotag);
3796 
3797 				continue;
3798 			}
3799 
3800 			sbp->iotag = 0;
3801 
3802 			/* Set IOCB status */
3803 			iocbq = &sbp->iocbq;
3804 			iocb = &iocbq->iocb;
3805 
3806 			iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
3807 			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3808 			iocb->ULPLE = 1;
3809 			iocbq->next = NULL;
3810 
3811 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3812 				if (sbp->xrip) {
3813 					EMLXS_MSGF(EMLXS_CONTEXT,
3814 					    &emlxs_sli_debug_msg,
3815 					    "iotag_flush: iotag=%d sbp=%p "
3816 					    "xrip=%p state=%x flag=%x",
3817 					    iotag, sbp, sbp->xrip,
3818 					    sbp->xrip->state, sbp->xrip->flag);
3819 				} else {
3820 					EMLXS_MSGF(EMLXS_CONTEXT,
3821 					    &emlxs_sli_debug_msg,
3822 					    "iotag_flush: iotag=%d sbp=%p "
3823 					    "xrip=NULL", iotag, sbp);
3824 				}
3825 
3826 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
3827 			} else {
3828 				/* Clean up the sbp */
3829 				mutex_enter(&sbp->mtx);
3830 
3831 				if (sbp->pkt_flags & PACKET_IN_TXQ) {
3832 					sbp->pkt_flags &= ~PACKET_IN_TXQ;
3833 					hba->channel_tx_count --;
3834 				}
3835 
3836 				if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3837 					sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3838 				}
3839 
3840 				if (sbp->bmp) {
3841 					emlxs_mem_put(hba, MEM_BPL,
3842 					    (void *)sbp->bmp);
3843 					sbp->bmp = 0;
3844 				}
3845 
3846 				mutex_exit(&sbp->mtx);
3847 			}
3848 
3849 			/* At this point all nodes are assumed destroyed */
3850 			mutex_enter(&sbp->mtx);
3851 			sbp->node = 0;
3852 			mutex_exit(&sbp->mtx);
3853 
3854 			/* Add this iocb to our local abort Q */
3855 			if (abort.q_first) {
3856 				((IOCBQ *)abort.q_last)->next = iocbq;
3857 				abort.q_last = (uint8_t *)iocbq;
3858 				abort.q_cnt++;
3859 			} else {
3860 				abort.q_first = (uint8_t *)iocbq;
3861 				abort.q_last = (uint8_t *)iocbq;
3862 				abort.q_cnt = 1;
3863 			}
3864 		}
3865 
3866 		mutex_exit(&EMLXS_FCTAB_LOCK);
3867 
3868 		/* Trigger deferred completion */
3869 		if (abort.q_first) {
3870 			mutex_enter(&cp->rsp_lock);
3871 			if (cp->rsp_head == NULL) {
3872 				cp->rsp_head = (IOCBQ *)abort.q_first;
3873 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3874 			} else {
3875 				cp->rsp_tail->next = (IOCBQ *)abort.q_first;
3876 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3877 			}
3878 			mutex_exit(&cp->rsp_lock);
3879 
3880 			emlxs_thread_trigger2(&cp->intr_thread,
3881 			    emlxs_proc_channel, cp);
3882 
3883 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3884 			    "iotag_flush: channel=%d count=%d",
3885 			    channelno, abort.q_cnt);
3886 
3887 			count += abort.q_cnt;
3888 		}
3889 	}
3890 
3891 	return (count);
3892 
3893 } /* emlxs_iotag_flush() */
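
#if 0	/* Editor's sketch -- illustrative only, not part of the driver */
/*
 * emlxs_iotag_flush() does not complete the aborted I/Os inline; it
 * chains them onto the channel's response list and wakes the channel's
 * service thread. A minimal model of the producer side of that
 * hand-off, with a pthread condition variable standing in for
 * emlxs_thread_trigger2():
 */
#include <pthread.h>
#include <stddef.h>

struct iocb {
	struct iocb *next;
};

static pthread_mutex_t rsp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rsp_cv = PTHREAD_COND_INITIALIZER;
static struct iocb *rsp_head, *rsp_tail;

/* Append a pre-built batch [first..last] and wake the service thread */
static void
defer_completions(struct iocb *first, struct iocb *last)
{
	pthread_mutex_lock(&rsp_lock);
	if (rsp_head == NULL) {
		rsp_head = first;
		rsp_tail = last;
	} else {
		rsp_tail->next = first;		/* splice the whole batch */
		rsp_tail = last;
	}
	pthread_mutex_unlock(&rsp_lock);

	pthread_cond_signal(&rsp_cv);	/* consumer thread drains rsp_head */
}
#endif	/* sketch */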
3894 
3895 
3896 
3897 /* Checks for IO's on all or a given channel for a given node */
3898 extern uint32_t
3899 emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3900 {
3901 	emlxs_hba_t *hba = HBA;
3902 	emlxs_buf_t *sbp;
3903 	CHANNEL *cp;
3904 	uint32_t channelno;
3905 	uint32_t count;
3906 	uint32_t iotag;
3907 
3908 	count = 0;
3909 
3910 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3911 		cp = &hba->chan[channelno];
3912 
3913 		if (chan && cp != chan) {
3914 			continue;
3915 		}
3916 
3917 		mutex_enter(&EMLXS_FCTAB_LOCK);
3918 
3919 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3920 			sbp = hba->fc_table[iotag];
3921 
3922 			if (sbp && (sbp != STALE_PACKET) &&
3923 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3924 			    (sbp->node == ndlp) &&
3925 			    (sbp->channel == cp) &&
3926 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3927 				count++;
3928 			}
3929 
3930 		}
3931 		mutex_exit(&EMLXS_FCTAB_LOCK);
3932 
3933 	}	/* for */
3934 
3935 	return (count);
3936 
3937 } /* emlxs_chipq_node_check() */
3938 
3939 
3940 
3941 /* Flush all IO's for a given node's lun (on any channel) */
3942 extern uint32_t
3943 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3944     uint32_t lun, emlxs_buf_t *fpkt)
3945 {
3946 	emlxs_hba_t *hba = HBA;
3947 	emlxs_buf_t *sbp;
3948 	IOCBQ *iocbq;
3949 	IOCBQ *next;
3950 	Q abort;
3951 	uint32_t iotag;
3952 	uint8_t flag[MAX_CHANNEL];
3953 	uint32_t channelno;
3954 
3955 	if (lun == EMLXS_LUN_NONE) {
3956 		return (0);
3957 	}
3958 
3959 	bzero((void *)flag, sizeof (flag));
3960 	bzero((void *)&abort, sizeof (Q));
3961 
3962 	mutex_enter(&EMLXS_FCTAB_LOCK);
3963 	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3964 		sbp = hba->fc_table[iotag];
3965 
3966 		if (sbp && (sbp != STALE_PACKET) &&
3967 		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
3968 		    sbp->node == ndlp &&
3969 		    sbp->lun == lun &&
3970 		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3971 			emlxs_sbp_abort_add(port, sbp,
3972 			    &abort, flag, fpkt);
3973 		}
3974 	}
3975 	mutex_exit(&EMLXS_FCTAB_LOCK);
3976 
3977 	/* Now put the iocb's on the tx queue */
3978 	iocbq = (IOCBQ *)abort.q_first;
3979 	while (iocbq) {
3980 		/* Save the next iocbq for now */
3981 		next = (IOCBQ *)iocbq->next;
3982 
3983 		/* Unlink this iocbq */
3984 		iocbq->next = NULL;
3985 
3986 		/* Send this iocbq */
3987 		emlxs_tx_put(iocbq, 1);
3988 
3989 		iocbq = next;
3990 	}
3991 
3992 	/* Now trigger channel service */
3993 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3994 		if (!flag[channelno]) {
3995 			continue;
3996 		}
3997 
3998 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3999 	}
4000 
4001 	return (abort.q_cnt);
4002 
4003 } /* emlxs_chipq_lun_flush() */
4004 
4005 
4006 
4007 /*
4008  * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
4009  * This must be called while holding the EMLXS_FCTAB_LOCK
4010  */
4011 extern IOCBQ *
4012 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4013     uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
4014 {
4015 	emlxs_hba_t *hba = HBA;
4016 	IOCBQ *iocbq;
4017 	IOCB *iocb;
4018 	emlxs_wqe_t *wqe;
4019 	emlxs_buf_t *sbp;
4020 	uint16_t abort_iotag;
4021 
4022 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4023 		return (NULL);
4024 	}
4025 
4026 	iocbq->channel = (void *)cp;
4027 	iocbq->port = (void *)port;
4028 	iocbq->node = (void *)ndlp;
4029 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4030 
4031 	/*
4032 	 * set up an iotag using special Abort iotags
4033 	 */
4034 	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4035 		hba->fc_oor_iotag = hba->max_iotag;
4036 	}
4037 	abort_iotag = hba->fc_oor_iotag++;
4038 
4039 
4040 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4041 		wqe = &iocbq->wqe;
4042 		sbp = hba->fc_table[iotag];
4043 
4044 		/* Try to issue abort by XRI if possible */
4045 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4046 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4047 			wqe->AbortTag = iotag;
4048 		} else {
4049 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4050 			wqe->AbortTag = sbp->xrip->XRI;
4051 		}
4052 		wqe->un.Abort.IA = 0;
4053 		wqe->RequestTag = abort_iotag;
4054 		wqe->Command = CMD_ABORT_XRI_CX;
4055 		wqe->Class = CLASS3;
4056 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4057 		wqe->CmdType = WQE_TYPE_ABORT;
4058 	} else {
4059 		iocb = &iocbq->iocb;
4060 		iocb->ULPIOTAG = abort_iotag;
4061 		iocb->un.acxri.abortType = flag;
4062 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4063 		iocb->un.acxri.abortIoTag = iotag;
4064 		iocb->ULPLE = 1;
4065 		iocb->ULPCLASS = class;
4066 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
4067 		iocb->ULPOWNER = OWN_CHIP;
4068 	}
4069 
4070 	return (iocbq);
4071 
4072 } /* emlxs_create_abort_xri_cn() */
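
#if 0	/* Editor's sketch -- illustrative only, not part of the driver */
/*
 * Abort/close requests need request tags of their own, so the driver
 * draws them from a reserved band above max_iotag; they can never
 * collide with a live I/O tag, and the cursor wraps within the band.
 * The limits below are illustrative, and the caller is assumed to hold
 * the table lock (EMLXS_FCTAB_LOCK in the driver):
 */
#define N_IOTAG		4096	/* normal I/O tags: 1 .. 4095 */
#define N_ABORT_TAG	8192	/* abort tags: 4096 .. 8191 */

static unsigned oor_rotor = N_IOTAG;

static unsigned
alloc_abort_tag(void)
{
	if (oor_rotor >= N_ABORT_TAG)
		oor_rotor = N_IOTAG;	/* wrap within the reserved band */

	return (oor_rotor++);
}
#endif	/* sketch */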
4073 
4074 
4075 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4076 extern IOCBQ *
4077 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4078     CHANNEL *cp, uint8_t class, int32_t flag)
4079 {
4080 	emlxs_hba_t *hba = HBA;
4081 	IOCBQ *iocbq;
4082 	IOCB *iocb;
4083 	emlxs_wqe_t *wqe;
4084 	uint16_t abort_iotag;
4085 
4086 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4087 		return (NULL);
4088 	}
4089 
4090 	iocbq->channel = (void *)cp;
4091 	iocbq->port = (void *)port;
4092 	iocbq->node = (void *)ndlp;
4093 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4094 
4095 	/*
4096 	 * set up an iotag using special Abort iotags
4097 	 */
4098 	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4099 		hba->fc_oor_iotag = hba->max_iotag;
4100 	}
4101 	abort_iotag = hba->fc_oor_iotag++;
4102 
4103 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4104 		wqe = &iocbq->wqe;
4105 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4106 		wqe->un.Abort.IA = 0;
4107 		wqe->RequestTag = abort_iotag;
4108 		wqe->AbortTag = xid;
4109 		wqe->Command = CMD_ABORT_XRI_CX;
4110 		wqe->Class = CLASS3;
4111 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4112 		wqe->CmdType = WQE_TYPE_ABORT;
4113 	} else {
4114 		iocb = &iocbq->iocb;
4115 		iocb->ULPCONTEXT = xid;
4116 		iocb->ULPIOTAG = abort_iotag;
4117 		iocb->un.acxri.abortType = flag;
4118 		iocb->ULPLE = 1;
4119 		iocb->ULPCLASS = class;
4120 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
4121 		iocb->ULPOWNER = OWN_CHIP;
4122 	}
4123 
4124 	return (iocbq);
4125 
4126 } /* emlxs_create_abort_xri_cx() */
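/*
 * Editorial note: the _cn variant above identifies its victim by
 * driver iotag (plus the node's RPI on SLI-3), while this _cx variant
 * targets an exchange directly by XRI/exchange id.  That makes _cx
 * the right tool for unsolicited exchanges that have no sbp or node
 * attached, which is how the exchange-abort helpers below call it
 * (rxid being the exchange id of the unsolicited frame):
 *
 *	iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
 *	    CLASS3, ABORT_TYPE_ABTS);
 */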
4127 
4128 
4129 
4130 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4131 extern IOCBQ *
4132 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4133     uint16_t iotag, CHANNEL *cp)
4134 {
4135 	emlxs_hba_t *hba = HBA;
4136 	IOCBQ *iocbq;
4137 	IOCB *iocb;
4138 	emlxs_wqe_t *wqe;
4139 	emlxs_buf_t *sbp;
4140 	uint16_t abort_iotag;
4141 
4142 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4143 		return (NULL);
4144 	}
4145 
4146 	iocbq->channel = (void *)cp;
4147 	iocbq->port = (void *)port;
4148 	iocbq->node = (void *)ndlp;
4149 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4150 
4151 	/*
4152 	 * Set up an iotag from the reserved out-of-range abort iotags.
4153 	 */
4154 	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4155 		hba->fc_oor_iotag = hba->max_iotag;
4156 	}
4157 	abort_iotag = hba->fc_oor_iotag++;
4158 
4159 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4160 		wqe = &iocbq->wqe;
4161 		sbp = hba->fc_table[iotag];
4162 
4163 		/* Try to issue close by XRI if possible */
4164 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4165 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4166 			wqe->AbortTag = iotag;
4167 		} else {
4168 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4169 			wqe->AbortTag = sbp->xrip->XRI;
4170 		}
4171 		wqe->un.Abort.IA = 1;
4172 		wqe->RequestTag = abort_iotag;
4173 		wqe->Command = CMD_ABORT_XRI_CX;
4174 		wqe->Class = CLASS3;
4175 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4176 		wqe->CmdType = WQE_TYPE_ABORT;
4177 	} else {
4178 		iocb = &iocbq->iocb;
4179 		iocb->ULPIOTAG = abort_iotag;
4180 		iocb->un.acxri.abortType = 0;
4181 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4182 		iocb->un.acxri.abortIoTag = iotag;
4183 		iocb->ULPLE = 1;
4184 		iocb->ULPCLASS = 0;
4185 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
4186 		iocb->ULPOWNER = OWN_CHIP;
4187 	}
4188 
4189 	return (iocbq);
4190 
4191 } /* emlxs_create_close_xri_cn() */
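/*
 * Editorial note: the close constructors differ from the abort
 * constructors mainly in the SLI-4 IA bit, set to 1 here so that the
 * XRI is torn down locally without an ABTS going out on the link
 * (IA is understood to mean "implicit abort"), and, on SLI-3, in
 * using CMD_CLOSE_XRI_CN with abortType and class forced to 0.
 * Callers in this file pick between abort and close based on link
 * state, since sending an ABTS requires a live link:
 *
 *	if (hba->state >= FC_LINK_UP) {
 *		iocbq = emlxs_create_abort_xri_cn(port, ndlp,
 *		    sbp->iotag, cp, CLASS3, ABORT_TYPE_ABTS);
 *	} else {
 *		iocbq = emlxs_create_close_xri_cn(port, ndlp,
 *		    sbp->iotag, cp);
 *	}
 */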
4192 
4193 
4194 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4195 extern IOCBQ *
4196 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4197     CHANNEL *cp)
4198 {
4199 	emlxs_hba_t *hba = HBA;
4200 	IOCBQ *iocbq;
4201 	IOCB *iocb;
4202 	emlxs_wqe_t *wqe;
4203 	uint16_t abort_iotag;
4204 
4205 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4206 		return (NULL);
4207 	}
4208 
4209 	iocbq->channel = (void *)cp;
4210 	iocbq->port = (void *)port;
4211 	iocbq->node = (void *)ndlp;
4212 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4213 
4214 	/*
4215 	 * Set up an iotag from the reserved out-of-range abort iotags.
4216 	 */
4217 	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4218 		hba->fc_oor_iotag = hba->max_iotag;
4219 	}
4220 	abort_iotag = hba->fc_oor_iotag++;
4221 
4222 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4223 		wqe = &iocbq->wqe;
4224 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4225 		wqe->un.Abort.IA = 1;
4226 		wqe->RequestTag = abort_iotag;
4227 		wqe->AbortTag = xid;
4228 		wqe->Command = CMD_ABORT_XRI_CX;
4229 		wqe->Class = CLASS3;
4230 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4231 		wqe->CmdType = WQE_TYPE_ABORT;
4232 	} else {
4233 		iocb = &iocbq->iocb;
4234 		iocb->ULPCONTEXT = xid;
4235 		iocb->ULPIOTAG = abort_iotag;
4236 		iocb->ULPLE = 1;
4237 		iocb->ULPCLASS = 0;
4238 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
4239 		iocb->ULPOWNER = OWN_CHIP;
4240 	}
4241 
4242 	return (iocbq);
4243 
4244 } /* emlxs_create_close_xri_cx() */
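/*
 * Editorial sketch of the abort-iotag scheme shared by the four
 * constructors above: request tags for abort/close commands are drawn
 * from a dedicated out-of-range window starting at max_iotag, so they
 * cannot collide with the fc_table iotags of live commands.  Modeled
 * minimally below as a hypothetical helper (callers hold
 * EMLXS_FCTAB_LOCK, which serializes the increment):
 *
 *	static uint16_t
 *	next_abort_iotag(emlxs_hba_t *hba)
 *	{
 *		if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
 *			hba->fc_oor_iotag = hba->max_iotag;
 *		}
 *		return (hba->fc_oor_iotag++);
 *	}
 */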
4245 
4246 
4247 void
4248 emlxs_close_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4249 {
4250 	CHANNEL *cp;
4251 	IOCBQ *iocbq;
4252 	IOCB *iocb;
4253 
4254 	if (rxid == 0 || rxid == 0xFFFF) {
4255 		return;
4256 	}
4257 
4258 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4259 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4260 		    "Closing ELS exchange: xid=%x", rxid);
4261 
4262 		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4263 			return;
4264 		}
4265 	}
4266 
4267 	cp = &hba->chan[hba->channel_els];
4268 
4269 	mutex_enter(&EMLXS_FCTAB_LOCK);
4270 
4271 	/* Create the close XRI IOCB */
4272 	iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4273 
4274 	mutex_exit(&EMLXS_FCTAB_LOCK);
4275 
4276 	if (iocbq) {
4277 		iocb = &iocbq->iocb;
4278 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4279 		    "Closing ELS exchange: xid=%x iotag=%d", rxid,
4280 		    iocb->ULPIOTAG);
4281 
4282 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4283 	}
4284 
4285 } /* emlxs_close_els_exchange() */
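/*
 * Editorial note: unlike the abort routines that follow, closing an
 * exchange never needs to put an ABTS on the wire, so this routine
 * uses emlxs_create_close_xri_cx() unconditionally, with no check of
 * the link state.
 */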
4286 
4287 
4288 void
4289 emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4290 {
4291 	CHANNEL *cp;
4292 	IOCBQ *iocbq;
4293 	IOCB *iocb;
4294 
4295 	if (rxid == 0 || rxid == 0xFFFF) {
4296 		return;
4297 	}
4298 
4299 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4300 
4301 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4302 		    "Aborting ELS exchange: xid=%x", rxid);
4303 
4304 		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4305 			/* We currently have no way to abort an */
4306 			/* unsolicited exchange that we have not yet */
4307 			/* responded to, so just return for now. */
4308 			return;
4309 		}
4310 	}
4311 
4312 	cp = &hba->chan[hba->channel_els];
4313 
4314 	mutex_enter(&EMLXS_FCTAB_LOCK);
4315 
4316 	/* Create an abort IOCB, or a close IOCB if the link is down */
4317 	if (hba->state >= FC_LINK_UP) {
4318 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4319 		    CLASS3, ABORT_TYPE_ABTS);
4320 	} else {
4321 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4322 	}
4323 
4324 	mutex_exit(&EMLXS_FCTAB_LOCK);
4325 
4326 	if (iocbq) {
4327 		iocb = &iocbq->iocb;
4328 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4329 		    "Aborting ELS exchange: xid=%x iotag=%d", rxid,
4330 		    iocb->ULPIOTAG);
4331 
4332 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4333 	}
4334 
4335 } /* emlxs_abort_els_exchange() */
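/*
 * Editorial note: on SLI-4, a zero return from
 * emlxs_sli4_unreserve_xri() is taken here to mean the reserved XRI
 * went back to the pool and there is nothing left to abort, so both
 * exchange-abort routines short-circuit:
 *
 *	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE &&
 *	    emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
 *		return;
 *	}
 *
 * emlxs_abort_ct_exchange() below follows the same flow on the CT
 * channel instead of the ELS channel.
 */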
4336 
4337 
4338 void
4339 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4340 {
4341 	CHANNEL *cp;
4342 	IOCBQ *iocbq;
4343 	IOCB *iocb;
4344 
4345 	if (rxid == 0 || rxid == 0xFFFF) {
4346 		return;
4347 	}
4348 
4349 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4350 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4351 		    "Aborting CT exchange: xid=%x", rxid);
4352 
4353 		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4354 			/* We currently have no way to abort an */
4355 			/* unsolicited exchange that we have not yet */
4356 			/* responded to, so just return for now. */
4357 			return;
4358 		}
4359 	}
4360 
4361 	cp = &hba->chan[hba->channel_ct];
4362 
4363 	mutex_enter(&EMLXS_FCTAB_LOCK);
4364 
4365 	/* Create the abort IOCB */
4366 	/* Create an abort IOCB, or a close IOCB if the link is down */
4367 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4368 		    CLASS3, ABORT_TYPE_ABTS);
4369 	} else {
4370 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4371 	}
4372 
4373 	mutex_exit(&EMLXS_FCTAB_LOCK);
4374 
4375 	if (iocbq) {
4376 		iocb = &iocbq->iocb;
4377 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4378 		    "Aborting CT exchange: xid=%x iotag=%d", rxid,
4379 		    iocb->ULPIOTAG);
4380 
4381 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4382 	}
4383 
4384 } /* emlxs_abort_ct_exchange() */
4385 
4386 
4387 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4388 static void
4389 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4390     uint8_t *flag, emlxs_buf_t *fpkt)
4391 {
4392 	emlxs_hba_t *hba = HBA;
4393 	IOCBQ *iocbq;
4394 	CHANNEL *cp;
4395 	NODELIST *ndlp;
4396 
4397 	cp = (CHANNEL *)sbp->channel;
4398 	ndlp = sbp->node;
4399 
4400 	/* Create an abort XRI IOCB, or a close XRI IOCB if link is down */
4401 	if (hba->state >= FC_LINK_UP) {
4402 		iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
4403 		    CLASS3, ABORT_TYPE_ABTS);
4404 	} else {
4405 		iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
4406 	}
4407 	/*
4408 	 * Add this iocb to our local abort Q so that we do not
4409 	 * have to hold the CHIPQ lock for too long.
4410 	 */
4411 	if (iocbq) {
4412 		if (abort->q_first) {
4413 			((IOCBQ *)abort->q_last)->next = iocbq;
4414 			abort->q_last = (uint8_t *)iocbq;
4415 			abort->q_cnt++;
4416 		} else {
4417 			abort->q_first = (uint8_t *)iocbq;
4418 			abort->q_last = (uint8_t *)iocbq;
4419 			abort->q_cnt = 1;
4420 		}
4421 		iocbq->next = NULL;
4422 	}
4423 
4424 	/* Set the abort state flags on the packet */
4425 	mutex_enter(&sbp->mtx);
4426 
4427 	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4428 
4429 	sbp->ticks = hba->timer_tics + 10;
4430 	sbp->abort_attempts++;
4431 
4432 	flag[cp->channelno] = 1;
4433 
4434 	/*
4435 	 * If the fpkt is already set, then leave it alone.  This
4436 	 * ensures that this pkt is accounted for on only one
4437 	 * fpkt->flush_count.
4438 	 */
4439 	if (!sbp->fpkt && fpkt) {
4440 		mutex_enter(&fpkt->mtx);
4441 		sbp->fpkt = fpkt;
4442 		fpkt->flush_count++;
4443 		mutex_exit(&fpkt->mtx);
4444 	}
4445 
4446 	mutex_exit(&sbp->mtx);
4447 
4448 	return;
4449 
4450 }	/* emlxs_sbp_abort_add() */
4451
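/*
 * Editorial sketch of how callers drain the local abort Q built by
 * emlxs_sbp_abort_add(): aborts are collected together with a
 * per-channel flag array, moved onto the transmit queues, and then
 * only the flagged channels are kicked; this is the pattern visible
 * at the end of emlxs_chipq_lun_flush() above.  MAX_CHANNEL and the
 * surrounding variables are assumptions drawn from that caller:
 *
 *	Q abort;
 *	uint8_t flag[MAX_CHANNEL];
 *	uint32_t channelno;
 *	IOCBQ *iocbq, *next;
 *
 *	bzero(&abort, sizeof (Q));
 *	bzero(flag, sizeof (flag));
 *
 *	(for each matching sbp in fc_table)
 *		emlxs_sbp_abort_add(port, sbp, &abort, flag, fpkt);
 *
 *	iocbq = (IOCBQ *)abort.q_first;
 *	while (iocbq) {
 *		next = (IOCBQ *)iocbq->next;
 *		iocbq->next = NULL;
 *		emlxs_tx_put(iocbq, 1);
 *		iocbq = next;
 *	}
 *
 *	for (channelno = 0; channelno < hba->chan_count; channelno++) {
 *		if (flag[channelno]) {
 *			EMLXS_SLI_ISSUE_IOCB_CMD(hba,
 *			    &hba->chan[channelno], 0);
 *		}
 *	}
 */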