xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_isr.c (revision de81e71e031139a0a7f13b7bf64152c3faa76698)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_isr.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_init.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local Function Prototypes.
55  */
56 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t,
57     uint32_t *);
58 static void ql_spurious_intr(ql_adapter_state_t *, int);
59 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
60     uint32_t *, int);
61 static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
62     uint32_t *, uint32_t *, int);
63 static void ql_fast_fcp_post(ql_srb_t *);
64 static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
65     uint32_t *, int);
66 static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
67     uint32_t *, uint32_t *);
68 static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
69     uint32_t *, uint32_t *);
70 static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
71     ql_head_t *, uint32_t *, uint32_t *);
72 static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
73     ql_head_t *, uint32_t *, uint32_t *);
74 static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
75     ql_head_t *, uint32_t *, uint32_t *);
76 static void ql_immediate_notify_entry(ql_adapter_state_t *,
77     immediate_notify_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
78 static void ql_notify_acknowledge_entry(ql_adapter_state_t *,
79     notify_acknowledge_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
80 static void ql_accept_target_io_entry(ql_adapter_state_t *,
81     atio_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
82 static void ql_continue_target_io_entry(ql_adapter_state_t *,
83     ctio_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
84 static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
85     uint32_t *, uint32_t *);
86 static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
87     ql_head_t *, uint32_t *, uint32_t *);
88 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
89     ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
90 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
91     ql_head_t *, uint32_t *, uint32_t *);
92 static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
93     uint32_t *, uint32_t *);
94 static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
95     ql_head_t *, uint32_t *, uint32_t *);
96 
97 /* TODO: temporary define until defined in LV */
98 #ifndef FC_STATE_8GBIT_SPEED
99 #define	FC_STATE_8GBIT_SPEED		0x0700	/* 8 Gbit/sec */
100 #endif
101 
102 /*
103  * ql_isr
104  *	Process all INTX intr types.
105  *
106  * Input:
107  *	arg1:	adapter state pointer.
108  *
109  * Returns:
110  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
111  *
112  * Context:
113  *	Interrupt or Kernel context, no mailbox commands allowed.
114  */
/* ARGSUSED */
uint_t
ql_isr(caddr_t arg1)
{
	/*
	 * INTX interrupts carry no vector information; delegate to the
	 * common AIF handler with vector 0.
	 */
	return (ql_isr_aif(arg1, 0));
}
121 
122 /*
123  * ql_isr_default
124  *	Process unknown/unvectored intr types
125  *
126  * Input:
127  *	arg1:	adapter state pointer.
128  *	arg2:	interrupt vector.
129  *
130  * Returns:
131  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
132  *
133  * Context:
134  *	Interrupt or Kernel context, no mailbox commands allowed.
135  */
136 /* ARGSUSED */
137 uint_t
138 ql_isr_default(caddr_t arg1, caddr_t arg2)
139 {
140 	ql_adapter_state_t	*ha = (void *)arg1;
141 
142 	EL(ha, "isr_default called: idx=%x\n", arg2);
143 	return (ql_isr_aif(arg1, arg2));
144 }
145 
146 /*
147  * ql_isr_aif
148  *	Process mailbox and I/O command completions.
149  *
150  * Input:
151  *	arg:	adapter state pointer.
152  *	intvec:	interrupt vector.
153  *
154  * Returns:
155  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
156  *
157  * Context:
158  *	Interrupt or Kernel context, no mailbox commands allowed.
159  */
/* ARGSUSED */
uint_t
ql_isr_aif(caddr_t arg, caddr_t intvec)
{
	uint16_t		mbx;
	uint32_t		stat;
	ql_adapter_state_t	*ha = (void *)arg;
	uint32_t		set_flags = 0;	/* task daemon flags to set */
	uint32_t		reset_flags = 0; /* task daemon flags to clear */
	ql_head_t		isr_done_q = {NULL, NULL}; /* completions run after locks drop */
	uint_t			rval = DDI_INTR_UNCLAIMED;
	int			spurious_intr = 0;
	boolean_t		intr = B_FALSE, daemon = B_FALSE;
	/*
	 * While intr_loop is nonzero the helper routines are allowed to
	 * clear the RISC interrupt early (it is passed as their intr_clr
	 * argument); once it decrements to zero the clear is forced at the
	 * bottom of each loop pass instead.
	 */
	int			intr_loop = 4;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		/*
		 * Looks like we are about to go down soon, exit early.
		 */
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
		return (DDI_INTR_UNCLAIMED);
	}
	/* Hold off power-management transitions while in the ISR. */
	ha->busy++;
	QL_PM_UNLOCK(ha);

	/* Acquire interrupt lock. */
	INTR_LOCK(ha);

	if (CFG_IST(ha, CFG_CTRL_2200)) {
		/* ISP2200: istatus/semaphore/mailbox register protocol. */
		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			if (intr_loop) {
				intr_loop--;
			}

			/*
			 * Special Fast Post 2200.  The low two bits of
			 * mailbox[23] flag a fast-post SCSI completion and
			 * the upper bits carry the IOCB handle -- 2200
			 * firmware-specific protocol; see MBX23_* defines.
			 */
			stat = 0;
			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
			    ha->flags & ONLINE) {
				ql_srb_t	*sp;

				mbx = RD16_IO_REG(ha, mailbox[23]);

				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					if (intr_loop) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
					}

					/* Get handle. */
					mbx >>= 4;
					stat = mbx & OSC_INDEX_MASK;

					/* Validate handle. */
					sp = stat < MAX_OUTSTANDING_COMMANDS ?
					    ha->outstanding_cmds[stat] : NULL;

					/*
					 * Only the low 12 bits of the SRB
					 * handle are available here, so the
					 * match is on (handle & 0xfff).
					 */
					if (sp != NULL && (sp->handle & 0xfff)
					    == mbx) {
						ha->outstanding_cmds[stat] =
						    NULL;
						sp->handle = 0;
						sp->flags &=
						    ~SRB_IN_TOKEN_ARRAY;

						/* Set completed status. */
						sp->flags |= SRB_ISP_COMPLETED;

						/* Set completion status */
						sp->pkt->pkt_reason =
						    CS_COMPLETE;

						ql_fast_fcp_post(sp);
					} else if (mbx !=
					    (QL_FCA_BRAND & 0xfff)) {
						if (sp == NULL) {
							EL(ha, "unknown IOCB"
							    " handle=%xh\n",
							    mbx);
						} else {
							EL(ha, "mismatch IOCB"
							    " handle pkt=%xh, "
							    "sp=%xh\n", mbx,
							    sp->handle & 0xfff);
						}

						/*
						 * A bad handle indicates
						 * firmware/driver state
						 * divergence: capture a dump
						 * and schedule an ISP abort
						 * if one is not in progress.
						 */
						(void) ql_binary_fw_dump(ha,
						    FALSE);

						if (!(ha->task_daemon_flags &
						    (ISP_ABORT_NEEDED |
						    ABORT_ISP_ACTIVE))) {
							EL(ha, "ISP Invalid "
							    "handle, "
							    "isp_abort_needed"
							    "\n");
							set_flags |=
							    ISP_ABORT_NEEDED;
						}
					}
				}
			}

			/* No fast-post taken: classify the interrupt. */
			if (stat == 0) {
				/* Check for mailbox interrupt. */
				mbx = RD16_IO_REG(ha, semaphore);
				if (mbx & BIT_0) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					/*
					 * Get mailbox data.  0x4000-0x7fff
					 * is a mailbox command completion,
					 * 0x8000-0xbfff an async event.
					 */
					mbx = RD16_IO_REG(ha, mailbox[0]);
					if (mbx > 0x3fff && mbx < 0x8000) {
						ql_mbx_completion(ha, mbx,
						    &set_flags, &reset_flags,
						    intr_loop);
					} else if (mbx > 0x7fff &&
					    mbx < 0xc000) {
						ql_async_event(ha, mbx,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else {
						EL(ha, "UNKNOWN interrupt "
						    "type\n");
						intr = B_TRUE;
					}
				} else {
					/* Response queue update interrupt. */
					ha->isp_rsp_index = RD16_IO_REG(ha,
					    resp_in);

					if (ha->isp_rsp_index !=
					    ha->rsp_ring_index) {
						ql_response_pkt(ha,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else if (++spurious_intr ==
					    MAX_SPURIOUS_INTR) {
						/*
						 * Process excessive
						 * spurious interrupts
						 */
						ql_spurious_intr(ha,
						    intr_loop);
						EL(ha, "excessive spurious "
						    "interrupts, "
						    "isp_abort_needed\n");
						set_flags |= ISP_ABORT_NEEDED;
					} else {
						intr = B_TRUE;
					}
				}
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			/*
			 * Fold accumulated flag changes into the task daemon
			 * flags under the lock; actual wakeup is deferred
			 * until after INTR_UNLOCK below.
			 */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}
		}
	} else {
		/* Non-2200: single risc-to-host status register protocol. */
		while ((stat = RD32_IO_REG(ha, intr_info_lo)) & RH_RISC_INT) {
			/* Capture FW defined interrupt info */
			mbx = MSW(stat);

			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			if (intr_loop) {
				intr_loop--;
			}

			switch (stat & 0x1ff) {
			case ROM_MBX_SUCCESS:
			case ROM_MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);

				/* Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				break;

			case MBX_SUCCESS:
			case MBX_ERR:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);
				break;

			case ASYNC_EVENT:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_2425)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case RESP_UPDATE:
				if (mbx != ha->rsp_ring_index) {
					ha->isp_rsp_index = mbx;
					ql_response_pkt(ha, &isr_done_q,
					    &set_flags, &reset_flags,
					    intr_loop);
				} else if (++spurious_intr ==
				    MAX_SPURIOUS_INTR) {
					/* Process excessive spurious intr. */
					ql_spurious_intr(ha, intr_loop);
					EL(ha, "excessive spurious "
					    "interrupts, isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
				} else {
					intr = B_TRUE;
				}
				break;

			/*
			 * Fast-post cases: rewrite the low word of stat to
			 * the equivalent MBA_* async event code (keeping the
			 * handle in the upper word) and reuse ql_async_event.
			 */
			case SCSI_FAST_POST_16:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case SCSI_FAST_POST_32:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case CTIO_FAST_POST:
				stat = (stat & 0xffff0000) |
				    MBA_CTIO_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_XMT:
				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV:
				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_BRD:
				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV_ALN:
				stat = (stat & 0xffff0000) |
				    MBA_IP_HDR_DATA_SPLIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case ATIO_UPDATE:
				EL(ha, "unsupported ATIO queue update"
				    " interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			case ATIO_RESP_UPDATE:
				EL(ha, "unsupported ATIO response queue "
				    "update interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			default:
				ql_handle_uncommon_risc_intr(ha, stat,
				    &set_flags);
				intr = B_TRUE;
				break;
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				CFG_IST(ha, CFG_CTRL_2425) ?
				    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
				    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}

			/*
			 * PARITY_ERROR is set by
			 * ql_handle_uncommon_risc_intr(); interrupts are
			 * already disabled there, so stop polling the
			 * status register and exit the ISR.
			 */
			if (ha->flags & PARITY_ERROR) {
				EL(ha, "parity/pause exit\n");
				mbx = RD16_IO_REG(ha, hccr); /* PCI posting */
				break;
			}
		}
	}

	/* Process claimed interrupts during polls. */
	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
		ha->intr_claimed = B_FALSE;
		rval = DDI_INTR_CLAIMED;
	}

	/* Release interrupt lock. */
	INTR_UNLOCK(ha);

	/* Deferred work: wake the task daemon outside the interrupt lock. */
	if (daemon) {
		ql_awaken_task_daemon(ha, NULL, 0, 0);
	}

	/* Run command completion callbacks queued during this pass. */
	if (isr_done_q.first != NULL) {
		ql_done(isr_done_q.first);
	}

	if (rval == DDI_INTR_CLAIMED) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->TotalInterrupts++;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
		    ha->instance);
	}

	/* Drop the power-management busy hold taken on entry. */
	QL_PM_LOCK(ha);
	ha->busy--;
	QL_PM_UNLOCK(ha);

	return (rval);
}
520 
521 /*
522  * ql_handle_uncommon_risc_intr
523  *	Handle an uncommon RISC interrupt.
524  *
525  * Input:
526  *	ha:		adapter state pointer.
527  *	stat:		interrupt status
528  *
529  * Context:
530  *	Interrupt or Kernel context, no mailbox commands allowed.
531  */
static void
ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
    uint32_t *set_flags)
{
	uint16_t	hccr_reg;

	hccr_reg = RD16_IO_REG(ha, hccr);

	/*
	 * NOTE(review): BIT_15/13/11/8 in hccr presumably correspond to
	 * internal parity/pause error conditions -- confirm against the ISP
	 * host command/control register reference.
	 */
	if (stat & RH_RISC_PAUSED ||
	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {

		/*
		 * Flag the parity error so the ISR main loop stops polling
		 * the status register and exits (see ql_isr_aif).
		 */
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= PARITY_ERROR;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Throttle console noise: warn only on the first error or
		 * when the hccr/stat signature changes.
		 */
		if (ha->parity_pause_errors == 0 ||
		    ha->parity_hccr_err != hccr_reg ||
		    ha->parity_stat_err != stat) {
			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
			    ha->instance, hccr_reg, stat,
			    ha->parity_pause_errors);
			ha->parity_hccr_err = hccr_reg;
			ha->parity_stat_err = stat;
		}

		EL(ha, "parity/pause error, isp_abort_needed\n");

		/* Capture firmware state; reset the chip if the dump fails. */
		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
			ql_reset_chip(ha);
		}

		/* Record only the first occurrence in the flash error log. */
		if (ha->parity_pause_errors == 0) {
			(void) ql_flash_errlog(ha, FLASH_ERRLOG_PARITY_ERR,
			    0, MSW(stat), LSW(stat));
		}

		/* Saturating error counter. */
		if (ha->parity_pause_errors < 0xffffffff) {
			ha->parity_pause_errors++;
		}

		*set_flags |= ISP_ABORT_NEEDED;

		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);
	} else {
		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
		    stat, hccr_reg);
	}
}
585 
586 /*
587  * ql_spurious_intr
588  *	Inform Solaris of spurious interrupts.
589  *
590  * Input:
591  *	ha:		adapter state pointer.
592  *	intr_clr:	early interrupt clear
593  *
594  * Context:
595  *	Interrupt or Kernel context, no mailbox commands allowed.
596  */
597 static void
598 ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
599 {
600 	ddi_devstate_t	state;
601 
602 	EL(ha, "Spurious interrupt\n");
603 
604 	/* Disable ISP interrupts. */
605 	WRT16_IO_REG(ha, ictrl, 0);
606 	ADAPTER_STATE_LOCK(ha);
607 	ha->flags &= ~INTERRUPTS_ENABLED;
608 	ADAPTER_STATE_UNLOCK(ha);
609 
610 	/* Clear RISC interrupt */
611 	if (intr_clr) {
612 		CFG_IST(ha, CFG_CTRL_2425) ?
613 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
614 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
615 	}
616 
617 	state = ddi_get_devstate(ha->dip);
618 	if (state == DDI_DEVSTATE_UP) {
619 		/*EMPTY*/
620 		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
621 		    DDI_DEVICE_FAULT, "spurious interrupts");
622 	}
623 }
624 
625 /*
626  * ql_mbx_completion
627  *	Processes mailbox completions.
628  *
629  * Input:
630  *	ha:		adapter state pointer.
631  *	mb0:		Mailbox 0 contents.
632  *	set_flags:	task daemon flags to set.
633  *	reset_flags:	task daemon flags to reset.
634  *	intr_clr:	early interrupt clear
635  *
636  * Context:
637  *	Interrupt context.
638  */
639 /* ARGSUSED */
640 static void
641 ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
642     uint32_t *reset_flags, int intr_clr)
643 {
644 	uint32_t	index;
645 	uint16_t	cnt;
646 
647 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
648 
649 	/* Load return mailbox registers. */
650 	MBX_REGISTER_LOCK(ha);
651 
652 	if (ha->mcp != NULL) {
653 		ha->mcp->mb[0] = mb0;
654 		index = ha->mcp->in_mb & ~MBX_0;
655 
656 		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
657 			index >>= 1;
658 			if (index & MBX_0) {
659 				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
660 				    mailbox[cnt]);
661 			}
662 		}
663 
664 	} else {
665 		EL(ha, "mcp == NULL\n");
666 	}
667 
668 	if (intr_clr) {
669 		/* Clear RISC interrupt. */
670 		CFG_IST(ha, CFG_CTRL_2425) ?
671 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
672 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
673 	}
674 
675 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
676 	if (ha->flags & INTERRUPTS_ENABLED) {
677 		cv_broadcast(&ha->cv_mbx_intr);
678 	}
679 
680 	MBX_REGISTER_UNLOCK(ha);
681 
682 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
683 }
684 
685 /*
686  * ql_async_event
687  *	Processes asynchronous events.
688  *
689  * Input:
690  *	ha:		adapter state pointer.
691  *	mbx:		Mailbox 0 register.
692  *	done_q:		head pointer to done queue.
693  *	set_flags:	task daemon flags to set.
694  *	reset_flags:	task daemon flags to reset.
695  *	intr_clr:	early interrupt clear
696  *
697  * Context:
698  *	Interrupt or Kernel context, no mailbox commands allowed.
699  */
700 static void
701 ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
702     uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
703 {
704 	uint32_t		handle;
705 	uint32_t		index;
706 	uint16_t		cnt;
707 	uint16_t		mb[MAX_MBOX_COUNT];
708 	ql_srb_t		*sp;
709 	port_id_t		s_id;
710 	ql_tgt_t		*tq;
711 	boolean_t		intr = B_TRUE;
712 	ql_adapter_state_t	*vha;
713 
714 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
715 
716 	/* Setup to process fast completion. */
717 	mb[0] = LSW(mbx);
718 	switch (mb[0]) {
719 	case MBA_SCSI_COMPLETION:
720 		handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox[1]),
721 		    RD16_IO_REG(ha, mailbox[2]));
722 		break;
723 
724 	case MBA_CMPLT_1_16BIT:
725 		handle = MSW(mbx);
726 		mb[0] = MBA_SCSI_COMPLETION;
727 		break;
728 
729 	case MBA_CMPLT_1_32BIT:
730 		handle = SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
731 		mb[0] = MBA_SCSI_COMPLETION;
732 		break;
733 
734 	case MBA_CTIO_COMPLETION:
735 	case MBA_IP_COMPLETION:
736 		handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
737 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2])) :
738 		    SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
739 		mb[0] = MBA_SCSI_COMPLETION;
740 		break;
741 
742 	default:
743 		break;
744 	}
745 
746 	/* Handle asynchronous event */
747 	switch (mb[0]) {
748 	case MBA_SCSI_COMPLETION:
749 		QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
750 		    ha->instance);
751 
752 		if (intr_clr) {
753 			/* Clear RISC interrupt */
754 			CFG_IST(ha, CFG_CTRL_2425) ?
755 			    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
756 			    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
757 			intr = B_FALSE;
758 		}
759 
760 		if ((ha->flags & ONLINE) == 0) {
761 			break;
762 		}
763 
764 		/* Get handle. */
765 		index = handle & OSC_INDEX_MASK;
766 
767 		/* Validate handle. */
768 		sp = index < MAX_OUTSTANDING_COMMANDS ?
769 		    ha->outstanding_cmds[index] : NULL;
770 
771 		if (sp != NULL && sp->handle == handle) {
772 			ha->outstanding_cmds[index] = NULL;
773 			sp->handle = 0;
774 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
775 
776 			/* Set completed status. */
777 			sp->flags |= SRB_ISP_COMPLETED;
778 
779 			/* Set completion status */
780 			sp->pkt->pkt_reason = CS_COMPLETE;
781 
782 			if (!(sp->flags & SRB_FCP_CMD_PKT)) {
783 				/* Place block on done queue */
784 				ql_add_link_b(done_q, &sp->cmd);
785 			} else {
786 				ql_fast_fcp_post(sp);
787 			}
788 		} else if (handle != QL_FCA_BRAND) {
789 			if (sp == NULL) {
790 				EL(ha, "%xh unknown IOCB handle=%xh\n",
791 				    mb[0], handle);
792 			} else {
793 				EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
794 				    "sp=%xh\n", mb[0], handle, sp->handle);
795 			}
796 
797 			EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
798 			    "mbx6=%xh, mbx7=%xh\n", mb[0],
799 			    RD16_IO_REG(ha, mailbox[1]),
800 			    RD16_IO_REG(ha, mailbox[2]),
801 			    RD16_IO_REG(ha, mailbox[3]),
802 			    RD16_IO_REG(ha, mailbox[6]),
803 			    RD16_IO_REG(ha, mailbox[7]));
804 
805 			(void) ql_binary_fw_dump(ha, FALSE);
806 
807 			if (!(ha->task_daemon_flags &
808 			    (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
809 				EL(ha, "%xh ISP Invalid handle, "
810 				    "isp_abort_needed\n", mb[0]);
811 				*set_flags |= ISP_ABORT_NEEDED;
812 			}
813 		}
814 		break;
815 
816 	case MBA_RESET:		/* Reset */
817 		EL(ha, "%xh Reset received\n", mb[0]);
818 		*set_flags |= RESET_MARKER_NEEDED;
819 		break;
820 
821 	case MBA_SYSTEM_ERR:		/* System Error */
822 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
823 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
824 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
825 
826 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
827 		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
828 		    "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
829 		    "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
830 		    RD16_IO_REG(ha, mailbox[4]), RD16_IO_REG(ha, mailbox[5]),
831 		    RD16_IO_REG(ha, mailbox[6]), RD16_IO_REG(ha, mailbox[7]),
832 		    RD16_IO_REG(ha, mailbox[8]), RD16_IO_REG(ha, mailbox[9]),
833 		    RD16_IO_REG(ha, mailbox[10]), RD16_IO_REG(ha, mailbox[11]),
834 		    RD16_IO_REG(ha, mailbox[12]));
835 
836 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
837 		    "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
838 		    "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
839 		    mb[0], RD16_IO_REG(ha, mailbox[13]),
840 		    RD16_IO_REG(ha, mailbox[14]), RD16_IO_REG(ha, mailbox[15]),
841 		    RD16_IO_REG(ha, mailbox[16]), RD16_IO_REG(ha, mailbox[17]),
842 		    RD16_IO_REG(ha, mailbox[18]), RD16_IO_REG(ha, mailbox[19]),
843 		    RD16_IO_REG(ha, mailbox[20]), RD16_IO_REG(ha, mailbox[21]),
844 		    RD16_IO_REG(ha, mailbox[22]),
845 		    RD16_IO_REG(ha, mailbox[23]));
846 
847 		if (ha->reg_off->mbox_cnt > 24) {
848 			EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
849 			    "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
850 			    "mbx30=%xh, mbx31=%xh\n", mb[0],
851 			    RD16_IO_REG(ha, mailbox[24]),
852 			    RD16_IO_REG(ha, mailbox[25]),
853 			    RD16_IO_REG(ha, mailbox[26]),
854 			    RD16_IO_REG(ha, mailbox[27]),
855 			    RD16_IO_REG(ha, mailbox[28]),
856 			    RD16_IO_REG(ha, mailbox[29]),
857 			    RD16_IO_REG(ha, mailbox[30]),
858 			    RD16_IO_REG(ha, mailbox[31]));
859 		}
860 
861 		(void) ql_binary_fw_dump(ha, FALSE);
862 
863 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
864 		    mb[2], mb[3]);
865 
866 		*set_flags |= ISP_ABORT_NEEDED;
867 		ha->xioctl->ControllerErrorCount++;
868 		break;
869 
870 	case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
871 		EL(ha, "%xh Request Transfer Error received, "
872 		    "isp_abort_needed\n", mb[0]);
873 
874 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
875 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
876 		    RD16_IO_REG(ha, mailbox[3]));
877 
878 		*set_flags |= ISP_ABORT_NEEDED;
879 		ha->xioctl->ControllerErrorCount++;
880 		break;
881 
882 	case MBA_RSP_TRANSFER_ERR:  /* Response Xfer Err */
883 		EL(ha, "%xh Response Transfer Error received,"
884 		    " isp_abort_needed\n", mb[0]);
885 
886 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
887 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
888 		    RD16_IO_REG(ha, mailbox[3]));
889 
890 		*set_flags |= ISP_ABORT_NEEDED;
891 		ha->xioctl->ControllerErrorCount++;
892 		break;
893 
894 	case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
895 		EL(ha, "%xh Request Queue Wake-up received\n",
896 		    mb[0]);
897 		break;
898 
899 	case MBA_MENLO_ALERT:	/* Menlo Alert Notification */
900 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
901 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
902 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
903 
904 		EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
905 		    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
906 
907 		switch (mb[1]) {
908 		case MLA_LOGIN_OPERATIONAL_FW:
909 			ADAPTER_STATE_LOCK(ha);
910 			ha->flags |= MENLO_LOGIN_OPERATIONAL;
911 			ADAPTER_STATE_UNLOCK(ha);
912 			break;
913 		case MLA_PANIC_RECOVERY:
914 		case MLA_LOGIN_DIAGNOSTIC_FW:
915 		case MLA_LOGIN_GOLDEN_FW:
916 		case MLA_REJECT_RESPONSE:
917 		default:
918 			break;
919 		}
920 		break;
921 
922 	case MBA_LIP_F8:	/* Received a LIP F8. */
923 	case MBA_LIP_RESET:	/* LIP reset occurred. */
924 	case MBA_LIP_OCCURRED:	/* Loop Initialization Procedure */
925 		EL(ha, "%xh LIP received\n", mb[0]);
926 
927 		ADAPTER_STATE_LOCK(ha);
928 		ha->flags &= ~POINT_TO_POINT;
929 		ADAPTER_STATE_UNLOCK(ha);
930 
931 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
932 			*set_flags |= LOOP_DOWN;
933 		}
934 		ql_port_state(ha, FC_STATE_OFFLINE,
935 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
936 
937 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
938 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
939 		}
940 
941 		ha->adapter_stats->lip_count++;
942 
943 		/* Update AEN queue. */
944 		ha->xioctl->TotalLipResets++;
945 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
946 			ql_enqueue_aen(ha, mb[0], NULL);
947 		}
948 		break;
949 
950 	case MBA_LOOP_UP:
951 		if (CFG_IST(ha, (CFG_CTRL_2300|CFG_CTRL_6322|CFG_CTRL_2425))) {
952 			mb[1] = RD16_IO_REG(ha, mailbox[1]);
953 			if (mb[1] == 0) {		/* 1GB */
954 				ha->state = FC_PORT_STATE_MASK(
955 				    ha->state) | FC_STATE_1GBIT_SPEED;
956 				index = 1;
957 			} else if (mb[1] == 1) {	/* 2GB */
958 				ha->state = FC_PORT_STATE_MASK(
959 				    ha->state) | FC_STATE_2GBIT_SPEED;
960 				index = 2;
961 			} else if (mb[1] == 3) {	/* 4GB */
962 				ha->state = FC_PORT_STATE_MASK(
963 				    ha->state) | FC_STATE_4GBIT_SPEED;
964 				index = 4;
965 			} else if (mb[1] == 4) {	/* 8GB */
966 				ha->state = FC_PORT_STATE_MASK(
967 				    ha->state) | FC_STATE_8GBIT_SPEED;
968 				index = 8;
969 			} else {
970 				ha->state = FC_PORT_STATE_MASK(
971 				    ha->state);
972 				index = 0;
973 			}
974 		} else {
975 			ha->state = FC_PORT_STATE_MASK(ha->state) |
976 			    FC_STATE_FULL_SPEED;
977 			index = 1;
978 		}
979 
980 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
981 			vha->state = FC_PORT_STATE_MASK(vha->state) |
982 			    FC_PORT_SPEED_MASK(ha->state);
983 		}
984 		EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
985 
986 		/* Update AEN queue. */
987 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
988 			ql_enqueue_aen(ha, mb[0], NULL);
989 		}
990 		break;
991 
992 	case MBA_LOOP_DOWN:
993 		EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, "
994 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
995 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]));
996 
997 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
998 			*set_flags |= LOOP_DOWN;
999 		}
1000 		ql_port_state(ha, FC_STATE_OFFLINE,
1001 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1002 
1003 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1004 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1005 		}
1006 
1007 		if (CFG_IST(ha, CFG_CTRL_25XX)) {
1008 			ha->sfp_stat = RD16_IO_REG(ha, mailbox[2]);
1009 		}
1010 
1011 		/* Update AEN queue. */
1012 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1013 			ql_enqueue_aen(ha, mb[0], NULL);
1014 		}
1015 		break;
1016 
1017 	case MBA_PORT_UPDATE:
1018 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1019 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1020 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1021 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1022 
1023 		/* Locate port state structure. */
1024 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1025 			if (vha->vp_index == LSB(mb[3])) {
1026 				break;
1027 			}
1028 		}
1029 		if (vha == NULL) {
1030 			break;
1031 		}
1032 		if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1033 		    (CFG_IST(ha, CFG_CTRL_2425) &&
1034 		    (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))) {
1035 			EL(ha, "%xh Port Database Update, Login/Logout "
1036 			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1037 			    mb[0], mb[1], mb[2], mb[3]);
1038 		} else {
1039 			EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1040 			    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1041 			    mb[3]);
1042 			*set_flags |= LOOP_RESYNC_NEEDED;
1043 			*set_flags &= ~LOOP_DOWN;
1044 			*reset_flags |= LOOP_DOWN;
1045 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
1046 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1047 			TASK_DAEMON_LOCK(ha);
1048 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1049 			vha->task_daemon_flags &= ~LOOP_DOWN;
1050 			TASK_DAEMON_UNLOCK(ha);
1051 			ADAPTER_STATE_LOCK(ha);
1052 			vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1053 			ADAPTER_STATE_UNLOCK(ha);
1054 		}
1055 
1056 		/* Update AEN queue. */
1057 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1058 			ql_enqueue_aen(ha, mb[0], NULL);
1059 		}
1060 		break;
1061 
1062 	case MBA_RSCN_UPDATE:
1063 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1064 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1065 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1066 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1067 
1068 		/* Locate port state structure. */
1069 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1070 			if (vha->vp_index == LSB(mb[3])) {
1071 				break;
1072 			}
1073 		}
1074 
1075 		if (vha == NULL) {
1076 			break;
1077 		}
1078 
1079 		if (LSB(mb[1]) == vha->d_id.b.domain &&
1080 		    MSB(mb[2]) == vha->d_id.b.area &&
1081 		    LSB(mb[2]) == vha->d_id.b.al_pa) {
1082 			EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1083 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1084 		} else {
1085 			EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1086 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1087 			if (FC_PORT_STATE_MASK(vha->state) !=
1088 			    FC_STATE_OFFLINE) {
1089 				ql_rcv_rscn_els(vha, &mb[0], done_q);
1090 				TASK_DAEMON_LOCK(ha);
1091 				vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1092 				TASK_DAEMON_UNLOCK(ha);
1093 				*set_flags |= RSCN_UPDATE_NEEDED;
1094 			}
1095 		}
1096 
1097 		/* Update AEN queue. */
1098 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1099 			ql_enqueue_aen(ha, mb[0], NULL);
1100 		}
1101 		break;
1102 
1103 	case MBA_LIP_ERROR:	/* Loop initialization errors. */
1104 		EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1105 		    RD16_IO_REG(ha, mailbox[1]));
1106 		break;
1107 
1108 	case MBA_IP_RECEIVE:
1109 	case MBA_IP_BROADCAST:
1110 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1111 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1112 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
1113 
1114 		EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1115 		    "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1116 
1117 		/* Locate device queue. */
1118 		s_id.b.al_pa = LSB(mb[2]);
1119 		s_id.b.area = MSB(mb[2]);
1120 		s_id.b.domain = LSB(mb[1]);
1121 		if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1122 			EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1123 			break;
1124 		}
1125 
1126 		cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_2425) ?
1127 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1128 		    ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1129 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1130 		    ha->ip_init_ctrl_blk.cb.buf_size[1]));
1131 
1132 		tq->ub_sequence_length = mb[3];
1133 		tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1134 		if (mb[3] % cnt) {
1135 			tq->ub_total_seg_cnt++;
1136 		}
1137 		cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1138 
1139 		for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1140 		    index++) {
1141 			mb[index] = RD16_IO_REG(ha, mailbox[index]);
1142 		}
1143 
1144 		tq->ub_seq_id = ++ha->ub_seq_id;
1145 		tq->ub_seq_cnt = 0;
1146 		tq->ub_frame_ro = 0;
1147 		tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1148 		    (CFG_IST(ha, CFG_CTRL_2425) ? BROADCAST_24XX_HDL :
1149 		    IP_BROADCAST_LOOP_ID) : tq->loop_id);
1150 		ha->rcv_dev_q = tq;
1151 
1152 		for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1153 		    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1154 			if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1155 			    QL_SUCCESS) {
1156 				EL(ha, "ql_ub_frame_hdr failed, "
1157 				    "isp_abort_needed\n");
1158 				*set_flags |= ISP_ABORT_NEEDED;
1159 				break;
1160 			}
1161 		}
1162 		break;
1163 
1164 	case MBA_IP_LOW_WATER_MARK:
1165 	case MBA_IP_RCV_BUFFER_EMPTY:
1166 		EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1167 		    mb[0]);
1168 		*set_flags |= NEED_UNSOLICITED_BUFFERS;
1169 		break;
1170 
1171 	case MBA_IP_HDR_DATA_SPLIT:
1172 		EL(ha, "%xh IP HDR data split received\n", mb[0]);
1173 		break;
1174 
1175 	case MBA_POINT_TO_POINT:
1176 		EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1177 		ADAPTER_STATE_LOCK(ha);
1178 		ha->flags |= POINT_TO_POINT;
1179 		ADAPTER_STATE_UNLOCK(ha);
1180 
1181 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1182 			*set_flags |= LOOP_DOWN;
1183 		}
1184 		ql_port_state(ha, FC_STATE_OFFLINE,
1185 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1186 
1187 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1188 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1189 		}
1190 		break;
1191 
1192 	case MBA_CHG_IN_CONNECTION:
1193 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1194 		if (mb[1] == 2) {
1195 			EL(ha, "%xh Change In Connection received, "
1196 			    "mbx1=%xh\n",  mb[0], mb[1]);
1197 			ADAPTER_STATE_LOCK(ha);
1198 			ha->flags &= ~POINT_TO_POINT;
1199 			ADAPTER_STATE_UNLOCK(ha);
1200 			if (ha->topology & QL_N_PORT) {
1201 				ha->topology = (uint8_t)(ha->topology &
1202 				    ~QL_N_PORT);
1203 				ha->topology = (uint8_t)(ha->topology |
1204 				    QL_NL_PORT);
1205 			}
1206 		} else {
1207 			EL(ha, "%xh Change In Connection received, "
1208 			    "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1209 			*set_flags |= ISP_ABORT_NEEDED;
1210 		}
1211 		break;
1212 
1213 	case MBA_ZIO_UPDATE:
1214 		EL(ha, "%xh ZIO response received\n", mb[0]);
1215 
1216 		ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1217 		ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
1218 		intr = B_FALSE;
1219 		break;
1220 
1221 	case MBA_PORT_BYPASS_CHANGED:
1222 		EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1223 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1224 		/*
1225 		 * Event generated when there is a transition on
1226 		 * port bypass of crystal+.
1227 		 * Mailbox 1:	Bit 0 - External.
1228 		 *		Bit 2 - Internal.
1229 		 * When the bit is 0, the port is bypassed.
1230 		 *
1231 		 * For now we will generate a LIP for all cases.
1232 		 */
1233 		*set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1234 		break;
1235 
1236 	case MBA_RECEIVE_ERROR:
1237 		EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1238 		    mb[0], RD16_IO_REG(ha, mailbox[1]),
1239 		    RD16_IO_REG(ha, mailbox[2]));
1240 		break;
1241 
1242 	case MBA_LS_RJT_SENT:
1243 		EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1244 		    RD16_IO_REG(ha, mailbox[1]));
1245 		break;
1246 
1247 	case MBA_FW_RESTART_COMP:
1248 		EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1249 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1250 		break;
1251 
1252 	default:
1253 		EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1254 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1255 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]));
1256 		break;
1257 	}
1258 
1259 	/* Clear RISC interrupt */
1260 	if (intr && intr_clr) {
1261 		CFG_IST(ha, CFG_CTRL_2425) ?
1262 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1263 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1264 	}
1265 
1266 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1267 }
1268 
/*
 * ql_fast_fcp_post
 *	Fast path for good SCSI I/O completion.
 *
 *	Tears down per-target/per-LUN accounting for a command that
 *	completed with CS_COMPLETE, restarts any queued work, and invokes
 *	the upper-layer completion callback.
 *
 * Input:
 *	sp:	SRB pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 *	NOTE(review): the INTR_UNLOCK/INTR_LOCK pair around the callback
 *	implies the caller holds the interrupt lock on entry — confirm.
 */
static void
ql_fast_fcp_post(ql_srb_t *sp)
{
	ql_adapter_state_t	*ha = sp->ha;
	ql_lun_t		*lq = sp->lun_queue;
	ql_tgt_t		*tq = lq->target_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Fast path is only for good FCP command completions. */
	ASSERT(sp->flags & SRB_FCP_CMD_PKT && ha &&
	    sp->pkt->pkt_reason == CS_COMPLETE);

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/* Decrement outstanding commands on device (guard against 0). */
	if (tq->outcnt != 0) {
		tq->outcnt--;
	}

	if (sp->flags & SRB_FCP_CMD_PKT) {
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
			/*
			 * Clear the flag for this LUN so that
			 * untagged commands can be submitted
			 * for it.
			 */
			lq->flags &= ~LQF_UNTAGGED_PENDING;
		}

		/* Per-LUN outstanding count, also guarded against 0. */
		if (lq->lun_outcnt != 0) {
			lq->lun_outcnt--;
		}
	}

	/* Reset port down retry count on good completion. */
	tq->port_down_retry_count = ha->port_down_retry_count;
	tq->qfull_retry_count = ha->qfull_retry_count;

	/* Remove command from watchdog queue. */
	if (sp->flags & SRB_WATCHDOG_ENABLED) {
		ql_remove_link(&tq->wdg, &sp->wdg);
		sp->flags &= ~SRB_WATCHDOG_ENABLED;
	}

	/*
	 * Restart queued work.  The device queue lock is explicitly
	 * released only on the else path; ql_next() presumably drops it
	 * on the first path — TODO confirm against ql_next().
	 */
	if (lq->cmd.first != NULL) {
		ql_next(ha, lq);
	} else {
		/* Release LU queue specific lock. */
		DEVICE_QUEUE_UNLOCK(tq);
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
		}
	}

	/* Sync buffers if required.  */
	if (sp->flags & SRB_MS_PKT) {
		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Map ISP completion codes. */
	sp->pkt->pkt_expln = FC_EXPLN_NONE;
	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
	sp->pkt->pkt_state = FC_PKT_SUCCESS;

	/*
	 * Now call the pkt completion callback.  Polled commands just
	 * clear SRB_POLL; for callbacks, drop the interrupt lock across
	 * the upper-layer call and reacquire it afterwards.
	 */
	if (sp->flags & SRB_POLL) {
		sp->flags &= ~SRB_POLL;
	} else if (sp->pkt->pkt_comp) {
		INTR_UNLOCK(ha);
		(*sp->pkt->pkt_comp)(sp->pkt);
		INTR_LOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
1356 
1357 /*
1358  * ql_response_pkt
1359  *	Processes response entry.
1360  *
1361  * Input:
1362  *	ha:		adapter state pointer.
1363  *	done_q:		head pointer to done queue.
1364  *	set_flags:	task daemon flags to set.
1365  *	reset_flags:	task daemon flags to reset.
1366  *	intr_clr:	early interrupt clear
1367  *
1368  * Context:
1369  *	Interrupt or Kernel context, no mailbox commands allowed.
1370  */
1371 static void
1372 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags,
1373     uint32_t *reset_flags, int intr_clr)
1374 {
1375 	response_t	*pkt;
1376 	uint32_t	dma_sync_size_1 = 0;
1377 	uint32_t	dma_sync_size_2 = 0;
1378 	int		status = 0;
1379 
1380 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1381 
1382 	/* Clear RISC interrupt */
1383 	if (intr_clr) {
1384 		CFG_IST(ha, CFG_CTRL_2425) ?
1385 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1386 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1387 	}
1388 
1389 	if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) {
1390 		EL(ha, "index error = %xh, isp_abort_needed",
1391 		    ha->isp_rsp_index);
1392 		*set_flags |= ISP_ABORT_NEEDED;
1393 		return;
1394 	}
1395 
1396 	if ((ha->flags & ONLINE) == 0) {
1397 		QL_PRINT_3(CE_CONT, "(%d): not onlne, done\n", ha->instance);
1398 		return;
1399 	}
1400 
1401 	/* Calculate size of response queue entries to sync. */
1402 	if (ha->isp_rsp_index > ha->rsp_ring_index) {
1403 		dma_sync_size_1 = (uint32_t)
1404 		    ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) *
1405 		    RESPONSE_ENTRY_SIZE);
1406 	} else if (ha->isp_rsp_index == 0) {
1407 		dma_sync_size_1 = (uint32_t)
1408 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1409 		    RESPONSE_ENTRY_SIZE);
1410 	} else {
1411 		/* Responses wrap around the Q */
1412 		dma_sync_size_1 = (uint32_t)
1413 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1414 		    RESPONSE_ENTRY_SIZE);
1415 		dma_sync_size_2 = (uint32_t)
1416 		    (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1417 	}
1418 
1419 	/* Sync DMA buffer. */
1420 	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1421 	    (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE +
1422 	    RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1,
1423 	    DDI_DMA_SYNC_FORKERNEL);
1424 	if (dma_sync_size_2) {
1425 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1426 		    RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2,
1427 		    DDI_DMA_SYNC_FORKERNEL);
1428 	}
1429 
1430 	while (ha->rsp_ring_index != ha->isp_rsp_index) {
1431 		pkt = ha->response_ring_ptr;
1432 
1433 		QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1434 		    ha->instance, ha->rsp_ring_index, ha->isp_rsp_index);
1435 		QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8,
1436 		    RESPONSE_ENTRY_SIZE);
1437 
1438 		/* Adjust ring index. */
1439 		ha->rsp_ring_index++;
1440 		if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
1441 			ha->rsp_ring_index = 0;
1442 			ha->response_ring_ptr = ha->response_ring_bp;
1443 		} else {
1444 			ha->response_ring_ptr++;
1445 		}
1446 
1447 		/* Process packet. */
1448 		if (ha->status_srb != NULL && pkt->entry_type !=
1449 		    STATUS_CONT_TYPE) {
1450 			ql_add_link_b(done_q, &ha->status_srb->cmd);
1451 			ha->status_srb = NULL;
1452 		}
1453 
1454 		pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_2425) ?
1455 		    pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1456 
1457 		if (pkt->entry_status != 0) {
1458 			ql_error_entry(ha, pkt, done_q, set_flags,
1459 			    reset_flags);
1460 		} else {
1461 			switch (pkt->entry_type) {
1462 			case STATUS_TYPE:
1463 				status |= CFG_IST(ha, CFG_CTRL_2425) ?
1464 				    ql_24xx_status_entry(ha,
1465 				    (sts_24xx_entry_t *)pkt, done_q, set_flags,
1466 				    reset_flags) :
1467 				    ql_status_entry(ha, (sts_entry_t *)pkt,
1468 				    done_q, set_flags, reset_flags);
1469 				break;
1470 
1471 			case STATUS_CONT_TYPE:
1472 				ql_status_cont_entry(ha,
1473 				    (sts_cont_entry_t *)pkt, done_q, set_flags,
1474 				    reset_flags);
1475 				break;
1476 
1477 			case IMMEDIATE_NOTIFY_TYPE:
1478 				if (CFG_IST(ha, CFG_ENABLE_TARGET_MODE)) {
1479 					ql_immediate_notify_entry(ha,
1480 					    (immediate_notify_entry_t *)pkt,
1481 					    done_q, set_flags, reset_flags);
1482 				}
1483 				break;
1484 
1485 			case NOTIFY_ACKNOWLEDGE_TYPE:
1486 				if (CFG_IST(ha, CFG_ENABLE_TARGET_MODE)) {
1487 					ql_notify_acknowledge_entry(ha,
1488 					    (notify_acknowledge_entry_t *)pkt,
1489 					    done_q, set_flags, reset_flags);
1490 				}
1491 				break;
1492 
1493 			case ATIO_TYPE:
1494 				if (CFG_IST(ha, CFG_ENABLE_TARGET_MODE)) {
1495 					ql_accept_target_io_entry(ha,
1496 					    (atio_entry_t *)pkt, done_q,
1497 					    set_flags, reset_flags);
1498 				}
1499 				break;
1500 
1501 			case CTIO_TYPE_2:
1502 			case CTIO_TYPE_3:
1503 				if (CFG_IST(ha, CFG_ENABLE_TARGET_MODE)) {
1504 					ql_continue_target_io_entry(ha,
1505 					    (ctio_entry_t *)pkt, done_q,
1506 					    set_flags, reset_flags);
1507 				}
1508 				break;
1509 
1510 			case IP_TYPE:
1511 			case IP_A64_TYPE:
1512 			case IP_CMD_TYPE:
1513 				ql_ip_entry(ha, (ip_entry_t *)pkt, done_q,
1514 				    set_flags, reset_flags);
1515 				break;
1516 			case IP_RECEIVE_TYPE:
1517 				ql_ip_rcv_entry(ha,
1518 				    (ip_rcv_entry_t *)pkt, done_q, set_flags,
1519 				    reset_flags);
1520 				break;
1521 			case IP_RECEIVE_CONT_TYPE:
1522 				ql_ip_rcv_cont_entry(ha,
1523 				    (ip_rcv_cont_entry_t *)pkt,	done_q,
1524 				    set_flags, reset_flags);
1525 				break;
1526 			case IP_24XX_RECEIVE_TYPE:
1527 				ql_ip_24xx_rcv_entry(ha,
1528 				    (ip_rcv_24xx_entry_t *)pkt, done_q,
1529 				    set_flags, reset_flags);
1530 				break;
1531 			case MS_TYPE:
1532 				ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1533 				    set_flags, reset_flags);
1534 				break;
1535 			case REPORT_ID_TYPE:
1536 				ql_report_id_entry(ha, (report_id_1_t *)pkt,
1537 				    done_q, set_flags, reset_flags);
1538 				break;
1539 			case IP_BUF_POOL_TYPE:
1540 			case MARKER_TYPE:
1541 			case VP_MODIFY_TYPE:
1542 			case VP_CONTROL_TYPE:
1543 				break;
1544 			default:
1545 				EL(ha, "Unknown IOCB entry type=%xh\n",
1546 				    pkt->entry_type);
1547 				break;
1548 			}
1549 		}
1550 	}
1551 
1552 	/* Inform RISC of processed responses. */
1553 	WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index);
1554 
1555 	/* RESET packet received delay for possible async event. */
1556 	if (status & BIT_0) {
1557 		drv_usecwait(500000);
1558 	}
1559 
1560 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1561 }
1562 
1563 /*
1564  * ql_error_entry
1565  *	Processes error entry.
1566  *
1567  * Input:
1568  *	ha = adapter state pointer.
1569  *	pkt = entry pointer.
1570  *	done_q = head pointer to done queue.
1571  *	set_flags = task daemon flags to set.
1572  *	reset_flags = task daemon flags to reset.
1573  *
1574  * Context:
1575  *	Interrupt or Kernel context, no mailbox commands allowed.
1576  */
1577 /* ARGSUSED */
1578 static void
1579 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q,
1580     uint32_t *set_flags, uint32_t *reset_flags)
1581 {
1582 	ql_srb_t	*sp;
1583 	uint32_t	index, cnt;
1584 
1585 	if (pkt->entry_type == INVALID_ENTRY_TYPE) {
1586 		EL(ha, "Aborted command\n");
1587 		return;
1588 	}
1589 
1590 	QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance);
1591 	QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1592 
1593 	if (pkt->entry_status & BIT_6) {
1594 		EL(ha, "Request Queue DMA error\n");
1595 	} else if (pkt->entry_status & BIT_5) {
1596 		EL(ha, "Invalid Entry Order\n");
1597 	} else if (pkt->entry_status & BIT_4) {
1598 		EL(ha, "Invalid Entry Count\n");
1599 	} else if (pkt->entry_status & BIT_3) {
1600 		EL(ha, "Invalid Entry Parameter\n");
1601 	} else if (pkt->entry_status & BIT_2) {
1602 		EL(ha, "Invalid Entry Type\n");
1603 	} else if (pkt->entry_status & BIT_1) {
1604 		EL(ha, "Busy\n");
1605 	} else {
1606 		EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1607 	}
1608 
1609 	/* Get handle. */
1610 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1611 	index = cnt & OSC_INDEX_MASK;
1612 
1613 	/* Validate handle. */
1614 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
1615 	    NULL;
1616 
1617 	if (sp != NULL && sp->handle == cnt) {
1618 		ha->outstanding_cmds[index] = NULL;
1619 		sp->handle = 0;
1620 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1621 
1622 		/* Bad payload or header */
1623 		if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1624 			/* Bad payload or header, set error status. */
1625 			sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1626 		} else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1627 			sp->pkt->pkt_reason = CS_QUEUE_FULL;
1628 		} else {
1629 			/* Set error status. */
1630 			sp->pkt->pkt_reason = CS_UNKNOWN;
1631 		}
1632 
1633 		/* Set completed status. */
1634 		sp->flags |= SRB_ISP_COMPLETED;
1635 
1636 		/* Place command on done queue. */
1637 		ql_add_link_b(done_q, &sp->cmd);
1638 
1639 	} else {
1640 		if (sp == NULL) {
1641 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
1642 		} else {
1643 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
1644 			    cnt, sp->handle);
1645 		}
1646 
1647 		(void) ql_binary_fw_dump(ha, FALSE);
1648 
1649 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
1650 		    ABORT_ISP_ACTIVE))) {
1651 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
1652 			*set_flags |= ISP_ABORT_NEEDED;
1653 		}
1654 	}
1655 
1656 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1657 }
1658 
1659 /*
1660  * ql_status_entry
1661  *	Processes received ISP2200-2300 status entry.
1662  *
1663  * Input:
1664  *	ha:		adapter state pointer.
1665  *	pkt:		entry pointer.
1666  *	done_q:		done queue pointer.
1667  *	set_flags:	task daemon flags to set.
1668  *	reset_flags:	task daemon flags to reset.
1669  *
1670  * Returns:
1671  *	BIT_0 = CS_RESET status received.
1672  *
1673  * Context:
1674  *	Interrupt or Kernel context, no mailbox commands allowed.
1675  */
1676 /* ARGSUSED */
1677 static int
1678 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt,
1679     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1680 {
1681 	ql_srb_t		*sp;
1682 	uint32_t		index, cnt;
1683 	uint16_t		comp_status;
1684 	int			rval = 0;
1685 
1686 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1687 
1688 	/* Get handle. */
1689 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1690 	index = cnt & OSC_INDEX_MASK;
1691 
1692 	/* Validate handle. */
1693 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
1694 	    NULL;
1695 
1696 	if (sp != NULL && sp->handle == cnt) {
1697 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1698 		    &pkt->comp_status);
1699 
1700 		/*
1701 		 * We dont care about SCSI QFULLs.
1702 		 */
1703 		if (comp_status == CS_QUEUE_FULL) {
1704 			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1705 			    sp->lun_queue->target_queue->d_id.b24,
1706 			    sp->lun_queue->lun_no);
1707 			comp_status = CS_COMPLETE;
1708 		}
1709 
1710 		/*
1711 		 * 2300 firmware marks completion status as data underrun
1712 		 * for scsi qfulls. Make it transport complete.
1713 		 */
1714 		if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) &&
1715 		    (comp_status == CS_DATA_UNDERRUN) &&
1716 		    (pkt->scsi_status_l != 0)) {
1717 			comp_status = CS_COMPLETE;
1718 		}
1719 
1720 		/*
1721 		 * Workaround T3 issue where we do not get any data xferred
1722 		 * but get back a good status.
1723 		 */
1724 		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
1725 		    comp_status == CS_COMPLETE &&
1726 		    pkt->scsi_status_l == 0 &&
1727 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1728 		    pkt->residual_length == 0 &&
1729 		    sp->fcp &&
1730 		    sp->fcp->fcp_data_len != 0 &&
1731 		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
1732 		    SF_DATA_OUT) {
1733 			comp_status = CS_ABORTED;
1734 		}
1735 
1736 		if (sp->flags & SRB_MS_PKT) {
1737 			/*
1738 			 * Ideally it should never be true. But there
1739 			 * is a bug in FW which upon receiving invalid
1740 			 * parameters in MS IOCB returns it as
1741 			 * status entry and not as ms entry type.
1742 			 */
1743 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1744 			    set_flags, reset_flags);
1745 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1746 			    ha->instance);
1747 			return (0);
1748 		}
1749 
1750 		ha->outstanding_cmds[index] = NULL;
1751 		sp->handle = 0;
1752 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1753 
1754 		/*
1755 		 * Fast path to good SCSI I/O completion
1756 		 */
1757 		if ((comp_status == CS_COMPLETE) &
1758 		    (!pkt->scsi_status_l) &
1759 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1760 			/* Set completed status. */
1761 			sp->flags |= SRB_ISP_COMPLETED;
1762 			sp->pkt->pkt_reason = comp_status;
1763 			ql_fast_fcp_post(sp);
1764 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1765 			    ha->instance);
1766 			return (0);
1767 		}
1768 		rval = ql_status_error(ha, sp, pkt, done_q, set_flags,
1769 		    reset_flags);
1770 	} else {
1771 		if (sp == NULL) {
1772 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
1773 		} else {
1774 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
1775 			    cnt, sp->handle);
1776 		}
1777 
1778 		(void) ql_binary_fw_dump(ha, FALSE);
1779 
1780 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
1781 		    ABORT_ISP_ACTIVE))) {
1782 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
1783 			*set_flags |= ISP_ABORT_NEEDED;
1784 		}
1785 	}
1786 
1787 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1788 
1789 	return (rval);
1790 }
1791 
1792 /*
1793  * ql_24xx_status_entry
1794  *	Processes received ISP24xx status entry.
1795  *
1796  * Input:
1797  *	ha:		adapter state pointer.
1798  *	pkt:		entry pointer.
1799  *	done_q:		done queue pointer.
1800  *	set_flags:	task daemon flags to set.
1801  *	reset_flags:	task daemon flags to reset.
1802  *
1803  * Returns:
1804  *	BIT_0 = CS_RESET status received.
1805  *
1806  * Context:
1807  *	Interrupt or Kernel context, no mailbox commands allowed.
1808  */
1809 /* ARGSUSED */
1810 static int
1811 ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt,
1812     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1813 {
1814 	ql_srb_t		*sp;
1815 	uint32_t		index;
1816 	uint32_t		resp_identifier;
1817 	uint16_t		comp_status;
1818 	int			rval = 0;
1819 
1820 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1821 
1822 	/* Get the response identifier. */
1823 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1824 
1825 	/* extract the outstanding cmds index */
1826 	index = resp_identifier & OSC_INDEX_MASK;
1827 
1828 	/* Validate the index and get the associated srb pointer */
1829 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
1830 	    NULL;
1831 
1832 	if (sp != NULL && sp->handle == resp_identifier) {
1833 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1834 		    &pkt->comp_status);
1835 
1836 		/*
1837 		 * We dont care about SCSI QFULLs.
1838 		 */
1839 		if (comp_status == CS_QUEUE_FULL) {
1840 			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1841 			    sp->lun_queue->target_queue->d_id.b24,
1842 			    sp->lun_queue->lun_no);
1843 			comp_status = CS_COMPLETE;
1844 		}
1845 
1846 		/*
1847 		 * 2300 firmware marks completion status as data underrun
1848 		 * for scsi qfulls. Make it transport complete.
1849 		 */
1850 		if ((comp_status == CS_DATA_UNDERRUN) &&
1851 		    (pkt->scsi_status_l != 0)) {
1852 			comp_status = CS_COMPLETE;
1853 		}
1854 
1855 		/*
1856 		 * Workaround T3 issue where we do not get any data xferred
1857 		 * but get back a good status.
1858 		 */
1859 		if (comp_status == CS_COMPLETE &&
1860 		    pkt->scsi_status_l == 0 &&
1861 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1862 		    pkt->residual_length != 0 &&
1863 		    sp->fcp &&
1864 		    sp->fcp->fcp_data_len != 0 &&
1865 		    sp->fcp->fcp_cntl.cntl_write_data) {
1866 			comp_status = CS_ABORTED;
1867 		}
1868 
1869 		if (sp->flags & SRB_MS_PKT) {
1870 			/*
1871 			 * Ideally it should never be true. But there
1872 			 * is a bug in FW which upon receiving invalid
1873 			 * parameters in MS IOCB returns it as
1874 			 * status entry and not as ms entry type.
1875 			 */
1876 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1877 			    set_flags, reset_flags);
1878 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1879 			    ha->instance);
1880 			return (0);
1881 		}
1882 
1883 		ha->outstanding_cmds[index] = NULL;
1884 		sp->handle = 0;
1885 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1886 
1887 		/*
1888 		 * Fast path to good SCSI I/O completion
1889 		 */
1890 		if ((comp_status == CS_COMPLETE) &
1891 		    (!pkt->scsi_status_l) &
1892 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1893 			/* Set completed status. */
1894 			sp->flags |= SRB_ISP_COMPLETED;
1895 			sp->pkt->pkt_reason = comp_status;
1896 			ql_fast_fcp_post(sp);
1897 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1898 			    ha->instance);
1899 			return (0);
1900 		}
1901 		rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q,
1902 		    set_flags, reset_flags);
1903 	} else {
1904 		if (sp == NULL) {
1905 			EL(ha, "unknown IOCB handle=%xh\n", resp_identifier);
1906 		} else {
1907 			EL(sp->ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
1908 			    resp_identifier, sp->handle);
1909 		}
1910 
1911 		(void) ql_binary_fw_dump(ha, FALSE);
1912 
1913 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
1914 		    ABORT_ISP_ACTIVE))) {
1915 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
1916 			*set_flags |= ISP_ABORT_NEEDED;
1917 		}
1918 	}
1919 
1920 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1921 
1922 	return (rval);
1923 }
1924 
1925 /*
1926  * ql_status_error
1927  *	Processes received ISP status entry error.
1928  *
1929  * Input:
1930  *	ha:		adapter state pointer.
1931  *	sp:		SRB pointer.
1932  *	pkt:		entry pointer.
1933  *	done_q:		done queue pointer.
1934  *	set_flags:	task daemon flags to set.
1935  *	reset_flags:	task daemon flags to reset.
1936  *
1937  * Returns:
1938  *	BIT_0 = CS_RESET status received.
1939  *
1940  * Context:
1941  *	Interrupt or Kernel context, no mailbox commands allowed.
1942  */
1943 /* ARGSUSED */
1944 static int
1945 ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
1946     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1947 {
1948 	uint32_t		sense_sz = 0;
1949 	uint32_t		cnt;
1950 	ql_tgt_t		*tq;
1951 	fcp_rsp_t		*fcpr;
1952 	struct fcp_rsp_info	*rsp;
1953 	int			rval = 0;
1954 
1955 	struct {
1956 		uint8_t		*rsp_info;
1957 		uint8_t		*req_sense_data;
1958 		uint32_t	residual_length;
1959 		uint32_t	fcp_residual_length;
1960 		uint32_t	rsp_info_length;
1961 		uint32_t	req_sense_length;
1962 		uint16_t	comp_status;
1963 		uint8_t		state_flags_l;
1964 		uint8_t		state_flags_h;
1965 		uint8_t		scsi_status_l;
1966 		uint8_t		scsi_status_h;
1967 	} sts;
1968 
1969 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1970 
1971 	if (CFG_IST(ha, CFG_CTRL_2425)) {
1972 		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;
1973 
1974 		/* Setup status. */
1975 		sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1976 		    &pkt24->comp_status);
1977 		sts.scsi_status_l = pkt24->scsi_status_l;
1978 		sts.scsi_status_h = pkt24->scsi_status_h;
1979 
1980 		/* Setup firmware residuals. */
1981 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
1982 		    ddi_get32(ha->hba_buf.acc_handle,
1983 		    (uint32_t *)&pkt24->residual_length) : 0;
1984 
1985 		/* Setup FCP residuals. */
1986 		sts.fcp_residual_length = sts.scsi_status_h &
1987 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
1988 		    ddi_get32(ha->hba_buf.acc_handle,
1989 		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;
1990 
1991 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
1992 		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
1993 		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {
1994 
1995 			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
1996 			    sts.residual_length,
1997 			    pkt24->fcp_rsp_residual_count);
1998 			sts.scsi_status_h = (uint8_t)
1999 			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
2000 		}
2001 
2002 		/* Setup state flags. */
2003 		sts.state_flags_l = pkt24->state_flags_l;
2004 		sts.state_flags_h = pkt24->state_flags_h;
2005 
2006 		if (sp->fcp->fcp_data_len &&
2007 		    (sts.comp_status != CS_DATA_UNDERRUN ||
2008 		    sts.residual_length != sp->fcp->fcp_data_len)) {
2009 			sts.state_flags_h = (uint8_t)
2010 			    (sts.state_flags_h | SF_GOT_BUS |
2011 			    SF_GOT_TARGET | SF_SENT_CMD |
2012 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2013 		} else {
2014 			sts.state_flags_h = (uint8_t)
2015 			    (sts.state_flags_h | SF_GOT_BUS |
2016 			    SF_GOT_TARGET | SF_SENT_CMD |
2017 			    SF_GOT_STATUS);
2018 		}
2019 		if (sp->fcp->fcp_cntl.cntl_write_data) {
2020 			sts.state_flags_l = (uint8_t)
2021 			    (sts.state_flags_l | SF_DATA_OUT);
2022 		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
2023 			sts.state_flags_l = (uint8_t)
2024 			    (sts.state_flags_l | SF_DATA_IN);
2025 		}
2026 		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
2027 			sts.state_flags_l = (uint8_t)
2028 			    (sts.state_flags_l | SF_HEAD_OF_Q);
2029 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
2030 			sts.state_flags_l = (uint8_t)
2031 			    (sts.state_flags_l | SF_ORDERED_Q);
2032 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
2033 			sts.state_flags_l = (uint8_t)
2034 			    (sts.state_flags_l | SF_SIMPLE_Q);
2035 		}
2036 
2037 		/* Setup FCP response info. */
2038 		sts.rsp_info = &pkt24->rsp_sense_data[0];
2039 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2040 			sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
2041 			    (uint32_t *)&pkt24->fcp_rsp_data_length);
2042 			if (sts.rsp_info_length >
2043 			    sizeof (struct fcp_rsp_info)) {
2044 				sts.rsp_info_length =
2045 				    sizeof (struct fcp_rsp_info);
2046 			}
2047 			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
2048 				ql_chg_endian(sts.rsp_info + cnt, 4);
2049 			}
2050 		} else {
2051 			sts.rsp_info_length = 0;
2052 		}
2053 
2054 		/* Setup sense data. */
2055 		sts.req_sense_data =
2056 		    &pkt24->rsp_sense_data[sts.rsp_info_length];
2057 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2058 			sts.req_sense_length =
2059 			    ddi_get32(ha->hba_buf.acc_handle,
2060 			    (uint32_t *)&pkt24->fcp_sense_length);
2061 			sts.state_flags_h = (uint8_t)
2062 			    (sts.state_flags_h | SF_ARQ_DONE);
2063 			sense_sz = (uint32_t)
2064 			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
2065 			    (uintptr_t)sts.req_sense_data);
2066 			for (cnt = 0; cnt < sense_sz; cnt += 4) {
2067 				ql_chg_endian(sts.req_sense_data + cnt, 4);
2068 			}
2069 		} else {
2070 			sts.req_sense_length = 0;
2071 		}
2072 	} else {
2073 		/* Setup status. */
2074 		sts.comp_status = (uint16_t)ddi_get16(
2075 		    ha->hba_buf.acc_handle, &pkt23->comp_status);
2076 		sts.scsi_status_l = pkt23->scsi_status_l;
2077 		sts.scsi_status_h = pkt23->scsi_status_h;
2078 
2079 		/* Setup firmware residuals. */
2080 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2081 		    ddi_get32(ha->hba_buf.acc_handle,
2082 		    (uint32_t *)&pkt23->residual_length) : 0;
2083 
2084 		/* Setup FCP residuals. */
2085 		sts.fcp_residual_length = sts.scsi_status_h &
2086 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2087 		    sts.residual_length : 0;
2088 
2089 		/* Setup state flags. */
2090 		sts.state_flags_l = pkt23->state_flags_l;
2091 		sts.state_flags_h = pkt23->state_flags_h;
2092 
2093 		/* Setup FCP response info. */
2094 		sts.rsp_info = &pkt23->rsp_info[0];
2095 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2096 			sts.rsp_info_length = ddi_get16(
2097 			    ha->hba_buf.acc_handle,
2098 			    (uint16_t *)&pkt23->rsp_info_length);
2099 			if (sts.rsp_info_length >
2100 			    sizeof (struct fcp_rsp_info)) {
2101 				sts.rsp_info_length =
2102 				    sizeof (struct fcp_rsp_info);
2103 			}
2104 		} else {
2105 			sts.rsp_info_length = 0;
2106 		}
2107 
2108 		/* Setup sense data. */
2109 		sts.req_sense_data = &pkt23->req_sense_data[0];
2110 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2111 		    ddi_get16(ha->hba_buf.acc_handle,
2112 		    (uint16_t *)&pkt23->req_sense_length) : 0;
2113 	}
2114 
2115 	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);
2116 
2117 	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
2118 	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
2119 	    sizeof (fcp_rsp_t));
2120 
2121 	tq = sp->lun_queue->target_queue;
2122 
2123 	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
2124 	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
2125 		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
2126 	}
2127 	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2128 		fcpr->fcp_u.fcp_status.sense_len_set = 1;
2129 	}
2130 	if (sts.scsi_status_h & FCP_RESID_OVER) {
2131 		fcpr->fcp_u.fcp_status.resid_over = 1;
2132 	}
2133 	if (sts.scsi_status_h & FCP_RESID_UNDER) {
2134 		fcpr->fcp_u.fcp_status.resid_under = 1;
2135 	}
2136 	fcpr->fcp_u.fcp_status.reserved_1 = 0;
2137 
2138 	/* Set ISP completion status */
2139 	sp->pkt->pkt_reason = sts.comp_status;
2140 
2141 	/* Update statistics. */
2142 	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
2143 	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {
2144 
2145 		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
2146 		if (sense_sz > sts.rsp_info_length) {
2147 			sense_sz = sts.rsp_info_length;
2148 		}
2149 
2150 		/* copy response information data. */
2151 		if (sense_sz) {
2152 			ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
2153 			    sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
2154 		}
2155 		fcpr->fcp_response_len = sense_sz;
2156 
2157 		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
2158 		    fcpr->fcp_response_len);
2159 
2160 		switch (*(sts.rsp_info + 3)) {
2161 		case FCP_NO_FAILURE:
2162 			break;
2163 		case FCP_DL_LEN_MISMATCH:
2164 			ha->adapter_stats->d_stats[lobyte(
2165 			    tq->loop_id)].dl_len_mismatches++;
2166 			break;
2167 		case FCP_CMND_INVALID:
2168 			break;
2169 		case FCP_DATA_RO_MISMATCH:
2170 			ha->adapter_stats->d_stats[lobyte(
2171 			    tq->loop_id)].data_ro_mismatches++;
2172 			break;
2173 		case FCP_TASK_MGMT_NOT_SUPPTD:
2174 			break;
2175 		case FCP_TASK_MGMT_FAILED:
2176 			ha->adapter_stats->d_stats[lobyte(
2177 			    tq->loop_id)].task_mgmt_failures++;
2178 			break;
2179 		default:
2180 			break;
2181 		}
2182 	} else {
2183 		/*
2184 		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
2185 		 *   sts.scsi_status_h, sp->pkt->pkt_rsplen);
2186 		 */
2187 		fcpr->fcp_response_len = 0;
2188 	}
2189 
2190 	/* Set reset status received. */
2191 	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
2192 		rval |= BIT_0;
2193 	}
2194 
2195 	if (!(tq->flags & TQF_TAPE_DEVICE) &&
2196 	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
2197 	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
2198 	    ha->task_daemon_flags & LOOP_DOWN) {
2199 		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
2200 		    tq->d_id.b24, sp->lun_queue->lun_no);
2201 
2202 		/* Set retry status. */
2203 		sp->flags |= SRB_RETRY;
2204 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2205 	    tq->port_down_retry_count != 0 &&
2206 	    (sts.comp_status == CS_INCOMPLETE ||
2207 	    sts.comp_status == CS_PORT_UNAVAILABLE ||
2208 	    sts.comp_status == CS_PORT_LOGGED_OUT ||
2209 	    sts.comp_status == CS_PORT_CONFIG_CHG ||
2210 	    sts.comp_status == CS_PORT_BUSY)) {
2211 		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
2212 		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
2213 		    tq->port_down_retry_count);
2214 
2215 		/* Set retry status. */
2216 		sp->flags |= SRB_RETRY;
2217 
2218 		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2219 			/* Acquire device queue lock. */
2220 			DEVICE_QUEUE_LOCK(tq);
2221 
2222 			tq->flags |= TQF_QUEUE_SUSPENDED;
2223 
2224 			/* Decrement port down count. */
2225 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
2226 				tq->port_down_retry_count--;
2227 			}
2228 
2229 			DEVICE_QUEUE_UNLOCK(tq);
2230 
2231 			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
2232 			    == 0 &&
2233 			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2234 			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2235 				sp->ha->adapter_stats->d_stats[lobyte(
2236 				    tq->loop_id)].logouts_recvd++;
2237 				ql_send_logo(sp->ha, tq, done_q);
2238 			}
2239 
2240 			ADAPTER_STATE_LOCK(ha);
2241 			if (ha->port_retry_timer == 0) {
2242 				if ((ha->port_retry_timer =
2243 				    ha->port_down_retry_delay) == 0) {
2244 					*set_flags |=
2245 					    PORT_RETRY_NEEDED;
2246 				}
2247 			}
2248 			ADAPTER_STATE_UNLOCK(ha);
2249 		}
2250 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2251 	    (sts.comp_status == CS_RESET ||
2252 	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
2253 	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
2254 		if (sts.comp_status == CS_RESET) {
2255 			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
2256 			    tq->d_id.b24, sp->lun_queue->lun_no);
2257 		} else if (sts.comp_status == CS_QUEUE_FULL) {
2258 			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
2259 			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
2260 			    tq->qfull_retry_count);
2261 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2262 				tq->flags |= TQF_QUEUE_SUSPENDED;
2263 
2264 				tq->qfull_retry_count--;
2265 
2266 				ADAPTER_STATE_LOCK(ha);
2267 				if (ha->port_retry_timer == 0) {
2268 					if ((ha->port_retry_timer =
2269 					    ha->qfull_retry_delay) ==
2270 					    0) {
2271 						*set_flags |=
2272 						    PORT_RETRY_NEEDED;
2273 					}
2274 				}
2275 				ADAPTER_STATE_UNLOCK(ha);
2276 			}
2277 		} else {
2278 			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
2279 			    tq->d_id.b24, sp->lun_queue->lun_no);
2280 		}
2281 
2282 		/* Set retry status. */
2283 		sp->flags |= SRB_RETRY;
2284 	} else {
2285 		fcpr->fcp_resid =
2286 		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
2287 		    sp->fcp->fcp_data_len : sts.fcp_residual_length;
2288 
2289 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2290 		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {
2291 
2292 			if (sts.scsi_status_l == STATUS_CHECK) {
2293 				sp->pkt->pkt_reason = CS_COMPLETE;
2294 			} else {
2295 				EL(ha, "transport error - "
2296 				    "underrun & invalid resid\n");
2297 				EL(ha, "ssh=%xh, ssl=%xh\n",
2298 				    sts.scsi_status_h, sts.scsi_status_l);
2299 				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
2300 			}
2301 		}
2302 
2303 		/* Ignore firmware underrun error. */
2304 		if (sts.comp_status == CS_DATA_UNDERRUN &&
2305 		    (sts.scsi_status_h & FCP_RESID_UNDER ||
2306 		    (sts.scsi_status_l != STATUS_CHECK &&
2307 		    sts.scsi_status_l != STATUS_GOOD))) {
2308 			sp->pkt->pkt_reason = CS_COMPLETE;
2309 		}
2310 
2311 		if (sp->pkt->pkt_reason != CS_COMPLETE) {
2312 			ha->xioctl->DeviceErrorCount++;
2313 			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
2314 			    "\n", sts.comp_status, tq->d_id.b24,
2315 			    sp->lun_queue->lun_no);
2316 		}
2317 
2318 		/* Set target request sense data. */
2319 		if (sts.scsi_status_l == STATUS_CHECK) {
2320 			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2321 
2322 				if (sp->pkt->pkt_reason == CS_COMPLETE &&
2323 				    sts.req_sense_data[2] != KEY_NO_SENSE &&
2324 				    sts.req_sense_data[2] !=
2325 				    KEY_UNIT_ATTENTION) {
2326 					ha->xioctl->DeviceErrorCount++;
2327 				}
2328 
2329 				sense_sz = sts.req_sense_length;
2330 
2331 				/* Insure data does not exceed buf. */
2332 				if (sp->pkt->pkt_rsplen <=
2333 				    (uint32_t)sizeof (fcp_rsp_t) +
2334 				    fcpr->fcp_response_len) {
2335 					sp->request_sense_length = 0;
2336 				} else {
2337 					sp->request_sense_length = (uint32_t)
2338 					    (sp->pkt->pkt_rsplen -
2339 					    sizeof (fcp_rsp_t) -
2340 					    fcpr->fcp_response_len);
2341 				}
2342 
2343 				if (sense_sz <
2344 				    sp->request_sense_length) {
2345 					sp->request_sense_length =
2346 					    sense_sz;
2347 				}
2348 
2349 				sp->request_sense_ptr = (caddr_t)rsp;
2350 
2351 				sense_sz = (uint32_t)
2352 				    (((uintptr_t)pkt23 +
2353 				    sizeof (sts_entry_t)) -
2354 				    (uintptr_t)sts.req_sense_data);
2355 				if (sp->request_sense_length <
2356 				    sense_sz) {
2357 					sense_sz =
2358 					    sp->request_sense_length;
2359 				}
2360 
2361 				fcpr->fcp_sense_len = sense_sz;
2362 
2363 				/* Move sense data. */
2364 				ddi_rep_get8(ha->hba_buf.acc_handle,
2365 				    (uint8_t *)sp->request_sense_ptr,
2366 				    sts.req_sense_data,
2367 				    (size_t)sense_sz,
2368 				    DDI_DEV_AUTOINCR);
2369 
2370 				sp->request_sense_ptr += sense_sz;
2371 				sp->request_sense_length -= sense_sz;
2372 				if (sp->request_sense_length != 0) {
2373 					ha->status_srb = sp;
2374 				}
2375 			}
2376 
2377 			if (sense_sz != 0) {
2378 				EL(sp->ha, "check condition sense data, "
2379 				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
2380 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
2381 				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
2382 				    sp->lun_queue->lun_no,
2383 				    sts.req_sense_data[0],
2384 				    sts.req_sense_data[1],
2385 				    sts.req_sense_data[2],
2386 				    sts.req_sense_data[3],
2387 				    sts.req_sense_data[4],
2388 				    sts.req_sense_data[5],
2389 				    sts.req_sense_data[6],
2390 				    sts.req_sense_data[7],
2391 				    sts.req_sense_data[8],
2392 				    sts.req_sense_data[9],
2393 				    sts.req_sense_data[10],
2394 				    sts.req_sense_data[11],
2395 				    sts.req_sense_data[12],
2396 				    sts.req_sense_data[13],
2397 				    sts.req_sense_data[14],
2398 				    sts.req_sense_data[15],
2399 				    sts.req_sense_data[16],
2400 				    sts.req_sense_data[17]);
2401 			} else {
2402 				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
2403 				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
2404 			}
2405 		}
2406 	}
2407 
2408 	/* Set completed status. */
2409 	sp->flags |= SRB_ISP_COMPLETED;
2410 
2411 	/* Place command on done queue. */
2412 	if (ha->status_srb == NULL) {
2413 		ql_add_link_b(done_q, &sp->cmd);
2414 	}
2415 
2416 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2417 
2418 	return (rval);
2419 }
2420 
2421 /*
2422  * ql_status_cont_entry
2423  *	Processes status continuation entry.
2424  *
2425  * Input:
2426  *	ha:		adapter state pointer.
2427  *	pkt:		entry pointer.
2428  *	done_q:		done queue pointer.
2429  *	set_flags:	task daemon flags to set.
2430  *	reset_flags:	task daemon flags to reset.
2431  *
2432  * Context:
2433  *	Interrupt or Kernel context, no mailbox commands allowed.
2434  */
2435 /* ARGSUSED */
2436 static void
2437 ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
2438     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2439 {
2440 	uint32_t	sense_sz, index;
2441 	ql_srb_t	*sp = ha->status_srb;
2442 
2443 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2444 
2445 	if (sp != NULL && sp->request_sense_length) {
2446 		if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2447 			sense_sz = sizeof (pkt->req_sense_data);
2448 		} else {
2449 			sense_sz = sp->request_sense_length;
2450 		}
2451 
2452 		if (CFG_IST(ha, CFG_CTRL_2425)) {
2453 			for (index = 0; index < sense_sz; index += 4) {
2454 				ql_chg_endian((uint8_t *)
2455 				    &pkt->req_sense_data[0] + index, 4);
2456 			}
2457 		}
2458 
2459 		/* Move sense data. */
2460 		ddi_rep_get8(ha->hba_buf.acc_handle,
2461 		    (uint8_t *)sp->request_sense_ptr,
2462 		    (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2463 		    DDI_DEV_AUTOINCR);
2464 
2465 		sp->request_sense_ptr += sense_sz;
2466 		sp->request_sense_length -= sense_sz;
2467 
2468 		/* Place command on done queue. */
2469 		if (sp->request_sense_length == 0) {
2470 			ql_add_link_b(done_q, &sp->cmd);
2471 			ha->status_srb = NULL;
2472 		}
2473 	}
2474 
2475 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2476 }
2477 
2478 /*
2479  * ql_immediate_notify_entry
2480  *	Processes immediate notify entry.
2481  *
2482  * Input:
2483  *	ha:		adapter state pointer.
2484  *	pkt:		entry pointer.
2485  *	done_q:		done queue pointer.
2486  *	set_flags:	task daemon flags to set.
2487  *	reset_flags:	task daemon flags to reset.
2488  *
2489  * Context:
2490  *	Interrupt or Kernel context, no mailbox commands allowed.
2491  */
/* ARGSUSED */
static void
ql_immediate_notify_entry(ql_adapter_state_t *ha,
    immediate_notify_entry_t *pkt, ql_head_t *done_q, uint32_t *set_flags,
    uint32_t *reset_flags)
{
	notify_acknowledge_entry_t *nack;
	ql_srb_t		*sp;
	fcp_cmd_t		*fcp;
	tgt_cmd_t		*cmd, *nackcmd;
	ql_tgt_t		*tq;
	fc_unsol_buf_t		*ubp = NULL;
	int			use_ubuffer;
	uint16_t		loop_id;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Capture the notify IOCB in a local command context.  If the
	 * no-sleep allocation fails the entry is silently dropped (no
	 * notify acknowledge is sent for it).
	 */
	cmd = (tgt_cmd_t *)kmem_zalloc(sizeof (tgt_cmd_t), KM_NOSLEEP);
	if (cmd != NULL) {
		cmd->cmd.base_address = cmd;

		/* Save command context. */
		cmd->type = pkt->entry_type;
		cmd->initiator_id_l = pkt->initiator_id_l;
		cmd->initiator_id_h = pkt->initiator_id_h;
		cmd->rx_id = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->sequence_id);
		cmd->status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->status);
		cmd->task_flags_l = pkt->task_flags_l;
		cmd->task_flags_h = pkt->task_flags_h;

		/*
		 * TODO: flushing in case of pkt_status_l of 0x34
		 * needs to be handled properly.
		 */

		EL(ha, "status = %xh\n", cmd->status);

		/*
		 * For immediate notify situations that need attention,
		 * we attempt to put the command in the array of notify
		 * acknowledge slots for future handling.  A LIP RESET
		 * always gets slot 0, since we have to ensure that there's
		 * always a slot available and we can't do any other
		 * processing if a LIP RESET is pending.
		 *
		 * Otherwise, immediate notifies take the next open slot.
		 */
		use_ubuffer = 0;
		nackcmd = NULL;
		/* ql_nack_mtx guards the single pending-LIP-RESET slot. */
		mutex_enter(&ha->ql_nack_mtx);
		switch (cmd->status) {
		case 0x0E:	/* LIP RESET received. */
			if (ha->ql_nack != NULL) {
				/*
				 * We're in the sticky situation of receiving
				 * LIP reset while one is pending.  What appears
				 * to work is to drop the old request and
				 * replace it with the new.  We send a NACK
				 * for the old to replenish the IOCB.
				 */
				nackcmd = ha->ql_nack;
			}
			/* Ownership of cmd transfers to ha->ql_nack here. */
			ha->ql_nack = cmd;
			break;
		case 0x20:	/* Abort task (see FC_UB_FCP_ABORT_TASK). */
		case 0x29:	/* Port logout (see FC_UB_FCP_PORT_LOGOUT). */
		case 0x36:	/* Task management flags in task_flags_h. */
			/* If this isn't NULL, a LIP RESET is outstanding */
			if (ha->ql_nack == NULL) {
				use_ubuffer++;
			}
			break;
		default:
			EL(ha, "unknown status=%xh\n",
			    cmd->status);
			break;
		}
		mutex_exit(&ha->ql_nack_mtx);

		if (use_ubuffer) {
			/*
			 * Get an unsolicited buffer to send the message up in
			 */

			/* Locate a buffer to use. */
			loop_id = (uint16_t)
			    (CFG_IST(ha, CFG_EXT_FW_INTERFACE) ?
			    CHAR_TO_SHORT(pkt->initiator_id_l,
			    pkt->initiator_id_h) : pkt->initiator_id_h);
			if ((tq = ql_loop_id_to_queue(ha, loop_id)) != NULL) {
				ubp = ql_get_unsolicited_buffer(ha,
				    FC_TYPE_SCSI_FCP);
			}
			if (ubp != NULL) {
				ubp->ub_resp_flags = FC_UB_FCP_CDB_FLAG;
				ubp->ub_resp_token = tq;
				sp = ubp->ub_fca_private;

				fcp = (fcp_cmd_t *)ubp->ub_buffer;

				/* Set header. */
				ubp->ub_frame.d_id = ha->d_id.b24;
				/* Set 0x06 for R_CTL_COMMAND */
				ubp->ub_frame.r_ctl = R_CTL_COMMAND;
				ubp->ub_frame.s_id = tq->d_id.b24;
				ubp->ub_frame.rsvd = 0;
				ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ |
				    F_CTL_END_SEQ | F_CTL_SEQ_INITIATIVE;
				ubp->ub_frame.type = FC_TYPE_SCSI_FCP;
				ubp->ub_frame.seq_cnt = 0;
				ubp->ub_frame.df_ctl = 0;
				ubp->ub_frame.seq_id = 0;
				ubp->ub_frame.rx_id = cmd->rx_id;
				ubp->ub_frame.ox_id = (uint16_t)ddi_get16(
				    ha->hba_buf.acc_handle, &pkt->ox_id);
				ubp->ub_frame.ro = 0;

				/* Set command in buffer. */
				bzero((void *)fcp, sizeof (fcp_cmd_t));

				/* LUN goes into the FCP entity address. */
				lobyte(fcp->fcp_ent_addr.ent_addr_0) =
				    pkt->lun_l;
				hibyte(fcp->fcp_ent_addr.ent_addr_0) =
				    pkt->lun_h;

				/* Translate notify status to ULP flags. */
				switch (cmd->status) {
				case 0x29:
					ubp->ub_resp_flags = (uint16_t)
					    (ubp->ub_resp_flags |
					    FC_UB_FCP_PORT_LOGOUT);
					break;
				case 0x20:
					ubp->ub_resp_flags = (uint16_t)
					    (ubp->ub_resp_flags |
					    FC_UB_FCP_ABORT_TASK);
					break;
				case 0x36:
					/* Map task flag bits to FCP cntl. */
					if (pkt->task_flags_h & BIT_7) {
						fcp->fcp_cntl.cntl_kill_tsk =
						    1;
					}
					if (pkt->task_flags_h & BIT_6) {
						fcp->fcp_cntl.cntl_clr_aca = 1;
					}
					if (pkt->task_flags_h & BIT_5) {
						fcp->fcp_cntl.cntl_reset_tgt =
						    1;
					}
					if (pkt->task_flags_h & BIT_4) {
						fcp->fcp_cntl.cntl_reset_lun =
						    1;
					}
					if (pkt->task_flags_h & BIT_2) {
						fcp->fcp_cntl.cntl_clr_tsk = 1;
					}
					if (pkt->task_flags_h & BIT_1) {
						fcp->fcp_cntl.cntl_abort_tsk =
						    1;
					}
					break;
				default:
					EL(ha, "default, no action\n");
					break;
				}

				QL_UB_LOCK(ha);
				sp->flags |= SRB_UB_CALLBACK | SRB_UB_FCP;
				QL_UB_UNLOCK(ha);
				QL_PRINT_3(CE_CONT, "(%d): Sent Up status = "
				    "%xh\n", ha->instance, cmd->status);
				ql_add_link_b(done_q, &sp->cmd);
			}
		}

		/*
		 * NACK the displaced LIP RESET (if any) to replenish its
		 * IOCB, then free its command context.
		 */
		if (nackcmd) {
			if (ql_req_pkt(ha, (request_t **)&nack) ==
			    QL_SUCCESS) {

				ql_notify_acknowledge_iocb(ha, nackcmd, nack);

				nack->flags_l = 0;
				QL_PRINT_3(CE_CONT, "(%d): send clear "
				    "notify_ack: status=%xh, flag=%xh\n",
				    ha->instance, ddi_get16(
				    ha->hba_buf.acc_handle, &nack->status),
				    nack->flags_l);

				/* Issue command to ISP */
				ql_isp_cmd(ha);
			}
			kmem_free(nackcmd, sizeof (tgt_cmd_t));
		}

		/*
		 * ql_nack can only be non-NULL if we got a LIP RESET and
		 * are processing it.  In that case, we don't want to send
		 * a notify acknowledge right now.
		 */
		if (cmd->status != 0x0E) {
			if (ql_req_pkt(ha, (request_t **)&nack) ==
			    QL_SUCCESS) {
				ql_notify_acknowledge_iocb(ha, cmd, nack);

				EL(ha, "send notify_ack: status=%xh "
				    "flag=%xh\n", cmd->status, nack->flags_l);

				/* Issue command to ISP */
				ql_isp_cmd(ha);
			}
			kmem_free(cmd, sizeof (tgt_cmd_t));
		} else {
			/* LIP RESET: defer the NACK to the task daemon. */
			ql_awaken_task_daemon(ha, NULL,
			    LIP_RESET_PENDING, 0);
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
2711 
2712 /*
2713  * ql_notify_acknowledge_entry
2714  *	Processes notify acknowledge entry.
2715  *
2716  * Input:
2717  *	ha:		adapter state pointer.
2718  *	pkt:		entry pointer.
2719  *	done_q:		done queue pointer.
2720  *	set_flags:	task daemon flags to set.
2721  *	reset_flags:	task daemon flags to reset.
2722  *
2723  * Context:
2724  *	Interrupt or Kernel context, no mailbox commands allowed.
2725  */
2726 /* ARGSUSED */
2727 static void
2728 ql_notify_acknowledge_entry(ql_adapter_state_t *ha,
2729     notify_acknowledge_entry_t *pkt, ql_head_t *done_q, uint32_t *set_flags,
2730     uint32_t *reset_flags)
2731 {
2732 	ql_srb_t	*sp;
2733 	uint32_t	index, cnt;
2734 
2735 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2736 
2737 	/* Get handle. */
2738 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
2739 	index = cnt & OSC_INDEX_MASK;
2740 
2741 	/* Validate handle. */
2742 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
2743 	    NULL;
2744 
2745 	if (sp != NULL && sp->handle == cnt) {
2746 		ha->outstanding_cmds[index] = NULL;
2747 		sp->handle = 0;
2748 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2749 
2750 		/* Set completion status */
2751 		sp->pkt->pkt_reason = ddi_get16(ha->hba_buf.acc_handle,
2752 		    &pkt->status) == 1 ? CS_COMPLETE : CS_PORT_UNAVAILABLE;
2753 
2754 		/* Set completed status. */
2755 		sp->flags |= SRB_ISP_COMPLETED;
2756 
2757 		/* Place command on done queue. */
2758 		ql_add_link_b(done_q, &sp->cmd);
2759 
2760 	} else if (cnt != QL_FCA_BRAND) {
2761 		if (sp == NULL) {
2762 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
2763 		} else {
2764 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
2765 			    cnt, sp->handle);
2766 		}
2767 
2768 		(void) ql_binary_fw_dump(ha, FALSE);
2769 
2770 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
2771 		    ABORT_ISP_ACTIVE))) {
2772 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
2773 			*set_flags |= ISP_ABORT_NEEDED;
2774 		}
2775 	}
2776 
2777 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2778 }
2779 
2780 /*
2781  * ql_accept_target_io_entry
2782  *	Processes accept target I/O entry.
2783  *
2784  * Input:
2785  *	ha:		adapter state pointer.
2786  *	pkt:		entry pointer.
2787  *	done_q:		done queue pointer.
2788  *	set_flags:	task daemon flags to set.
2789  *	reset_flags:	task daemon flags to reset.
2790  *
2791  * Context:
2792  *	Interrupt or Kernel context, no mailbox commands allowed.
2793  */
/* ARGSUSED */
static void
ql_accept_target_io_entry(ql_adapter_state_t *ha, atio_entry_t *pkt,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	ctio_entry_t	*ctio;
	atio_entry_t	*atio;
	ql_srb_t	*sp;
	fcp_cmd_t	*fcp;
	ql_tgt_t	*tq;
	uint16_t	loop_id;
	fc_unsol_buf_t	*ubp = NULL;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Locate a buffer to use.  The initiator loop ID is 16 bits with
	 * the extended firmware interface, otherwise only the high byte.
	 */
	loop_id = (uint16_t)(CFG_IST(ha, CFG_EXT_FW_INTERFACE) ?
	    CHAR_TO_SHORT(pkt->initiator_id_l, pkt->initiator_id_h) :
	    pkt->initiator_id_h);
	if ((tq = ql_loop_id_to_queue(ha, loop_id)) != NULL) {
		ubp = ql_get_unsolicited_buffer(ha,
		    FC_TYPE_SCSI_FCP);
	}
	if (ubp != NULL) {
		/* Deliver the inbound FCP command via the ULP buffer. */
		ubp->ub_resp_flags = FC_UB_FCP_CDB_FLAG;
		ubp->ub_resp_token = tq;
		sp = ubp->ub_fca_private;
		fcp = (fcp_cmd_t *)ubp->ub_buffer;

		/* Set header. */
		ubp->ub_frame.d_id = ha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_COMMAND;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ |
		    F_CTL_END_SEQ | F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_SCSI_FCP;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = (uint16_t)ddi_get16(
		    ha->hba_buf.acc_handle, &pkt->rx_id);
		ubp->ub_frame.ox_id = (uint16_t)ddi_get16(
		    ha->hba_buf.acc_handle, &pkt->ox_id);
		ubp->ub_frame.ro = 0;

		/* Set command in buffer. */

		bzero((void *)fcp, sizeof (fcp_cmd_t));

		/* LUN goes into the FCP entity address. */
		lobyte(fcp->fcp_ent_addr.ent_addr_0) = pkt->lun_l;
		hibyte(fcp->fcp_ent_addr.ent_addr_0) = pkt->lun_h;

		/* Queue type and data direction from the ATIO. */
		fcp->fcp_cntl.cntl_qtype = pkt->task_codes;

		if (pkt->execution_codes & BIT_1)
			fcp->fcp_cntl.cntl_read_data = 1;
		if (pkt->execution_codes & BIT_0)
			fcp->fcp_cntl.cntl_write_data = 1;

		/* Copy the CDB out of the DMA entry. */
		ddi_rep_put8(ha->hba_buf.acc_handle, (uint8_t *)&pkt->cdb[0],
		    (uint8_t *)&fcp->fcp_cdb[0], FCP_CDB_SIZE,
		    DDI_DEV_AUTOINCR);

		fcp->fcp_data_len = (int)ddi_get32(
		    ha->hba_buf.acc_handle, (uint32_t *)&pkt->data_length);
		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK | SRB_UB_FCP;
		QL_UB_UNLOCK(ha);
		/* Hand the buffer to the transport via the done queue. */
		ql_add_link_b(done_q, &sp->cmd);
	}

	/* If command not sent to transport layer. */
	if (ubp == NULL) {
		/* No buffer available: answer the exchange with BUSY. */
		if (ql_req_pkt(ha, (request_t **)&ctio) == QL_SUCCESS) {
			ctio->entry_type = CTIO_TYPE_2;
			ctio->initiator_id_l = pkt->initiator_id_l;
			ctio->initiator_id_h = pkt->initiator_id_h;
			ddi_put16(ha->hba_buf.acc_handle, &ctio->rx_id,
			    (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
			    &pkt->rx_id));
			ctio->flags_l = BIT_7 | BIT_6;
			ctio->flags_h = BIT_7 | BIT_1 | BIT_0;
			ctio->timeout = 0xffff;
			ctio->type.s0_32bit.scsi_status_l = STATUS_BUSY;

			/* Issue command to ISP */
			ql_isp_cmd(ha);
		}
	} else {
		/* Replenish the ATIO resource consumed by this entry. */
		if (ql_req_pkt(ha, (request_t **)&atio) == QL_SUCCESS) {
			atio->entry_type = ATIO_TYPE;
			atio->initiator_id_l = pkt->initiator_id_l;
			atio->initiator_id_h = pkt->initiator_id_h;
			ddi_put16(ha->hba_buf.acc_handle, &atio->rx_id,
			    (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
			    &pkt->rx_id));
			atio->lun_l = pkt->lun_l;
			atio->lun_h = pkt->lun_h;

			/* Issue command to ISP */
			ql_isp_cmd(ha);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
2902 
2903 /*
2904  * ql_continue_target_io_entry
2905  *	Processes continue target I/O entry.
2906  *
2907  * Input:
2908  *	ha:		adapter state pointer.
2909  *	pkt:		entry pointer.
2910  *	done_q:		done queue pointer.
2911  *	set_flags:	task daemon flags to set.
2912  *	reset_flags:	task daemon flags to reset.
2913  *
2914  * Context:
2915  *	Interrupt context, no mailbox commands allowed.
2916  */
2917 /* ARGSUSED */
2918 static void
2919 ql_continue_target_io_entry(ql_adapter_state_t *ha, ctio_entry_t *pkt,
2920     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2921 {
2922 	ql_srb_t	*sp;
2923 	uint32_t	index, cnt;
2924 	uint16_t	status;
2925 
2926 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2927 
2928 	/* Get handle. */
2929 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
2930 	index = cnt & OSC_INDEX_MASK;
2931 
2932 	/* Validate handle. */
2933 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
2934 	    NULL;
2935 
2936 	if (sp != NULL && sp->handle == cnt) {
2937 		ha->outstanding_cmds[index] = NULL;
2938 		sp->handle = 0;
2939 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2940 
2941 		/* Set completion status */
2942 		status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2943 		    &pkt->status);
2944 
2945 		switch (status) {
2946 		case 1:
2947 			sp->pkt->pkt_reason = CS_COMPLETE;
2948 			break;
2949 		case 2:
2950 			sp->pkt->pkt_reason = CS_ABORTED;
2951 			break;
2952 		case 9:
2953 			sp->pkt->pkt_reason = CS_DATA_OVERRUN;
2954 			break;
2955 		case 0xa:
2956 		case 0xb:
2957 			sp->pkt->pkt_reason = CS_TIMEOUT;
2958 			break;
2959 		case 0xe:
2960 		case 0x17:
2961 			sp->pkt->pkt_reason = CS_RESET;
2962 			break;
2963 		case 0x10:
2964 			sp->pkt->pkt_reason = CS_DMA_ERROR;
2965 			break;
2966 		case 0x15:
2967 		case 0x28:
2968 		case 0x29:
2969 		case 0x2A:
2970 			sp->pkt->pkt_reason = status;
2971 			break;
2972 		default:
2973 			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
2974 			break;
2975 		}
2976 
2977 		/* Set completed status. */
2978 		sp->flags |= SRB_ISP_COMPLETED;
2979 
2980 		/* Place command on done queue. */
2981 		ql_add_link_b(done_q, &sp->cmd);
2982 
2983 	} else if (cnt != QL_FCA_BRAND) {
2984 		if (sp == NULL) {
2985 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
2986 		} else {
2987 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
2988 			    cnt, sp->handle);
2989 		}
2990 
2991 		(void) ql_binary_fw_dump(ha, FALSE);
2992 
2993 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
2994 		    ABORT_ISP_ACTIVE))) {
2995 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
2996 			*set_flags |= ISP_ABORT_NEEDED;
2997 		}
2998 	}
2999 
3000 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3001 }
3002 
3003 /*
3004  * ql_ip_entry
3005  *	Processes received ISP IP entry.
3006  *
3007  * Input:
3008  *	ha:		adapter state pointer.
3009  *	pkt:		entry pointer.
3010  *	done_q:		done queue pointer.
3011  *	set_flags:	task daemon flags to set.
3012  *	reset_flags:	task daemon flags to reset.
3013  *
3014  * Context:
3015  *	Interrupt or Kernel context, no mailbox commands allowed.
3016  */
/* ARGSUSED */
static void
ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	uint32_t	index, cnt;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get handle. */
	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = cnt & OSC_INDEX_MASK;

	/*
	 * Validate handle: the masked index must be in range and the
	 * SRB stored there must carry the exact handle from the IOCB.
	 */
	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
	    NULL;

	if (sp != NULL && sp->handle == cnt) {
		/* Retire the command from the outstanding array. */
		ha->outstanding_cmds[index] = NULL;
		sp->handle = 0;
		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* 24xx/25xx IOCBs carry status at a different offset. */
			ip_cmd_entry_t	*pkt24 = (ip_cmd_entry_t *)pkt23;

			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->hdl_status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		if (ha->task_daemon_flags & LOOP_DOWN) {
			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
			    tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
			    sp->pkt->pkt_reason, tq->d_id.b24,
			    tq->port_down_retry_count);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
				/* Count the logout and respond with a LOGO. */
				ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(ha, tq, done_q);
			}

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				/*
				 * ADAPTER_STATE lock is taken while holding
				 * the device queue lock; this nesting order
				 * matches the other retry paths in this file.
				 */
				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/*
					 * Zero retry delay means no timer
					 * will fire; ask the task daemon to
					 * retry the port immediately.
					 */
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;
		} else {
			if (sp->pkt->pkt_reason != CS_COMPLETE) {
				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
				    sp->pkt->pkt_reason, tq->d_id.b24);
			}
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		ql_add_link_b(done_q, &sp->cmd);

	} else {
		/* Handle did not resolve to a matching outstanding SRB. */
		if (sp == NULL) {
			EL(ha, "unknown IOCB handle=%xh\n", cnt);
		} else {
			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
			    cnt, sp->handle);
		}

		/* Capture firmware state for post-mortem analysis. */
		(void) ql_binary_fw_dump(ha, FALSE);

		/* Request an ISP abort unless one is already pending/active. */
		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE))) {
			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
3138 
3139 /*
3140  * ql_ip_rcv_entry
3141  *	Processes received ISP IP buffers entry.
3142  *
3143  * Input:
3144  *	ha:		adapter state pointer.
3145  *	pkt:		entry pointer.
3146  *	done_q:		done queue pointer.
3147  *	set_flags:	task daemon flags to set.
3148  *	reset_flags:	task daemon flags to reset.
3149  *
3150  * Context:
3151  *	Interrupt or Kernel context, no mailbox commands allowed.
3152  */
3153 /* ARGSUSED */
3154 static void
3155 ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
3156     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3157 {
3158 	port_id_t	s_id;
3159 	uint16_t	index;
3160 	uint8_t		cnt;
3161 	ql_tgt_t	*tq;
3162 
3163 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3164 
3165 	/* Locate device queue. */
3166 	s_id.b.al_pa = pkt->s_id[0];
3167 	s_id.b.area = pkt->s_id[1];
3168 	s_id.b.domain = pkt->s_id[2];
3169 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
3170 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
3171 		return;
3172 	}
3173 
3174 	tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
3175 	    &pkt->seq_length);
3176 	tq->ub_total_seg_cnt = pkt->segment_count;
3177 	tq->ub_seq_id = ++ha->ub_seq_id;
3178 	tq->ub_seq_cnt = 0;
3179 	tq->ub_frame_ro = 0;
3180 	tq->ub_loop_id = pkt->loop_id;
3181 	ha->rcv_dev_q = tq;
3182 
3183 	for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
3184 	    tq->ub_total_seg_cnt; cnt++) {
3185 
3186 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
3187 		    &pkt->buffer_handle[cnt]);
3188 
3189 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
3190 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
3191 			*set_flags |= ISP_ABORT_NEEDED;
3192 			break;
3193 		}
3194 	}
3195 
3196 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3197 }
3198 
3199 /*
3200  * ql_ip_rcv_cont_entry
3201  *	Processes received ISP IP buffers continuation entry.
3202  *
3203  * Input:
3204  *	ha:		adapter state pointer.
3205  *	pkt:		entry pointer.
3206  *	done_q:		done queue pointer.
3207  *	set_flags:	task daemon flags to set.
3208  *	reset_flags:	task daemon flags to reset.
3209  *
3210  * Context:
3211  *	Interrupt or Kernel context, no mailbox commands allowed.
3212  */
3213 /* ARGSUSED */
3214 static void
3215 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
3216     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3217 {
3218 	uint16_t	index;
3219 	uint8_t		cnt;
3220 	ql_tgt_t	*tq;
3221 
3222 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3223 
3224 	if ((tq = ha->rcv_dev_q) == NULL) {
3225 		EL(ha, "No IP receive device\n");
3226 		return;
3227 	}
3228 
3229 	for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
3230 	    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
3231 
3232 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
3233 		    &pkt->buffer_handle[cnt]);
3234 
3235 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
3236 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
3237 			*set_flags |= ISP_ABORT_NEEDED;
3238 			break;
3239 		}
3240 	}
3241 
3242 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3243 }
3244 
3245 /*
 * ql_ip_24xx_rcv_entry
3247  *	Processes received ISP24xx IP buffers entry.
3248  *
3249  * Input:
3250  *	ha:		adapter state pointer.
3251  *	pkt:		entry pointer.
3252  *	done_q:		done queue pointer.
3253  *	set_flags:	task daemon flags to set.
3254  *	reset_flags:	task daemon flags to reset.
3255  *
3256  * Context:
3257  *	Interrupt or Kernel context, no mailbox commands allowed.
3258  */
3259 /* ARGSUSED */
3260 static void
3261 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
3262     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3263 {
3264 	port_id_t	s_id;
3265 	uint16_t	index;
3266 	uint8_t		cnt;
3267 	ql_tgt_t	*tq;
3268 
3269 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3270 
3271 	/* Locate device queue. */
3272 	s_id.b.al_pa = pkt->s_id[0];
3273 	s_id.b.area = pkt->s_id[1];
3274 	s_id.b.domain = pkt->s_id[2];
3275 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
3276 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
3277 		return;
3278 	}
3279 
3280 	if (tq->ub_total_seg_cnt == 0) {
3281 		tq->ub_sequence_length = (uint16_t)ddi_get16(
3282 		    ha->hba_buf.acc_handle, &pkt->seq_length);
3283 		tq->ub_total_seg_cnt = pkt->segment_count;
3284 		tq->ub_seq_id = ++ha->ub_seq_id;
3285 		tq->ub_seq_cnt = 0;
3286 		tq->ub_frame_ro = 0;
3287 		tq->ub_loop_id = (uint16_t)ddi_get16(
3288 		    ha->hba_buf.acc_handle, &pkt->n_port_hdl);
3289 	}
3290 
3291 	for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
3292 	    tq->ub_total_seg_cnt; cnt++) {
3293 
3294 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
3295 		    &pkt->buffer_handle[cnt]);
3296 
3297 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
3298 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
3299 			*set_flags |= ISP_ABORT_NEEDED;
3300 			break;
3301 		}
3302 	}
3303 
3304 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3305 }
3306 
3307 /*
3308  * ql_ms_entry
3309  *	Processes received Name/Management/CT Pass-Through entry.
3310  *
3311  * Input:
3312  *	ha:		adapter state pointer.
3313  *	pkt23:		entry pointer.
3314  *	done_q:		done queue pointer.
3315  *	set_flags:	task daemon flags to set.
3316  *	reset_flags:	task daemon flags to reset.
3317  *
3318  * Context:
3319  *	Interrupt or Kernel context, no mailbox commands allowed.
3320  */
/* ARGSUSED */
static void
ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t		*sp;
	uint32_t		index, cnt;
	ql_tgt_t		*tq;
	/* Same entry viewed through the 24xx/25xx CT pass-through layout. */
	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get handle. */
	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = cnt & OSC_INDEX_MASK;

	/* Validate handle. */
	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
	    NULL;

	if (sp != NULL && sp->handle == cnt) {
		/*
		 * An MS completion must match a command issued as an MS
		 * packet; otherwise the handle table is corrupt and the
		 * only safe recovery is a full ISP abort.
		 */
		if (!(sp->flags & SRB_MS_PKT)) {
			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
			    sp->flags);
			*set_flags |= ISP_ABORT_NEEDED;
			return;
		}

		/* Retire the command from the outstanding array. */
		ha->outstanding_cmds[index] = NULL;
		sp->handle = 0;
		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_2425)) {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
		    sp->retry_count) {
			EL(ha, "Resouce Unavailable Retry = %d\n",
			    sp->retry_count);

			/* Set retry status. */
			sp->retry_count--;
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				/* Short fixed delay before the retry fires. */
				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					ha->port_retry_timer = 2;
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/*
					 * Zero retry delay: ask the task
					 * daemon to retry immediately.
					 */
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (CFG_IST(ha, CFG_CTRL_2425) &&
		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
			/*
			 * Underrun is tolerated as long as at least a full
			 * CT header came back; treat that case as complete.
			 */
			cnt = ddi_get32(ha->hba_buf.acc_handle,
			    &pkt24->resp_byte_count);
			if (cnt < sizeof (fc_ct_header_t)) {
				EL(ha, "Data underrun\n");
			} else {
				sp->pkt->pkt_reason = CS_COMPLETE;
			}

		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
		}

		if (sp->pkt->pkt_reason == CS_COMPLETE) {
			/*EMPTY*/
			QL_PRINT_3(CE_CONT, "(%d): resp\n", ha->instance);
			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
		}

		/* For nameserver restore command, management change header. */
		/*
		 * NOTE(review): 0xfffffc is the directory/name server
		 * well-known address; for it the command buffer header is
		 * byte-swapped back, otherwise the response buffer header.
		 */
		if ((sp->flags & SRB_RETRY) == 0) {
			tq->d_id.b24 == 0xfffffc ?
			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
			    sp->pkt->pkt_cmd, B_TRUE) :
			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
			    sp->pkt->pkt_resp, B_TRUE);
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	} else {
		/* Handle did not resolve to a matching outstanding SRB. */
		if (sp == NULL) {
			EL(ha, "unknown IOCB handle=%xh\n", cnt);
		} else {
			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
			    cnt, sp->handle);
		}

		/* Capture firmware state for post-mortem analysis. */
		(void) ql_binary_fw_dump(ha, FALSE);

		/* Request an ISP abort unless one is already pending/active. */
		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE))) {
			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
3478 
3479 /*
3480  * ql_report_id_entry
 *	Processes received ISP24xx Report ID Acquisition entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
3486  *	done_q:		done queue pointer.
3487  *	set_flags:	task daemon flags to set.
3488  *	reset_flags:	task daemon flags to reset.
3489  *
3490  * Context:
3491  *	Interrupt or Kernel context, no mailbox commands allowed.
3492  */
3493 /* ARGSUSED */
3494 static void
3495 ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
3496     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3497 {
3498 	ql_adapter_state_t	*vha;
3499 
3500 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3501 
3502 	EL(ha, "format=%d, vp=%d, status=%d\n",
3503 	    pkt->format, pkt->vp_index, pkt->status);
3504 
3505 	if (pkt->format == 1) {
3506 		/* Locate port state structure. */
3507 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
3508 			if (vha->vp_index == pkt->vp_index) {
3509 				break;
3510 			}
3511 		}
3512 		if (vha != NULL && (pkt->status == CS_COMPLETE ||
3513 		    pkt->status == CS_PORT_ID_CHANGE)) {
3514 			*set_flags |= LOOP_RESYNC_NEEDED;
3515 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
3516 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
3517 			TASK_DAEMON_LOCK(ha);
3518 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
3519 			vha->task_daemon_flags &= ~LOOP_DOWN;
3520 			TASK_DAEMON_UNLOCK(ha);
3521 		}
3522 	}
3523 
3524 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3525 }
3526