xref: /linux/drivers/scsi/qla2xxx/qla_isr.c (revision b66451723c45b791fd2824d1b8f62fe498989e23)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 #include "qla_def.h"
7 #include "qla_target.h"
8 #include "qla_gbl.h"
9 
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/cpu.h>
13 #include <linux/t10-pi.h>
14 #include <scsi/scsi_tcq.h>
15 #include <scsi/scsi_bsg_fc.h>
16 #include <scsi/scsi_eh.h>
17 #include <scsi/fc/fc_fs.h>
18 #include <linux/nvme-fc-driver.h>
19 
20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
24 	sts_entry_t *);
25 static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
26 	struct purex_item *item);
27 static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
28 	uint16_t size);
29 static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
30 	void *pkt);
31 static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
32 	void **pkt, struct rsp_que **rsp);
33 
34 static void
35 qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
36 {
37 	void *pkt = &item->iocb;
38 	uint16_t pkt_size = item->size;
39 
40 	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
41 	       "%s: Enter\n", __func__);
42 
43 	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
44 	       "-------- ELS REQ -------\n");
45 	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
46 		       pkt, pkt_size);
47 
48 	fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt, 0);
49 }
50 
51 const char *const port_state_str[] = {
52 	[FCS_UNKNOWN]		= "Unknown",
53 	[FCS_UNCONFIGURED]	= "UNCONFIGURED",
54 	[FCS_DEVICE_DEAD]	= "DEAD",
55 	[FCS_DEVICE_LOST]	= "LOST",
56 	[FCS_ONLINE]		= "ONLINE"
57 };
58 
59 #define SFP_DISABLE_LASER_INITIATED    0x15  /* Sub code of 8070 AEN */
60 #define SFP_ENABLE_LASER_INITIATED     0x16  /* Sub code of 8070 AEN */
61 
62 static inline void display_Laser_info(scsi_qla_host_t *vha,
63 				      u16 mb1, u16 mb2, u16 mb3)
64 {
65 	if (mb1 == SFP_DISABLE_LASER_INITIATED)
66 		ql_log(ql_log_warn, vha, 0xf0a2,
67 		       "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n",
68 		       mb3, mb2);
69 	if (mb1 == SFP_ENABLE_LASER_INITIATED)
70 		ql_log(ql_log_warn, vha, 0xf0a3,
71 		       "SFP temperature (%d C) reached normal operating level. Laser is enabled.\n",
72 		       mb3);
73 }
74 
75 static void
76 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
77 {
78 	struct abts_entry_24xx *abts =
79 	    (struct abts_entry_24xx *)&pkt->iocb;
80 	struct qla_hw_data *ha = vha->hw;
81 	struct els_entry_24xx *rsp_els;
82 	struct abts_entry_24xx *abts_rsp;
83 	dma_addr_t dma;
84 	uint32_t fctl;
85 	int rval;
86 
87 	ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
88 
89 	ql_log(ql_log_warn, vha, 0x0287,
90 	    "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
91 	    abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
92 	    abts->seq_id, abts->seq_cnt);
93 	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
94 	    "-------- ABTS RCV -------\n");
95 	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
96 	    (uint8_t *)abts, sizeof(*abts));
97 
98 	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
99 	    GFP_KERNEL);
100 	if (!rsp_els) {
101 		ql_log(ql_log_warn, vha, 0x0287,
102 		    "Failed to allocate DMA buffer for ABTS/ELS RSP.\n");
103 		return;
104 	}
105 
106 	/* terminate exchange */
107 	rsp_els->entry_type = ELS_IOCB_TYPE;
108 	rsp_els->entry_count = 1;
109 	rsp_els->nport_handle = cpu_to_le16(~0);
110 	rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
111 	rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
112 	ql_dbg(ql_dbg_init, vha, 0x0283,
113 	    "Sending ELS Response to terminate exchange %#x...\n",
114 	    abts->rx_xch_addr_to_abort);
115 	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
116 	    "-------- ELS RSP -------\n");
117 	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
118 	    (uint8_t *)rsp_els, sizeof(*rsp_els));
119 	rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
120 	if (rval) {
121 		ql_log(ql_log_warn, vha, 0x0288,
122 		    "%s: iocb failed to execute -> %x\n", __func__, rval);
123 	} else if (rsp_els->comp_status) {
124 		ql_log(ql_log_warn, vha, 0x0289,
125 		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
126 		    __func__, rsp_els->comp_status,
127 		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
128 	} else {
129 		ql_dbg(ql_dbg_init, vha, 0x028a,
130 		    "%s: abort exchange done.\n", __func__);
131 	}
132 
133 	/* send ABTS response */
134 	abts_rsp = (void *)rsp_els;
135 	memset(abts_rsp, 0, sizeof(*abts_rsp));
136 	abts_rsp->entry_type = ABTS_RSP_TYPE;
137 	abts_rsp->entry_count = 1;
138 	abts_rsp->nport_handle = abts->nport_handle;
139 	abts_rsp->vp_idx = abts->vp_idx;
140 	abts_rsp->sof_type = abts->sof_type & 0xf0;
141 	abts_rsp->rx_xch_addr = abts->rx_xch_addr;
142 	abts_rsp->d_id[0] = abts->s_id[0];
143 	abts_rsp->d_id[1] = abts->s_id[1];
144 	abts_rsp->d_id[2] = abts->s_id[2];
145 	abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
146 	abts_rsp->s_id[0] = abts->d_id[0];
147 	abts_rsp->s_id[1] = abts->d_id[1];
148 	abts_rsp->s_id[2] = abts->d_id[2];
149 	abts_rsp->cs_ctl = abts->cs_ctl;
150 	/* build f_ctl, flipping the Exchange Context bit (bit 23) */
151 	fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
152 	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
153 	abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
154 	abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
155 	abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
156 	abts_rsp->type = FC_TYPE_BLD;
157 	abts_rsp->rx_id = abts->rx_id;
158 	abts_rsp->ox_id = abts->ox_id;
159 	abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
160 	abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
161 	abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
162 	abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
163 	ql_dbg(ql_dbg_init, vha, 0x028b,
164 	    "Sending BA ACC response to ABTS %#x...\n",
165 	    abts->rx_xch_addr_to_abort);
166 	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
167 	    "-------- ABTS RSP -------\n");
168 	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
169 	    (uint8_t *)abts_rsp, sizeof(*abts_rsp));
170 	rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
171 	if (rval) {
172 		ql_log(ql_log_warn, vha, 0x028c,
173 		    "%s: iocb failed to execute -> %x\n", __func__, rval);
174 	} else if (abts_rsp->comp_status) {
175 		ql_log(ql_log_warn, vha, 0x028d,
176 		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
177 		    __func__, abts_rsp->comp_status,
178 		    abts_rsp->payload.error.subcode1,
179 		    abts_rsp->payload.error.subcode2);
180 	} else {
181 		ql_dbg(ql_dbg_init, vha, 0x028e,
182 		    "%s: done.\n", __func__);
183 	}
184 
185 	dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
186 }
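
/*
 * Worked example (illustrative only, compiled out): the f_ctl computation
 * above keeps just the Exchange Context bit (F_CTL bit 23, i.e. bit 7 of
 * f_ctl[2]) of the received frame, inverted.  ~(x | 0x7F) clears bits 0-6
 * and flips bit 7; e.g. a received f_ctl[2] of 0x29 yields a reply
 * f_ctl[2] of 0x80, while 0x98 yields 0x00.
 */
#if 0
static uint8_t example_flip_exchange_context(uint8_t rx_fctl2)
{
	uint32_t fctl = ~(rx_fctl2 | 0x7F) << 16 |
	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;

	return fctl >> 16 & 0xff;	/* reply f_ctl[2] */
}
#endif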
187 
188 /**
189  * __qla_consume_iocb - tell the firmware that the driver has processed
190  *   (consumed) the head IOCB along with its continuation IOCBs from the
191  *   provided response queue.
192  * @vha: host adapter pointer
193  * @pkt: pointer to the current packet.  On return, this pointer shall move
194  *       to the next packet.
195  * @rsp: response queue pointer.
196  *
197  * It is assumed @pkt is the head IOCB, not a continuation IOCB.
198  */
199 void __qla_consume_iocb(struct scsi_qla_host *vha,
200 	void **pkt, struct rsp_que **rsp)
201 {
202 	struct rsp_que *rsp_q = *rsp;
203 	response_t *new_pkt;
204 	uint16_t entry_count_remaining;
205 	struct purex_entry_24xx *purex = *pkt;
206 
207 	entry_count_remaining = purex->entry_count;
208 	while (entry_count_remaining > 0) {
209 		new_pkt = rsp_q->ring_ptr;
210 		*pkt = new_pkt;
211 
212 		rsp_q->ring_index++;
213 		if (rsp_q->ring_index == rsp_q->length) {
214 			rsp_q->ring_index = 0;
215 			rsp_q->ring_ptr = rsp_q->ring;
216 		} else {
217 			rsp_q->ring_ptr++;
218 		}
219 
220 		new_pkt->signature = RESPONSE_PROCESSED;
221 		/* flush signature */
222 		wmb();
223 		--entry_count_remaining;
224 	}
225 }
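
/*
 * Illustrative sketch (compiled out, not part of the driver): the
 * ring-advance pattern used by __qla_consume_iocb() in isolation.
 * Advancing past the last entry wraps ring_index and ring_ptr back to
 * the start of the queue; writing RESPONSE_PROCESSED followed by wmb()
 * is what tells the firmware the entry may be reused.
 */
#if 0
static response_t *example_rsp_ring_advance(struct rsp_que *rsp_q)
{
	response_t *pkt = rsp_q->ring_ptr;

	rsp_q->ring_index++;
	if (rsp_q->ring_index == rsp_q->length) {
		rsp_q->ring_index = 0;		/* wrap to first entry */
		rsp_q->ring_ptr = rsp_q->ring;
	} else {
		rsp_q->ring_ptr++;
	}

	pkt->signature = RESPONSE_PROCESSED;
	wmb();					/* flush signature */
	return rsp_q->ring_ptr;
}
#endif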
226 
227 /**
228  * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
229  *    and save to provided buffer
230  * @vha: host adapter pointer
231  * @pkt: pointer to the Purex IOCB
232  * @rsp: response queue
233  * @buf: buffer to receive the extracted ELS payload
234  * @buf_len: buffer length
235  */
236 int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
237 	void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
238 {
239 	struct purex_entry_24xx *purex = *pkt;
240 	struct rsp_que *rsp_q = *rsp;
241 	sts_cont_entry_t *new_pkt;
242 	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
243 	uint16_t buffer_copy_offset = 0;
244 	uint16_t entry_count_remaining;
245 	u16 tpad;
246 
247 	entry_count_remaining = purex->entry_count;
248 	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
249 		- PURX_ELS_HEADER_SIZE;
250 
251 	/*
252 	 * The payload may not end on a 4-byte boundary.  Round up /
253 	 * pad so there is room to byte-swap before saving the data.
254 	 */
255 	tpad = roundup(total_bytes, 4);
256 
257 	if (buf_len < tpad) {
258 		ql_dbg(ql_dbg_async, vha, 0x5084,
259 		    "%s buffer is too small %d < %d\n",
260 		    __func__, buf_len, tpad);
261 		__qla_consume_iocb(vha, pkt, rsp);
262 		return -EIO;
263 	}
264 
265 	pending_bytes = total_bytes = tpad;
266 	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload))  ?
267 	    sizeof(purex->els_frame_payload) : pending_bytes;
268 
269 	memcpy(buf, &purex->els_frame_payload[0], no_bytes);
270 	buffer_copy_offset += no_bytes;
271 	pending_bytes -= no_bytes;
272 	--entry_count_remaining;
273 
274 	((response_t *)purex)->signature = RESPONSE_PROCESSED;
275 	/* flush signature */
276 	wmb();
277 
278 	do {
279 		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
280 			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
281 			*pkt = new_pkt;
282 
283 			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
284 				ql_log(ql_log_warn, vha, 0x507a,
285 				    "Unexpected IOCB type, partial data 0x%x\n",
286 				    buffer_copy_offset);
287 				break;
288 			}
289 
290 			rsp_q->ring_index++;
291 			if (rsp_q->ring_index == rsp_q->length) {
292 				rsp_q->ring_index = 0;
293 				rsp_q->ring_ptr = rsp_q->ring;
294 			} else {
295 				rsp_q->ring_ptr++;
296 			}
297 			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
298 			    sizeof(new_pkt->data) : pending_bytes;
299 			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
300 				memcpy((buf + buffer_copy_offset), new_pkt->data,
301 				    no_bytes);
302 				buffer_copy_offset += no_bytes;
303 				pending_bytes -= no_bytes;
304 				--entry_count_remaining;
305 			} else {
306 				ql_log(ql_log_warn, vha, 0x5044,
307 				    "Attempt to copy more than we got, truncating at %x\n",
308 				    buffer_copy_offset);
309 				memcpy((buf + buffer_copy_offset), new_pkt->data,
310 				    total_bytes - buffer_copy_offset);
311 			}
312 
313 			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
314 			/* flush signature */
315 			wmb();
316 		}
317 
318 		if (pending_bytes != 0 || entry_count_remaining != 0) {
319 			ql_log(ql_log_fatal, vha, 0x508b,
320 			    "Dropping partial data, underrun bytes = 0x%x, entry cnts 0x%x\n",
321 			    total_bytes, entry_count_remaining);
322 			return -EIO;
323 		}
324 	} while (entry_count_remaining > 0);
325 
326 	be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);
327 
328 	return 0;
329 }
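
/*
 * Usage sketch (hypothetical caller, compiled out): copy an ELS payload
 * into a local buffer.  The buffer must hold at least
 * roundup(payload_len, 4) bytes, otherwise the IOCBs are consumed and
 * -EIO is returned, as implemented above.  The 128-byte size here is an
 * arbitrary example value.
 */
#if 0
static int example_fetch_els_payload(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp)
{
	u8 buf[128];

	return __qla_copy_purex_to_buffer(vha, pkt, rsp, buf, sizeof(buf));
}
#endif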
330 
331 /**
332  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
333  * @irq: interrupt number
334  * @dev_id: SCSI driver HA context
335  *
336  * Called by system whenever the host adapter generates an interrupt.
337  *
338  * Returns handled flag.
339  */
340 irqreturn_t
341 qla2100_intr_handler(int irq, void *dev_id)
342 {
343 	scsi_qla_host_t	*vha;
344 	struct qla_hw_data *ha;
345 	struct device_reg_2xxx __iomem *reg;
346 	int		status;
347 	unsigned long	iter;
348 	uint16_t	hccr;
349 	uint16_t	mb[8];
350 	struct rsp_que *rsp;
351 	unsigned long	flags;
352 
353 	rsp = (struct rsp_que *) dev_id;
354 	if (!rsp) {
355 		ql_log(ql_log_info, NULL, 0x505d,
356 		    "%s: NULL response queue pointer.\n", __func__);
357 		return (IRQ_NONE);
358 	}
359 
360 	ha = rsp->hw;
361 	reg = &ha->iobase->isp;
362 	status = 0;
363 
364 	spin_lock_irqsave(&ha->hardware_lock, flags);
365 	vha = pci_get_drvdata(ha->pdev);
366 	for (iter = 50; iter--; ) {
367 		hccr = rd_reg_word(&reg->hccr);
368 		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
369 			break;
370 		if (hccr & HCCR_RISC_PAUSE) {
371 			if (pci_channel_offline(ha->pdev))
372 				break;
373 
374 			/*
375 			 * Issue a "HARD" reset in order for the RISC interrupt
376 			 * bit to be cleared.  Schedule a big hammer to get
377 			 * out of the RISC PAUSED state.
378 			 */
379 			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
380 			rd_reg_word(&reg->hccr);
381 
382 			ha->isp_ops->fw_dump(vha);
383 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
384 			break;
385 		} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
386 			break;
387 
388 		if (rd_reg_word(&reg->semaphore) & BIT_0) {
389 			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
390 			rd_reg_word(&reg->hccr);
391 
392 			/* Get mailbox data. */
393 			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
394 			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
395 				qla2x00_mbx_completion(vha, mb[0]);
396 				status |= MBX_INTERRUPT;
397 			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
398 				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
399 				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
400 				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
401 				qla2x00_async_event(vha, rsp, mb);
402 			} else {
403 				/*EMPTY*/
404 				ql_dbg(ql_dbg_async, vha, 0x5025,
405 				    "Unrecognized interrupt type (%d).\n",
406 				    mb[0]);
407 			}
408 			/* Release mailbox registers. */
409 			wrt_reg_word(&reg->semaphore, 0);
410 			rd_reg_word(&reg->semaphore);
411 		} else {
412 			qla2x00_process_response_queue(rsp);
413 
414 			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
415 			rd_reg_word(&reg->hccr);
416 		}
417 	}
418 	qla2x00_handle_mbx_completion(ha, status);
419 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
420 
421 	return (IRQ_HANDLED);
422 }
423 
424 bool
425 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
426 {
427 	/* Check for PCI disconnection */
428 	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
429 		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
430 		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
431 		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
432 			qla_schedule_eeh_work(vha);
433 		}
434 		return true;
435 	} else
436 		return false;
437 }
438 
439 bool
440 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
441 {
442 	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
443 }
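
/*
 * Illustrative sketch (compiled out): reads from a surprise-removed PCI
 * function return all-ones, so the interrupt handlers can cheaply detect
 * a disconnect by testing the value they just read, as done above with
 * hccr/host_status.
 */
#if 0
static bool example_read_and_check(scsi_qla_host_t *vha,
				   const __le32 __iomem *regp)
{
	uint32_t val = rd_reg_dword(regp);

	return qla2x00_check_reg32_for_disconnect(vha, val);
}
#endif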
444 
445 /**
446  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
447  * @irq: interrupt number
448  * @dev_id: SCSI driver HA context
449  *
450  * Called by system whenever the host adapter generates an interrupt.
451  *
452  * Returns handled flag.
453  */
454 irqreturn_t
455 qla2300_intr_handler(int irq, void *dev_id)
456 {
457 	scsi_qla_host_t	*vha;
458 	struct device_reg_2xxx __iomem *reg;
459 	int		status;
460 	unsigned long	iter;
461 	uint32_t	stat;
462 	uint16_t	hccr;
463 	uint16_t	mb[8];
464 	struct rsp_que *rsp;
465 	struct qla_hw_data *ha;
466 	unsigned long	flags;
467 
468 	rsp = (struct rsp_que *) dev_id;
469 	if (!rsp) {
470 		ql_log(ql_log_info, NULL, 0x5058,
471 		    "%s: NULL response queue pointer.\n", __func__);
472 		return (IRQ_NONE);
473 	}
474 
475 	ha = rsp->hw;
476 	reg = &ha->iobase->isp;
477 	status = 0;
478 
479 	spin_lock_irqsave(&ha->hardware_lock, flags);
480 	vha = pci_get_drvdata(ha->pdev);
481 	for (iter = 50; iter--; ) {
482 		stat = rd_reg_dword(&reg->u.isp2300.host_status);
483 		if (qla2x00_check_reg32_for_disconnect(vha, stat))
484 			break;
485 		if (stat & HSR_RISC_PAUSED) {
486 			if (unlikely(pci_channel_offline(ha->pdev)))
487 				break;
488 
489 			hccr = rd_reg_word(&reg->hccr);
490 
491 			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
492 				ql_log(ql_log_warn, vha, 0x5026,
493 				    "Parity error -- HCCR=%x, Dumping "
494 				    "firmware.\n", hccr);
495 			else
496 				ql_log(ql_log_warn, vha, 0x5027,
497 				    "RISC paused -- HCCR=%x, Dumping "
498 				    "firmware.\n", hccr);
499 
500 			/*
501 			 * Issue a "HARD" reset in order for the RISC
502 			 * interrupt bit to be cleared.  Schedule a big
503 			 * hammer to get out of the RISC PAUSED state.
504 			 */
505 			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
506 			rd_reg_word(&reg->hccr);
507 
508 			ha->isp_ops->fw_dump(vha);
509 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
510 			break;
511 		} else if ((stat & HSR_RISC_INT) == 0)
512 			break;
513 
514 		switch (stat & 0xff) {
515 		case 0x1:
516 		case 0x2:
517 		case 0x10:
518 		case 0x11:
519 			qla2x00_mbx_completion(vha, MSW(stat));
520 			status |= MBX_INTERRUPT;
521 
522 			/* Release mailbox registers. */
523 			wrt_reg_word(&reg->semaphore, 0);
524 			break;
525 		case 0x12:
526 			mb[0] = MSW(stat);
527 			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
528 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
529 			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
530 			qla2x00_async_event(vha, rsp, mb);
531 			break;
532 		case 0x13:
533 			qla2x00_process_response_queue(rsp);
534 			break;
535 		case 0x15:
536 			mb[0] = MBA_CMPLT_1_16BIT;
537 			mb[1] = MSW(stat);
538 			qla2x00_async_event(vha, rsp, mb);
539 			break;
540 		case 0x16:
541 			mb[0] = MBA_SCSI_COMPLETION;
542 			mb[1] = MSW(stat);
543 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
544 			qla2x00_async_event(vha, rsp, mb);
545 			break;
546 		default:
547 			ql_dbg(ql_dbg_async, vha, 0x5028,
548 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
549 			break;
550 		}
551 		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
552 		rd_reg_word_relaxed(&reg->hccr);
553 	}
554 	qla2x00_handle_mbx_completion(ha, status);
555 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
556 
557 	return (IRQ_HANDLED);
558 }
559 
560 /**
561  * qla2x00_mbx_completion() - Process mailbox command completions.
562  * @vha: SCSI driver HA context
563  * @mb0: Mailbox0 register
564  */
565 static void
566 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
567 {
568 	uint16_t	cnt;
569 	uint32_t	mboxes;
570 	__le16 __iomem *wptr;
571 	struct qla_hw_data *ha = vha->hw;
572 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
573 
574 	/* Read all mbox registers? */
575 	WARN_ON_ONCE(ha->mbx_count > 32);
576 	mboxes = (1ULL << ha->mbx_count) - 1;
577 	if (!ha->mcp)
578 		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
579 	else
580 		mboxes = ha->mcp->in_mb;
581 
582 	/* Load return mailbox registers. */
583 	ha->flags.mbox_int = 1;
584 	ha->mailbox_out[0] = mb0;
585 	mboxes >>= 1;
586 	wptr = MAILBOX_REG(ha, reg, 1);
587 
588 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
589 		if (IS_QLA2200(ha) && cnt == 8)
590 			wptr = MAILBOX_REG(ha, reg, 8);
591 		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
592 			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
593 		else if (mboxes & BIT_0)
594 			ha->mailbox_out[cnt] = rd_reg_word(wptr);
595 
596 		wptr++;
597 		mboxes >>= 1;
598 	}
599 }
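
/*
 * Illustrative sketch (compiled out): ha->mcp->in_mb is a bitmask of the
 * mailbox registers the caller expects back.  Shifting the mask right
 * once per register and testing BIT_0, as the loop above does, visits
 * each expected register exactly once.
 */
#if 0
static unsigned int example_count_expected_mailboxes(uint32_t in_mb)
{
	unsigned int cnt = 0;

	for (; in_mb; in_mb >>= 1)
		if (in_mb & BIT_0)
			cnt++;
	return cnt;
}
#endif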
600 
601 static void
602 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
603 {
604 	static char *event[] =
605 		{ "Complete", "Request Notification", "Time Extension" };
606 	int rval;
607 	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
608 	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
609 	__le16 __iomem *wptr;
610 	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
611 
612 	/* Seed data -- mailbox1 -> mailbox7. */
613 	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
614 		wptr = &reg24->mailbox1;
615 	else if (IS_QLA8044(vha->hw))
616 		wptr = &reg82->mailbox_out[1];
617 	else
618 		return;
619 
620 	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
621 		mb[cnt] = rd_reg_word(wptr);
622 
623 	ql_dbg(ql_dbg_async, vha, 0x5021,
624 	    "Inter-Driver Communication %s -- "
625 	    "%04x %04x %04x %04x %04x %04x %04x.\n",
626 	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
627 	    mb[4], mb[5], mb[6]);
628 	switch (aen) {
629 	/* Handle IDC Error completion case. */
630 	case MBA_IDC_COMPLETE:
631 		if (mb[1] >> 15) {
632 			vha->hw->flags.idc_compl_status = 1;
633 			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
634 				complete(&vha->hw->dcbx_comp);
635 		}
636 		break;
637 
638 	case MBA_IDC_NOTIFY:
639 		/* Acknowledgement needed? [Notify && non-zero timeout]. */
640 		timeout = (descr >> 8) & 0xf;
641 		ql_dbg(ql_dbg_async, vha, 0x5022,
642 		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
643 		    vha->host_no, event[aen & 0xff], timeout);
644 
645 		if (!timeout)
646 			return;
647 		rval = qla2x00_post_idc_ack_work(vha, mb);
648 		if (rval != QLA_SUCCESS)
649 			ql_log(ql_log_warn, vha, 0x5023,
650 			    "IDC failed to post ACK.\n");
651 		break;
652 	case MBA_IDC_TIME_EXT:
653 		vha->hw->idc_extend_tmo = descr;
654 		ql_dbg(ql_dbg_async, vha, 0x5087,
655 		    "%lu Inter-Driver Communication %s -- "
656 		    "Extend timeout by=%d.\n",
657 		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
658 		break;
659 	}
660 }
661 
662 #define LS_UNKNOWN	2
663 const char *
664 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
665 {
666 	static const char *const link_speeds[] = {
667 		"1", "2", "?", "4", "8", "16", "32", "64", "10"
668 	};
669 #define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
670 
671 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
672 		return link_speeds[0];
673 	else if (speed == 0x13)
674 		return link_speeds[QLA_LAST_SPEED];
675 	else if (speed < QLA_LAST_SPEED)
676 		return link_speeds[speed];
677 	else
678 		return link_speeds[LS_UNKNOWN];
679 }
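
/*
 * Usage sketch (compiled out): the returned string is the speed in Gbps,
 * or "?" for an unrecognized code; 0x13 is the firmware's code for 10G.
 * The debug id below is hypothetical.
 */
#if 0
static void example_log_link_speed(scsi_qla_host_t *vha)
{
	ql_log(ql_log_info, vha, 0x0000,
	    "Link speed: %s Gbps.\n",
	    qla2x00_get_link_speed_str(vha->hw, vha->hw->link_data_rate));
}
#endif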
680 
681 static void
682 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
683 {
684 	struct qla_hw_data *ha = vha->hw;
685 
686 	/*
687 	 * 8200 AEN Interpretation:
688 	 * mb[0] = AEN code
689 	 * mb[1] = AEN Reason code
690 	 * mb[2] = LSW of Peg-Halt Status-1 Register
691 	 * mb[6] = MSW of Peg-Halt Status-1 Register
692 	 * mb[3] = LSW of Peg-Halt Status-2 register
693 	 * mb[7] = MSW of Peg-Halt Status-2 register
694 	 * mb[4] = IDC Device-State Register value
695 	 * mb[5] = IDC Driver-Presence Register value
696 	 */
697 	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
698 	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
699 	    mb[0], mb[1], mb[2], mb[6]);
700 	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
701 	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
702 	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
703 
704 	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
705 				IDC_HEARTBEAT_FAILURE)) {
706 		ha->flags.nic_core_hung = 1;
707 		ql_log(ql_log_warn, vha, 0x5060,
708 		    "83XX: F/W Error Reported: Check if reset required.\n");
709 
710 		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
711 			uint32_t protocol_engine_id, fw_err_code, err_level;
712 
713 			/*
714 			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
715 			 *  - PEG-Halt Status-1 Register:
716 			 *	(LSW = mb[2], MSW = mb[6])
717 			 *	Bits 0-7   = protocol-engine ID
718 			 *	Bits 8-28  = f/w error code
719 			 *	Bits 29-31 = Error-level
720 			 *	    Error-level 0x1 = Non-Fatal error
721 			 *	    Error-level 0x2 = Recoverable Fatal error
722 			 *	    Error-level 0x4 = UnRecoverable Fatal error
723 			 *  - PEG-Halt Status-2 Register:
724 			 *	(LSW = mb[3], MSW = mb[7])
725 			 */
726 			protocol_engine_id = (mb[2] & 0xff);
727 			fw_err_code = (((mb[2] & 0xff00) >> 8) |
728 			    ((mb[6] & 0x1fff) << 8));
729 			err_level = ((mb[6] & 0xe000) >> 13);
730 			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
731 			    "Register: protocol_engine_id=0x%x "
732 			    "fw_err_code=0x%x err_level=0x%x.\n",
733 			    protocol_engine_id, fw_err_code, err_level);
734 			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
735 			    "Register: 0x%04x%04x.\n", mb[7], mb[3]);
736 			if (err_level == ERR_LEVEL_NON_FATAL) {
737 				ql_log(ql_log_warn, vha, 0x5063,
738 				    "Not a fatal error, f/w has recovered itself.\n");
739 			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
740 				ql_log(ql_log_fatal, vha, 0x5064,
741 				    "Recoverable Fatal error: Chip reset "
742 				    "required.\n");
743 				qla83xx_schedule_work(vha,
744 				    QLA83XX_NIC_CORE_RESET);
745 			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
746 				ql_log(ql_log_fatal, vha, 0x5065,
747 				    "Unrecoverable Fatal error: Set FAILED "
748 				    "state, reboot required.\n");
749 				qla83xx_schedule_work(vha,
750 				    QLA83XX_NIC_CORE_UNRECOVERABLE);
751 			}
752 		}
753 
754 		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
755 			uint16_t peg_fw_state, nw_interface_link_up;
756 			uint16_t nw_interface_signal_detect, sfp_status;
757 			uint16_t htbt_counter, htbt_monitor_enable;
758 			uint16_t sfp_additional_info, sfp_multirate;
759 			uint16_t sfp_tx_fault, link_speed, dcbx_status;
760 
761 			/*
762 			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
763 			 *  - PEG-to-FC Status Register:
764 			 *	(LSW = mb[2], MSW = mb[6])
765 			 *	Bits 0-7   = Peg-Firmware state
766 			 *	Bit 8      = N/W Interface Link-up
767 			 *	Bit 9      = N/W Interface signal detected
768 			 *	Bits 10-11 = SFP Status
769 			 *	  SFP Status 0x0 = SFP+ transceiver not expected
770 			 *	  SFP Status 0x1 = SFP+ transceiver not present
771 			 *	  SFP Status 0x2 = SFP+ transceiver invalid
772 			 *	  SFP Status 0x3 = SFP+ transceiver present and
773 			 *	  valid
774 			 *	Bits 12-14 = Heartbeat Counter
775 			 *	Bit 15     = Heartbeat Monitor Enable
776 			 *	Bits 16-17 = SFP Additional Info
777 			 *	  SFP info 0x0 = Unrecognized transceiver for
778 			 *	  Ethernet
779 			 *	  SFP info 0x1 = SFP+ brand validation failed
780 			 *	  SFP info 0x2 = SFP+ speed validation failed
781 			 *	  SFP info 0x3 = SFP+ access error
782 			 *	Bit 18     = SFP Multirate
783 			 *	Bit 19     = SFP Tx Fault
784 			 *	Bits 20-22 = Link Speed
785 			 *	Bits 23-27 = Reserved
786 			 *	Bits 28-30 = DCBX Status
787 			 *	  DCBX Status 0x0 = DCBX Disabled
788 			 *	  DCBX Status 0x1 = DCBX Enabled
789 			 *	  DCBX Status 0x2 = DCBX Exchange error
790 			 *	Bit 31     = Reserved
791 			 */
792 			peg_fw_state = (mb[2] & 0x00ff);
793 			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
794 			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
795 			sfp_status = ((mb[2] & 0x0c00) >> 10);
796 			htbt_counter = ((mb[2] & 0x7000) >> 12);
797 			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
798 			sfp_additional_info = (mb[6] & 0x0003);
799 			sfp_multirate = ((mb[6] & 0x0004) >> 2);
800 			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
801 			link_speed = ((mb[6] & 0x0070) >> 4);
802 			dcbx_status = ((mb[6] & 0x7000) >> 12);
803 
804 			ql_log(ql_log_warn, vha, 0x5066,
805 			    "Peg-to-Fc Status Register:\n"
806 			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
807 			    "nw_interface_signal_detect=0x%x"
808 			    "\nsfp_status=0x%x.\n", peg_fw_state,
809 			    nw_interface_link_up, nw_interface_signal_detect,
810 			    sfp_status);
811 			ql_log(ql_log_warn, vha, 0x5067,
812 			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
813 			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
814 			    htbt_counter, htbt_monitor_enable,
815 			    sfp_additional_info, sfp_multirate);
816 			ql_log(ql_log_warn, vha, 0x5068,
817 			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
818 			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
819 			    dcbx_status);
820 
821 			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
822 		}
823 
824 		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
825 			ql_log(ql_log_warn, vha, 0x5069,
826 			    "Heartbeat Failure encountered, chip reset "
827 			    "required.\n");
828 
829 			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
830 		}
831 	}
832 
833 	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
834 		ql_log(ql_log_info, vha, 0x506a,
835 		    "IDC Device-State changed = 0x%x.\n", mb[4]);
836 		if (ha->flags.nic_core_reset_owner)
837 			return;
838 		qla83xx_schedule_work(vha, MBA_IDC_AEN);
839 	}
840 }
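
/*
 * Worked example (compiled out): decoding Peg-Halt Status-1 per the
 * layout documented above.  With lsw = mb[2] = 0x4a01 and
 * msw = mb[6] = 0x4003: engine id 0x01, f/w error code 0x34a,
 * error level 0x2 (Recoverable Fatal).
 */
#if 0
static void example_decode_peg_halt_status1(uint16_t lsw, uint16_t msw)
{
	uint32_t engine_id = lsw & 0xff;		/* bits 0-7   */
	uint32_t fw_err = ((lsw & 0xff00) >> 8) |	/* bits 8-28  */
	    ((msw & 0x1fff) << 8);
	uint32_t level = (msw & 0xe000) >> 13;		/* bits 29-31 */

	pr_info("engine=%#x fw_err=%#x level=%#x\n", engine_id, fw_err, level);
}
#endif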
841 
842 /**
843  * qla27xx_copy_multiple_pkt() - Copy over purex/purls packets that can
844  * span over multiple IOCBs.
845  * @vha: SCSI driver HA context
846  * @pkt: ELS packet
847  * @rsp: Response queue
848  * @is_purls: true for an Unsolicited Received FC-NVMe LS rsp IOCB,
849  *            false for an Unsolicited Received ELS IOCB
850  * @byte_order: true to change the byte ordering of the IOCB payload
851  */
852 struct purex_item *
853 qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt,
854 			  struct rsp_que **rsp, bool is_purls,
855 			  bool byte_order)
856 {
857 	struct purex_entry_24xx *purex = NULL;
858 	struct pt_ls4_rx_unsol *purls = NULL;
859 	struct rsp_que *rsp_q = *rsp;
860 	sts_cont_entry_t *new_pkt;
861 	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
862 	uint16_t buffer_copy_offset = 0, payload_size = 0;
863 	uint16_t entry_count, entry_count_remaining;
864 	struct purex_item *item;
865 	void *iocb_pkt = NULL;
866 
867 	if (is_purls) {
868 		purls = *pkt;
869 		total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) -
870 			      PURX_ELS_HEADER_SIZE;
871 		entry_count = entry_count_remaining = purls->entry_count;
872 		payload_size = sizeof(purls->payload);
873 	} else {
874 		purex = *pkt;
875 		total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) -
876 			      PURX_ELS_HEADER_SIZE;
877 		entry_count = entry_count_remaining = purex->entry_count;
878 		payload_size = sizeof(purex->els_frame_payload);
879 	}
880 
881 	pending_bytes = total_bytes;
882 	no_bytes = (pending_bytes > payload_size) ? payload_size :
883 		   pending_bytes;
884 	ql_dbg(ql_dbg_async, vha, 0x509a,
885 	       "%s LS, frame_size 0x%x, entry count %d\n",
886 	       (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count);
887 
888 	item = qla24xx_alloc_purex_item(vha, total_bytes);
889 	if (!item)
890 		return item;
891 
892 	iocb_pkt = &item->iocb;
893 
894 	if (is_purls)
895 		memcpy(iocb_pkt, &purls->payload[0], no_bytes);
896 	else
897 		memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes);
898 	buffer_copy_offset += no_bytes;
899 	pending_bytes -= no_bytes;
900 	--entry_count_remaining;
901 
902 	if (is_purls)
903 		((response_t *)purls)->signature = RESPONSE_PROCESSED;
904 	else
905 		((response_t *)purex)->signature = RESPONSE_PROCESSED;
906 	wmb();
907 
908 	do {
909 		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
910 			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
911 				ql_dbg(ql_dbg_async, vha, 0x5084,
912 				       "Ran out of IOCBs, partial data 0x%x\n",
913 				       buffer_copy_offset);
914 				cpu_relax();
915 				continue;
916 			}
917 
918 			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
919 			*pkt = new_pkt;
920 
921 			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
922 				ql_log(ql_log_warn, vha, 0x507a,
923 				       "Unexpected IOCB type, partial data 0x%x\n",
924 				       buffer_copy_offset);
925 				break;
926 			}
927 
928 			rsp_q->ring_index++;
929 			if (rsp_q->ring_index == rsp_q->length) {
930 				rsp_q->ring_index = 0;
931 				rsp_q->ring_ptr = rsp_q->ring;
932 			} else {
933 				rsp_q->ring_ptr++;
934 			}
935 			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
936 				sizeof(new_pkt->data) : pending_bytes;
937 			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
938 				memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
939 				       new_pkt->data, no_bytes);
940 				buffer_copy_offset += no_bytes;
941 				pending_bytes -= no_bytes;
942 				--entry_count_remaining;
943 			} else {
944 				ql_log(ql_log_warn, vha, 0x5044,
945 				       "Attempt to copy more than we got, truncating at %x\n",
946 				       buffer_copy_offset);
947 				memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
948 				       new_pkt->data,
949 				       total_bytes - buffer_copy_offset);
950 			}
951 
952 			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
953 			wmb();
954 		}
955 
956 		if (pending_bytes != 0 || entry_count_remaining != 0) {
957 			ql_log(ql_log_fatal, vha, 0x508b,
958 			       "Dropping partial packet, underrun bytes = 0x%x, entry cnts 0x%x\n",
959 			       total_bytes, entry_count_remaining);
960 			qla24xx_free_purex_item(item);
961 			return NULL;
962 		}
963 	} while (entry_count_remaining > 0);
964 
965 	if (byte_order)
966 		host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
967 
968 	return item;
969 }
970 
971 int
972 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
973 {
974 	struct qla_hw_data *ha = vha->hw;
975 	scsi_qla_host_t *vp;
976 	uint32_t vp_did;
977 	unsigned long flags;
978 	int ret = 0;
979 
980 	if (!ha->num_vhosts)
981 		return ret;
982 
983 	spin_lock_irqsave(&ha->vport_slock, flags);
984 	list_for_each_entry(vp, &ha->vp_list, list) {
985 		vp_did = vp->d_id.b24;
986 		if (vp_did == rscn_entry) {
987 			ret = 1;
988 			break;
989 		}
990 	}
991 	spin_unlock_irqrestore(&ha->vport_slock, flags);
992 
993 	return ret;
994 }
995 
996 fc_port_t *
997 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
998 {
999 	fc_port_t *f, *tf;
1000 
1001 	f = tf = NULL;
1002 	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
1003 		if (f->loop_id == loop_id)
1004 			return f;
1005 	return NULL;
1006 }
1007 
1008 fc_port_t *
1009 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
1010 {
1011 	fc_port_t *f, *tf;
1012 
1013 	f = tf = NULL;
1014 	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
1015 		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
1016 			if (incl_deleted)
1017 				return f;
1018 			else if (f->deleted == 0)
1019 				return f;
1020 		}
1021 	}
1022 	return NULL;
1023 }
1024 
1025 fc_port_t *
1026 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
1027 	u8 incl_deleted)
1028 {
1029 	fc_port_t *f, *tf;
1030 
1031 	f = tf = NULL;
1032 	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
1033 		if (f->d_id.b24 == id->b24) {
1034 			if (incl_deleted)
1035 				return f;
1036 			else if (f->deleted == 0)
1037 				return f;
1038 		}
1039 	}
1040 	return NULL;
1041 }
1042 
1043 /* Shall be called only on supported adapters. */
1044 static void
1045 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
1046 {
1047 	struct qla_hw_data *ha = vha->hw;
1048 	bool reset_isp_needed = false;
1049 
1050 	ql_log(ql_log_warn, vha, 0x02f0,
1051 	       "MPI Heartbeat stop. MPI reset is%s needed. "
1052 	       "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
1053 	       mb[1] & BIT_8 ? "" : " not",
1054 	       mb[0], mb[1], mb[2], mb[3]);
1055 
1056 	if ((mb[1] & BIT_8) == 0)
1057 		return;
1058 
1059 	ql_log(ql_log_warn, vha, 0x02f1,
1060 	       "MPI Heartbeat stop. FW dump needed\n");
1061 
1062 	if (ql2xfulldump_on_mpifail) {
1063 		ha->isp_ops->fw_dump(vha);
1064 		reset_isp_needed = true;
1065 	}
1066 
1067 	ha->isp_ops->mpi_fw_dump(vha, 1);
1068 
1069 	if (reset_isp_needed) {
1070 		vha->hw->flags.fw_init_done = 0;
1071 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1072 		qla2xxx_wake_dpc(vha);
1073 	}
1074 }
1075 
1076 static struct purex_item *
1077 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
1078 {
1079 	struct purex_item *item = NULL;
1080 
1081 	if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
1082 		item = kzalloc(struct_size(item, iocb, size), GFP_ATOMIC);
1083 	} else {
1084 		if (atomic_inc_return(&vha->default_item.in_use) == 1) {
1085 			item = &vha->default_item;
1086 			goto initialize_purex_header;
1087 		} else {
1088 			item = kzalloc(
1089 				struct_size(item, iocb, QLA_DEFAULT_PAYLOAD_SIZE),
1090 				GFP_ATOMIC);
1091 		}
1092 	}
1093 	if (!item) {
1094 		ql_log(ql_log_warn, vha, 0x5092,
1095 		       ">> Failed to allocate purex list item.\n");
1096 
1097 		return NULL;
1098 	}
1099 
1100 initialize_purex_header:
1101 	item->vha = vha;
1102 	item->size = size;
1103 	return item;
1104 }
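
/*
 * Usage sketch (compiled out): payloads up to QLA_DEFAULT_PAYLOAD_SIZE
 * reuse the embedded vha->default_item (guarded by its in_use counter);
 * larger payloads, or a busy default item, fall back to a GFP_ATOMIC
 * allocation.  Either way the item is released with
 * qla24xx_free_purex_item().
 */
#if 0
static void example_alloc_and_free_purex(scsi_qla_host_t *vha)
{
	struct purex_item *item;

	item = qla24xx_alloc_purex_item(vha, QLA_DEFAULT_PAYLOAD_SIZE);
	if (item)
		qla24xx_free_purex_item(item);
}
#endif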
1105 
1106 void
1107 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
1108 			 void (*process_item)(struct scsi_qla_host *vha,
1109 					      struct purex_item *pkt))
1110 {
1111 	struct purex_list *list = &vha->purex_list;
1112 	ulong flags;
1113 
1114 	pkt->process_item = process_item;
1115 
1116 	spin_lock_irqsave(&list->lock, flags);
1117 	list_add_tail(&pkt->list, &list->head);
1118 	spin_unlock_irqrestore(&list->lock, flags);
1119 
1120 	set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
1121 }
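
/*
 * Usage sketch (compiled out): a received packet is copied into a
 * purex_item and queued with its handler; the DPC thread later walks
 * vha->purex_list and invokes item->process_item(vha, item) once
 * PROCESS_PUREX_IOCB is seen in dpc_flags.
 */
#if 0
static void example_queue_fpin(struct scsi_qla_host *vha,
			       struct purex_item *item)
{
	qla24xx_queue_purex_item(vha, item, qla27xx_process_purex_fpin);
}
#endif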
1122 
1123 /**
1124  * qla24xx_copy_std_pkt() - Copy over a purex ELS packet that is
1125  * contained in a single IOCB.
1126  *
1127  * @vha: SCSI driver HA context
1128  * @pkt: ELS packet
1129  */
1130 static struct purex_item *
1131 qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
1132 {
1133 	struct purex_item *item;
1134 
1135 	item = qla24xx_alloc_purex_item(vha, QLA_DEFAULT_PAYLOAD_SIZE);
1136 	if (!item)
1137 		return item;
1138 
1139 	memcpy(&item->iocb, pkt, QLA_DEFAULT_PAYLOAD_SIZE);
1140 	return item;
1141 }
1142 
1143 /**
1144  * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
1145  * span over multiple IOCBs.
1146  * @vha: SCSI driver HA context
1147  * @pkt: ELS packet
1148  * @rsp: Response queue
1149  */
1150 static struct purex_item *
1151 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
1152 		      struct rsp_que **rsp)
1153 {
1154 	struct purex_entry_24xx *purex = *pkt;
1155 	struct rsp_que *rsp_q = *rsp;
1156 	sts_cont_entry_t *new_pkt;
1157 	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
1158 	uint16_t buffer_copy_offset = 0;
1159 	uint16_t entry_count, entry_count_remaining;
1160 	struct purex_item *item;
1161 	void *fpin_pkt = NULL;
1162 
1163 	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
1164 	    - PURX_ELS_HEADER_SIZE;
1165 	pending_bytes = total_bytes;
1166 	entry_count = entry_count_remaining = purex->entry_count;
1167 	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload))  ?
1168 		   sizeof(purex->els_frame_payload) : pending_bytes;
1169 	ql_log(ql_log_info, vha, 0x509a,
1170 	       "FPIN ELS, frame_size 0x%x, entry count %d\n",
1171 	       total_bytes, entry_count);
1172 
1173 	item = qla24xx_alloc_purex_item(vha, total_bytes);
1174 	if (!item)
1175 		return item;
1176 
1177 	fpin_pkt = &item->iocb;
1178 
1179 	memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
1180 	buffer_copy_offset += no_bytes;
1181 	pending_bytes -= no_bytes;
1182 	--entry_count_remaining;
1183 
1184 	((response_t *)purex)->signature = RESPONSE_PROCESSED;
1185 	wmb();
1186 
1187 	do {
1188 		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
1189 			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
1190 				ql_dbg(ql_dbg_async, vha, 0x5084,
1191 				       "Ran out of IOCBs, partial data 0x%x\n",
1192 				       buffer_copy_offset);
1193 				cpu_relax();
1194 				continue;
1195 			}
1196 
1197 			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
1198 			*pkt = new_pkt;
1199 
1200 			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
1201 				ql_log(ql_log_warn, vha, 0x507a,
1202 				       "Unexpected IOCB type, partial data 0x%x\n",
1203 				       buffer_copy_offset);
1204 				break;
1205 			}
1206 
1207 			rsp_q->ring_index++;
1208 			if (rsp_q->ring_index == rsp_q->length) {
1209 				rsp_q->ring_index = 0;
1210 				rsp_q->ring_ptr = rsp_q->ring;
1211 			} else {
1212 				rsp_q->ring_ptr++;
1213 			}
1214 			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
1215 			    sizeof(new_pkt->data) : pending_bytes;
1216 			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
1217 				memcpy(((uint8_t *)fpin_pkt +
1218 				    buffer_copy_offset), new_pkt->data,
1219 				    no_bytes);
1220 				buffer_copy_offset += no_bytes;
1221 				pending_bytes -= no_bytes;
1222 				--entry_count_remaining;
1223 			} else {
1224 				ql_log(ql_log_warn, vha, 0x5044,
1225 				       "Attempt to copy more than we got, truncating at %x\n",
1226 				       buffer_copy_offset);
1227 				memcpy(((uint8_t *)fpin_pkt +
1228 				    buffer_copy_offset), new_pkt->data,
1229 				    total_bytes - buffer_copy_offset);
1230 			}
1231 
1232 			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
1233 			wmb();
1234 		}
1235 
1236 		if (pending_bytes != 0 || entry_count_remaining != 0) {
1237 			ql_log(ql_log_fatal, vha, 0x508b,
1238 			       "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
1239 			       total_bytes, entry_count_remaining);
1240 			qla24xx_free_purex_item(item);
1241 			return NULL;
1242 		}
1243 	} while (entry_count_remaining > 0);
1244 	host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
1245 	return item;
1246 }
1247 
1248 /**
1249  * qla2x00_async_event() - Process asynchronous events.
1250  * @vha: SCSI driver HA context
1251  * @rsp: response queue
1252  * @mb: Mailbox registers (0 - 3)
1253  */
1254 void
1255 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1256 {
1257 	uint16_t	handle_cnt;
1258 	uint16_t	cnt, mbx;
1259 	uint32_t	handles[5];
1260 	struct qla_hw_data *ha = vha->hw;
1261 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1262 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1263 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1264 	uint32_t	rscn_entry, host_pid;
1265 	unsigned long	flags;
1266 	fc_port_t	*fcport = NULL;
1267 
1268 	if (!vha->hw->flags.fw_started) {
1269 		ql_log(ql_log_warn, vha, 0x50ff,
1270 		    "Dropping AEN - %04x %04x %04x %04x.\n",
1271 		    mb[0], mb[1], mb[2], mb[3]);
1272 		return;
1273 	}
1274 
1275 	/* Setup to process RIO completion. */
1276 	handle_cnt = 0;
1277 	if (IS_CNA_CAPABLE(ha))
1278 		goto skip_rio;
1279 	switch (mb[0]) {
1280 	case MBA_SCSI_COMPLETION:
1281 		handles[0] = make_handle(mb[2], mb[1]);
1282 		handle_cnt = 1;
1283 		break;
1284 	case MBA_CMPLT_1_16BIT:
1285 		handles[0] = mb[1];
1286 		handle_cnt = 1;
1287 		mb[0] = MBA_SCSI_COMPLETION;
1288 		break;
1289 	case MBA_CMPLT_2_16BIT:
1290 		handles[0] = mb[1];
1291 		handles[1] = mb[2];
1292 		handle_cnt = 2;
1293 		mb[0] = MBA_SCSI_COMPLETION;
1294 		break;
1295 	case MBA_CMPLT_3_16BIT:
1296 		handles[0] = mb[1];
1297 		handles[1] = mb[2];
1298 		handles[2] = mb[3];
1299 		handle_cnt = 3;
1300 		mb[0] = MBA_SCSI_COMPLETION;
1301 		break;
1302 	case MBA_CMPLT_4_16BIT:
1303 		handles[0] = mb[1];
1304 		handles[1] = mb[2];
1305 		handles[2] = mb[3];
1306 		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1307 		handle_cnt = 4;
1308 		mb[0] = MBA_SCSI_COMPLETION;
1309 		break;
1310 	case MBA_CMPLT_5_16BIT:
1311 		handles[0] = mb[1];
1312 		handles[1] = mb[2];
1313 		handles[2] = mb[3];
1314 		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1315 		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
1316 		handle_cnt = 5;
1317 		mb[0] = MBA_SCSI_COMPLETION;
1318 		break;
1319 	case MBA_CMPLT_2_32BIT:
1320 		handles[0] = make_handle(mb[2], mb[1]);
1321 		handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
1322 					 RD_MAILBOX_REG(ha, reg, 6));
1323 		handle_cnt = 2;
1324 		mb[0] = MBA_SCSI_COMPLETION;
1325 		break;
1326 	default:
1327 		break;
1328 	}
1329 skip_rio:
1330 	switch (mb[0]) {
1331 	case MBA_SCSI_COMPLETION:	/* Fast Post */
1332 		if (!vha->flags.online)
1333 			break;
1334 
1335 		for (cnt = 0; cnt < handle_cnt; cnt++)
1336 			qla2x00_process_completed_request(vha, rsp->req,
1337 				handles[cnt]);
1338 		break;
1339 
1340 	case MBA_RESET:			/* Reset */
1341 		ql_dbg(ql_dbg_async, vha, 0x5002,
1342 		    "Asynchronous RESET.\n");
1343 
1344 		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1345 		break;
1346 
1347 	case MBA_SYSTEM_ERR:		/* System Error */
1348 		mbx = 0;
1349 
1350 		vha->hw_err_cnt++;
1351 
1352 		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1353 		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1354 			u16 m[4];
1355 
1356 			m[0] = rd_reg_word(&reg24->mailbox4);
1357 			m[1] = rd_reg_word(&reg24->mailbox5);
1358 			m[2] = rd_reg_word(&reg24->mailbox6);
1359 			mbx = m[3] = rd_reg_word(&reg24->mailbox7);
1360 
1361 			ql_log(ql_log_warn, vha, 0x5003,
1362 			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
1363 			    mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
1364 		} else
1365 			ql_log(ql_log_warn, vha, 0x5003,
1366 			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
1367 			    mb[1], mb[2], mb[3]);
1368 
1369 		if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
1370 		    rd_reg_word(&reg24->mailbox7) & BIT_8)
1371 			ha->isp_ops->mpi_fw_dump(vha, 1);
1372 		ha->isp_ops->fw_dump(vha);
1373 		ha->flags.fw_init_done = 0;
1374 		QLA_FW_STOPPED(ha);
1375 
1376 		if (IS_FWI2_CAPABLE(ha)) {
1377 			if (mb[1] == 0 && mb[2] == 0) {
1378 				ql_log(ql_log_fatal, vha, 0x5004,
1379 				    "Unrecoverable Hardware Error: adapter "
1380 				    "marked OFFLINE!\n");
1381 				vha->flags.online = 0;
1382 				vha->device_flags |= DFLG_DEV_FAILED;
1383 			} else {
1384 				/* Check to see if MPI timeout occurred */
1385 				if ((mbx & MBX_3) && (ha->port_no == 0))
1386 					set_bit(MPI_RESET_NEEDED,
1387 					    &vha->dpc_flags);
1388 
1389 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1390 			}
1391 		} else if (mb[1] == 0) {
1392 			ql_log(ql_log_fatal, vha, 0x5005,
1393 			    "Unrecoverable Hardware Error: adapter marked "
1394 			    "OFFLINE!\n");
1395 			vha->flags.online = 0;
1396 			vha->device_flags |= DFLG_DEV_FAILED;
1397 		} else
1398 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1399 		break;
1400 
1401 	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
1402 		ql_log(ql_log_warn, vha, 0x5006,
1403 		    "ISP Request Transfer Error (%x).\n",  mb[1]);
1404 
1405 		vha->hw_err_cnt++;
1406 
1407 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1408 		break;
1409 
1410 	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
1411 		ql_log(ql_log_warn, vha, 0x5007,
1412 		    "ISP Response Transfer Error (%x).\n", mb[1]);
1413 
1414 		vha->hw_err_cnt++;
1415 
1416 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1417 		break;
1418 
1419 	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
1420 		ql_dbg(ql_dbg_async, vha, 0x5008,
1421 		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
1422 		break;
1423 
1424 	case MBA_LOOP_INIT_ERR:
1425 		ql_log(ql_log_warn, vha, 0x5090,
1426 		    "LOOP INIT ERROR (%x).\n", mb[1]);
1427 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1428 		break;
1429 
1430 	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
1431 		ha->flags.lip_ae = 1;
1432 
1433 		ql_dbg(ql_dbg_async, vha, 0x5009,
1434 		    "LIP occurred (%x).\n", mb[1]);
1435 
1436 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1437 			atomic_set(&vha->loop_state, LOOP_DOWN);
1438 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1439 			qla2x00_mark_all_devices_lost(vha);
1440 		}
1441 
1442 		if (vha->vp_idx) {
1443 			atomic_set(&vha->vp_state, VP_FAILED);
1444 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1445 		}
1446 
1447 		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1448 		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1449 
1450 		vha->flags.management_server_logged_in = 0;
1451 		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1452 		break;
1453 
1454 	case MBA_LOOP_UP:		/* Loop Up Event */
1455 		if (IS_QLA2100(ha) || IS_QLA2200(ha))
1456 			ha->link_data_rate = PORT_SPEED_1GB;
1457 		else
1458 			ha->link_data_rate = mb[1];
1459 
1460 		ql_log(ql_log_info, vha, 0x500a,
1461 		    "LOOP UP detected (%s Gbps).\n",
1462 		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1463 
1464 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1465 			if (mb[2] & BIT_0)
1466 				ql_log(ql_log_info, vha, 0x11a0,
1467 				    "FEC=enabled (link up).\n");
1468 		}
1469 
1470 		vha->flags.management_server_logged_in = 0;
1471 		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1472 
1473 		if (vha->link_down_time < vha->hw->port_down_retry_count) {
1474 			vha->short_link_down_cnt++;
1475 			vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
1476 		}
1477 
1478 		break;
1479 
1480 	case MBA_LOOP_DOWN:		/* Loop Down Event */
1481 		SAVE_TOPO(ha);
1482 		ha->flags.lip_ae = 0;
1483 		ha->current_topology = 0;
1484 		vha->link_down_time = 0;
1485 
1486 		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
1487 			? rd_reg_word(&reg24->mailbox4) : 0;
1488 		mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
1489 			: mbx;
1490 		ql_log(ql_log_info, vha, 0x500b,
1491 		    "LOOP DOWN detected (%x %x %x %x).\n",
1492 		    mb[1], mb[2], mb[3], mbx);
1493 
1494 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1495 			atomic_set(&vha->loop_state, LOOP_DOWN);
1496 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1497 			/*
1498 			 * In case of loop down, restore the WWPN from
1499 			 * NVRAM on FA-WWPN capable ISPs.
1500 			 * Restore it for the physical port only.
1501 			 */
1502 			if (!vha->vp_idx) {
1503 				if (ha->flags.fawwpn_enabled &&
1504 				    (ha->current_topology == ISP_CFG_F)) {
1505 					memcpy(vha->port_name, ha->port_name, WWN_SIZE);
1506 					fc_host_port_name(vha->host) =
1507 					    wwn_to_u64(vha->port_name);
1508 					ql_dbg(ql_dbg_init + ql_dbg_verbose,
1509 					    vha, 0x00d8, "LOOP DOWN detected, "
1510 					    "restore WWPN %016llx\n",
1511 					    wwn_to_u64(vha->port_name));
1512 				}
1513 
1514 				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
1515 			}
1516 
1517 			vha->device_flags |= DFLG_NO_CABLE;
1518 			qla2x00_mark_all_devices_lost(vha);
1519 		}
1520 
1521 		if (vha->vp_idx) {
1522 			atomic_set(&vha->vp_state, VP_FAILED);
1523 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1524 		}
1525 
1526 		vha->flags.management_server_logged_in = 0;
1527 		ha->link_data_rate = PORT_SPEED_UNKNOWN;
1528 		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1529 		break;
1530 
1531 	case MBA_LIP_RESET:		/* LIP reset occurred */
1532 		ql_dbg(ql_dbg_async, vha, 0x500c,
1533 		    "LIP reset occurred (%x).\n", mb[1]);
1534 
1535 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1536 			atomic_set(&vha->loop_state, LOOP_DOWN);
1537 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1538 			qla2x00_mark_all_devices_lost(vha);
1539 		}
1540 
1541 		if (vha->vp_idx) {
1542 			atomic_set(&vha->vp_state, VP_FAILED);
1543 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1544 		}
1545 
1546 		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1547 
1548 		ha->operating_mode = LOOP;
1549 		vha->flags.management_server_logged_in = 0;
1550 		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1551 		break;
1552 
1553 	/* case MBA_DCBX_COMPLETE: */
1554 	case MBA_POINT_TO_POINT:	/* Point-to-Point */
1555 		ha->flags.lip_ae = 0;
1556 
1557 		if (IS_QLA2100(ha))
1558 			break;
1559 
1560 		if (IS_CNA_CAPABLE(ha)) {
1561 			ql_dbg(ql_dbg_async, vha, 0x500d,
1562 			    "DCBX Completed -- %04x %04x %04x.\n",
1563 			    mb[1], mb[2], mb[3]);
1564 			if (ha->notify_dcbx_comp && !vha->vp_idx)
1565 				complete(&ha->dcbx_comp);
1566 
1567 		} else
1568 			ql_dbg(ql_dbg_async, vha, 0x500e,
1569 			    "Asynchronous P2P MODE received.\n");
1570 
1571 		/*
1572 		 * Until there's a transition from loop down to loop up, treat
1573 		 * this as loop down only.
1574 		 */
1575 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1576 			atomic_set(&vha->loop_state, LOOP_DOWN);
1577 			if (!atomic_read(&vha->loop_down_timer))
1578 				atomic_set(&vha->loop_down_timer,
1579 				    LOOP_DOWN_TIME);
1580 			if (!N2N_TOPO(ha))
1581 				qla2x00_mark_all_devices_lost(vha);
1582 		}
1583 
1584 		if (vha->vp_idx) {
1585 			atomic_set(&vha->vp_state, VP_FAILED);
1586 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1587 		}
1588 
1589 		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
1590 			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1591 
1592 		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1593 		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1594 
1595 		vha->flags.management_server_logged_in = 0;
1596 		break;
1597 
1598 	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
1599 		if (IS_QLA2100(ha))
1600 			break;
1601 
1602 		ql_dbg(ql_dbg_async, vha, 0x500f,
1603 		    "Configuration change detected: value=%x.\n", mb[1]);
1604 
1605 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1606 			atomic_set(&vha->loop_state, LOOP_DOWN);
1607 			if (!atomic_read(&vha->loop_down_timer))
1608 				atomic_set(&vha->loop_down_timer,
1609 				    LOOP_DOWN_TIME);
1610 			qla2x00_mark_all_devices_lost(vha);
1611 		}
1612 
1613 		if (vha->vp_idx) {
1614 			atomic_set(&vha->vp_state, VP_FAILED);
1615 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1616 		}
1617 
1618 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1619 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1620 		break;
1621 
1622 	case MBA_PORT_UPDATE:		/* Port database update */
1623 		/*
1624 		 * Handle only global and vn-port update events
1625 		 *
1626 		 * Relevant inputs:
1627 		 * mb[1] = N_Port handle of changed port
1628 		 * OR 0xffff for global event
1629 		 * mb[2] = New login state
1630 		 * 7 = Port logged out
1631 		 * mb[3] = LSB is vp_idx, 0xff = all vps
1632 		 *
1633 		 * Skip processing if:
1634 		 *       Event is global, vp_idx is NOT all vps,
1635 		 *           vp_idx does not match
1636 		 *       Event is not global, vp_idx does not match
1637 		 */
1638 		if (IS_QLA2XXX_MIDTYPE(ha) &&
1639 		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
1640 			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
1641 			break;
1642 
1643 		if (mb[2] == 0x7) {
1644 			ql_dbg(ql_dbg_async, vha, 0x5010,
1645 			    "Port %s %04x %04x %04x.\n",
1646 			    mb[1] == 0xffff ? "unavailable" : "logout",
1647 			    mb[1], mb[2], mb[3]);
1648 
1649 			if (mb[1] == 0xffff)
1650 				goto global_port_update;
1651 
1652 			if (mb[1] == NPH_SNS_LID(ha)) {
1653 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1654 				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1655 				break;
1656 			}
1657 
1658 			/* use handle_cnt for loop id/nport handle */
1659 			if (IS_FWI2_CAPABLE(ha))
1660 				handle_cnt = NPH_SNS;
1661 			else
1662 				handle_cnt = SIMPLE_NAME_SERVER;
1663 			if (mb[1] == handle_cnt) {
1664 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1665 				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1666 				break;
1667 			}
1668 
1669 			/* Port logout */
1670 			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1671 			if (!fcport)
1672 				break;
1673 			if (atomic_read(&fcport->state) != FCS_ONLINE)
1674 				break;
1675 			ql_dbg(ql_dbg_async, vha, 0x508a,
1676 			    "Marking port lost loopid=%04x portid=%06x.\n",
1677 			    fcport->loop_id, fcport->d_id.b24);
1678 			if (qla_ini_mode_enabled(vha)) {
1679 				fcport->logout_on_delete = 0;
1680 				qlt_schedule_sess_for_deletion(fcport);
1681 			}
1682 			break;
1683 
1684 global_port_update:
1685 			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1686 				atomic_set(&vha->loop_state, LOOP_DOWN);
1687 				atomic_set(&vha->loop_down_timer,
1688 				    LOOP_DOWN_TIME);
1689 				vha->device_flags |= DFLG_NO_CABLE;
1690 				qla2x00_mark_all_devices_lost(vha);
1691 			}
1692 
1693 			if (vha->vp_idx) {
1694 				atomic_set(&vha->vp_state, VP_FAILED);
1695 				fc_vport_set_state(vha->fc_vport,
1696 				    FC_VPORT_FAILED);
1697 				qla2x00_mark_all_devices_lost(vha);
1698 			}
1699 
1700 			vha->flags.management_server_logged_in = 0;
1701 			ha->link_data_rate = PORT_SPEED_UNKNOWN;
1702 			break;
1703 		}
1704 
1705 		/*
1706 		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1707 		 * event etc. earlier indicating loop is down) then process
1708 		 * it.  Otherwise ignore it and wait for an RSCN to come in.
1709 		 */
1710 		atomic_set(&vha->loop_down_timer, 0);
1711 		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1712 			!ha->flags.n2n_ae  &&
1713 		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
1714 			ql_dbg(ql_dbg_async, vha, 0x5011,
1715 			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1716 			    mb[1], mb[2], mb[3]);
1717 			break;
1718 		}
1719 
1720 		ql_dbg(ql_dbg_async, vha, 0x5012,
1721 		    "Port database changed %04x %04x %04x.\n",
1722 		    mb[1], mb[2], mb[3]);
1723 
1724 		/*
1725 		 * Mark all devices as missing so we will log in again.
1726 		 */
1727 		atomic_set(&vha->loop_state, LOOP_UP);
1728 		vha->scan.scan_retry = 0;
1729 
1730 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1731 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1732 		set_bit(VP_CONFIG_OK, &vha->vp_flags);
1733 		break;
1734 
1735 	case MBA_RSCN_UPDATE:		/* State Change Registration */
1736 		/* Check if the Vport has issued a SCR */
1737 		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1738 			break;
1739 		/* Only handle SCNs for our Vport index. */
1740 		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1741 			break;
1742 
1743 		ql_log(ql_log_warn, vha, 0x5013,
1744 		       "RSCN database changed -- %04x %04x %04x.\n",
1745 		       mb[1], mb[2], mb[3]);
1746 
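		/*
		 * Rebuild the affected 24-bit port ID (domain, area, al_pa)
		 * from the RSCN mailbox payload.
		 */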
1747 		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1748 		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1749 				| vha->d_id.b.al_pa;
1750 		if (rscn_entry == host_pid) {
1751 			ql_dbg(ql_dbg_async, vha, 0x5014,
1752 			    "Ignoring RSCN update to local host "
1753 			    "port ID (%06x).\n", host_pid);
1754 			break;
1755 		}
1756 
1757 		/* Ignore reserved bits from RSCN-payload. */
1758 		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1759 
1760 		/* Skip RSCNs for virtual ports on the same physical port */
1761 		if (qla2x00_is_a_vp_did(vha, rscn_entry))
1762 			break;
1763 
1764 		atomic_set(&vha->loop_down_timer, 0);
1765 		vha->flags.management_server_logged_in = 0;
1766 		{
1767 			struct event_arg ea;
1768 
1769 			memset(&ea, 0, sizeof(ea));
1770 			ea.id.b24 = rscn_entry;
1771 			ea.id.b.rsvd_1 = rscn_entry >> 24;
1772 			qla2x00_handle_rscn(vha, &ea);
1773 			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1774 		}
1775 		break;
1776 	case MBA_CONGN_NOTI_RECV:
1777 		if (!ha->flags.scm_enabled ||
1778 		    mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
1779 			break;
1780 
1781 		if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
1782 			ql_dbg(ql_dbg_async, vha, 0x509b,
1783 			       "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
1784 		} else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
1785 			ql_log(ql_log_warn, vha, 0x509b,
1786 			       "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
1787 		}
1788 		break;
1789 	/* case MBA_RIO_RESPONSE: */
1790 	case MBA_ZIO_RESPONSE:
1791 		ql_dbg(ql_dbg_async, vha, 0x5015,
1792 		    "[R|Z]IO update completion.\n");
1793 
1794 		if (IS_FWI2_CAPABLE(ha))
1795 			qla24xx_process_response_queue(vha, rsp);
1796 		else
1797 			qla2x00_process_response_queue(rsp);
1798 		break;
1799 
1800 	case MBA_DISCARD_RND_FRAME:
1801 		ql_dbg(ql_dbg_async, vha, 0x5016,
1802 		    "Discard RND Frame -- %04x %04x %04x.\n",
1803 		    mb[1], mb[2], mb[3]);
1804 		vha->interface_err_cnt++;
1805 		break;
1806 
1807 	case MBA_TRACE_NOTIFICATION:
1808 		ql_dbg(ql_dbg_async, vha, 0x5017,
1809 		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1810 		break;
1811 
1812 	case MBA_ISP84XX_ALERT:
1813 		ql_dbg(ql_dbg_async, vha, 0x5018,
1814 		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1815 		    mb[1], mb[2], mb[3]);
1816 
1817 		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1818 		switch (mb[1]) {
1819 		case A84_PANIC_RECOVERY:
1820 			ql_log(ql_log_info, vha, 0x5019,
1821 			    "Alert 84XX: panic recovery %04x %04x.\n",
1822 			    mb[2], mb[3]);
1823 			break;
1824 		case A84_OP_LOGIN_COMPLETE:
1825 			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1826 			ql_log(ql_log_info, vha, 0x501a,
1827 			    "Alert 84XX: firmware version %x.\n",
1828 			    ha->cs84xx->op_fw_version);
1829 			break;
1830 		case A84_DIAG_LOGIN_COMPLETE:
1831 			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1832 			ql_log(ql_log_info, vha, 0x501b,
1833 			    "Alert 84XX: diagnostic firmware version %x.\n",
1834 			    ha->cs84xx->diag_fw_version);
1835 			break;
1836 		case A84_GOLD_LOGIN_COMPLETE:
1837 			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
1838 			ha->cs84xx->fw_update = 1;
1839 			ql_log(ql_log_info, vha, 0x501c,
1840 			    "Alert 84XX: gold firmware version %x.\n",
1841 			    ha->cs84xx->gold_fw_version);
1842 			break;
1843 		default:
1844 			ql_log(ql_log_warn, vha, 0x501d,
1845 			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1846 			    mb[1], mb[2], mb[3]);
1847 		}
1848 		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1849 		break;
1850 	case MBA_DCBX_START:
1851 		ql_dbg(ql_dbg_async, vha, 0x501e,
1852 		    "DCBX Started -- %04x %04x %04x.\n",
1853 		    mb[1], mb[2], mb[3]);
1854 		break;
1855 	case MBA_DCBX_PARAM_UPDATE:
1856 		ql_dbg(ql_dbg_async, vha, 0x501f,
1857 		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
1858 		    mb[1], mb[2], mb[3]);
1859 		break;
1860 	case MBA_FCF_CONF_ERR:
1861 		ql_dbg(ql_dbg_async, vha, 0x5020,
1862 		    "FCF Configuration Error -- %04x %04x %04x.\n",
1863 		    mb[1], mb[2], mb[3]);
1864 		break;
1865 	case MBA_IDC_NOTIFY:
1866 		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1867 			mb[4] = rd_reg_word(&reg24->mailbox4);
1868 			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1869 			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1870 			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1871 				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1872 				/*
1873 				 * Extend loop down timer since port is active.
1874 				 */
1875 				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1876 					atomic_set(&vha->loop_down_timer,
1877 					    LOOP_DOWN_TIME);
1878 				qla2xxx_wake_dpc(vha);
1879 			}
1880 		}
1881 		fallthrough;
1882 	case MBA_IDC_COMPLETE:
1883 		if (ha->notify_lb_portup_comp && !vha->vp_idx)
1884 			complete(&ha->lb_portup_comp);
1885 		fallthrough;
1886 	case MBA_IDC_TIME_EXT:
1887 		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1888 		    IS_QLA8044(ha))
1889 			qla81xx_idc_event(vha, mb[0], mb[1]);
1890 		break;
1891 
1892 	case MBA_IDC_AEN:
1893 		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1894 			vha->hw_err_cnt++;
1895 			qla27xx_handle_8200_aen(vha, mb);
1896 		} else if (IS_QLA83XX(ha)) {
1897 			mb[4] = rd_reg_word(&reg24->mailbox4);
1898 			mb[5] = rd_reg_word(&reg24->mailbox5);
1899 			mb[6] = rd_reg_word(&reg24->mailbox6);
1900 			mb[7] = rd_reg_word(&reg24->mailbox7);
1901 			qla83xx_handle_8200_aen(vha, mb);
1902 		} else {
1903 			ql_dbg(ql_dbg_async, vha, 0x5052,
1904 			    "Skipping heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
1905 			    mb[0], mb[1], mb[2], mb[3]);
1906 		}
1907 		break;
1908 
1909 	case MBA_DPORT_DIAGNOSTICS:
1910 		if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
1911 		    (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
1912 			vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
1913 		ql_dbg(ql_dbg_async, vha, 0x5052,
1914 		    "D-Port Diagnostics: %04x %04x %04x %04x\n",
1915 		    mb[0], mb[1], mb[2], mb[3]);
1916 		memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1917 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
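			/*
			 * mb[1] layout: bits 1:0 = result, bits 7:6 = test
			 * type, bit 15 = software flag.
			 */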
1918 			static char *results[] = {
1919 			    "start", "done(pass)", "done(error)", "undefined" };
1920 			static char *types[] = {
1921 			    "none", "dynamic", "static", "other" };
1922 			uint result = mb[1] >> 0 & 0x3;
1923 			uint type = mb[1] >> 6 & 0x3;
1924 			uint sw = mb[1] >> 15 & 0x1;
1925 			ql_dbg(ql_dbg_async, vha, 0x5052,
1926 			    "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
1927 			    results[result], types[type], sw);
1928 			if (result == 2) {
1929 				static char *reasons[] = {
1930 				    "reserved", "unexpected reject",
1931 				    "unexpected phase", "retry exceeded",
1932 				    "timed out", "not supported",
1933 				    "user stopped" };
1934 				uint reason = mb[2] >> 0 & 0xf;
1935 				uint phase = mb[2] >> 12 & 0xf;
1936 				ql_dbg(ql_dbg_async, vha, 0x5052,
1937 				    "D-Port Diagnostics: reason=%s phase=%u\n",
1938 				    reason < 7 ? reasons[reason] : "other",
1939 				    phase >> 1);
1940 			}
1941 		}
1942 		break;
1943 
1944 	case MBA_TEMPERATURE_ALERT:
1945 		if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1946 			display_Laser_info(vha, mb[1], mb[2], mb[3]);
1947 		ql_dbg(ql_dbg_async, vha, 0x505e,
1948 		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1949 		break;
1950 
1951 	case MBA_TRANS_INSERT:
1952 		ql_dbg(ql_dbg_async, vha, 0x5091,
1953 		    "Transceiver Insertion: %04x\n", mb[1]);
1954 		set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
1955 		break;
1956 
1957 	case MBA_TRANS_REMOVE:
1958 		ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
1959 		break;
1960 
1961 	default:
1962 		ql_dbg(ql_dbg_async, vha, 0x5057,
1963 		    "Unknown AEN: %04x %04x %04x %04x\n",
1964 		    mb[0], mb[1], mb[2], mb[3]);
1965 	}
1966 
1967 	qlt_async_event(mb[0], vha, mb);
1968 
1969 	if (!vha->vp_idx && ha->num_vhosts)
1970 		qla2x00_alert_all_vps(rsp, mb);
1971 }
1972 
1973 /**
1974  * qla2x00_process_completed_request() - Process a Fast Post response.
1975  * @vha: SCSI driver HA context
1976  * @req: request queue
1977  * @index: SRB index
1978  */
1979 void
1980 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1981 				  struct req_que *req, uint32_t index)
1982 {
1983 	srb_t *sp;
1984 	struct qla_hw_data *ha = vha->hw;
1985 
1986 	/* Validate handle. */
1987 	if (index >= req->num_outstanding_cmds) {
1988 		ql_log(ql_log_warn, vha, 0x3014,
1989 		    "Invalid SCSI command index (%x).\n", index);
1990 
1991 		if (IS_P3P_TYPE(ha))
1992 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1993 		else
1994 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1995 		return;
1996 	}
1997 
1998 	sp = req->outstanding_cmds[index];
1999 	if (sp) {
2000 		/* Free outstanding command slot. */
2001 		req->outstanding_cmds[index] = NULL;
2002 
2003 		/* Save ISP completion status */
2004 		sp->done(sp, DID_OK << 16);
2005 	} else {
2006 		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
2007 
2008 		if (IS_P3P_TYPE(ha))
2009 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2010 		else
2011 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2012 	}
2013 }
2014 
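/*
 * Look up and validate the SRB matching the completion handle in @iocb.
 * On success, the handle index is returned through @ret_index and the
 * SRB's firmware resources are released; clearing the outstanding
 * command slot is left to the caller.
 */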
2015 static srb_t *
2016 qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
2017 		       struct req_que *req, void *iocb, u16 *ret_index)
2018 {
2019 	struct qla_hw_data *ha = vha->hw;
2020 	sts_entry_t *pkt = iocb;
2021 	srb_t *sp;
2022 	uint16_t index;
2023 
2024 	if (pkt->handle == QLA_SKIP_HANDLE)
2025 		return NULL;
2026 
2027 	index = LSW(pkt->handle);
2028 	if (index >= req->num_outstanding_cmds) {
2029 		ql_log(ql_log_warn, vha, 0x5031,
2030 			   "%s: Invalid command index (%x) type %8ph.\n",
2031 			   func, index, iocb);
2032 		if (IS_P3P_TYPE(ha))
2033 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2034 		else
2035 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2036 		return NULL;
2037 	}
2038 	sp = req->outstanding_cmds[index];
2039 	if (!sp) {
2040 		ql_log(ql_log_warn, vha, 0x5032,
2041 			"%s: Invalid completion handle (%x) -- timed-out.\n",
2042 			func, index);
2043 		return NULL;
2044 	}
2045 	if (sp->handle != index) {
2046 		ql_log(ql_log_warn, vha, 0x5033,
2047 			"%s: SRB handle (%x) mismatch %x.\n", func,
2048 			sp->handle, index);
2049 		return NULL;
2050 	}
2051 
2052 	*ret_index = index;
2053 	qla_put_fw_resources(sp->qpair, &sp->iores);
2054 	return sp;
2055 }
2056 
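/*
 * As qla_get_sp_from_handle(), but also frees the outstanding command
 * slot on success.
 */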
2057 srb_t *
2058 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
2059 			   struct req_que *req, void *iocb)
2060 {
2061 	uint16_t index;
2062 	srb_t *sp;
2063 
2064 	sp = qla_get_sp_from_handle(vha, func, req, iocb, &index);
2065 	if (sp)
2066 		req->outstanding_cmds[index] = NULL;
2067 
2068 	return sp;
2069 }
2070 
2071 static void
2072 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2073     struct mbx_entry *mbx)
2074 {
2075 	const char func[] = "MBX-IOCB";
2076 	const char *type;
2077 	fc_port_t *fcport;
2078 	srb_t *sp;
2079 	struct srb_iocb *lio;
2080 	uint16_t *data;
2081 	uint16_t status;
2082 
2083 	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
2084 	if (!sp)
2085 		return;
2086 
2087 	lio = &sp->u.iocb_cmd;
2088 	type = sp->name;
2089 	fcport = sp->fcport;
2090 	data = lio->u.logio.data;
2091 
2092 	data[0] = MBS_COMMAND_ERROR;
2093 	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2094 	    QLA_LOGIO_LOGIN_RETRIED : 0;
2095 	if (mbx->entry_status) {
2096 		ql_dbg(ql_dbg_async, vha, 0x5043,
2097 		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
2098 		    "entry-status=%x status=%x state-flag=%x "
2099 		    "status-flags=%x.\n", type, sp->handle,
2100 		    fcport->d_id.b.domain, fcport->d_id.b.area,
2101 		    fcport->d_id.b.al_pa, mbx->entry_status,
2102 		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
2103 		    le16_to_cpu(mbx->status_flags));
2104 
2105 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
2106 		    mbx, sizeof(*mbx));
2107 
2108 		goto logio_done;
2109 	}
2110 
2111 	status = le16_to_cpu(mbx->status);
2112 	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
2113 	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
2114 		status = 0;
2115 	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
2116 		ql_dbg(ql_dbg_async, vha, 0x5045,
2117 		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
2118 		    type, sp->handle, fcport->d_id.b.domain,
2119 		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
2120 		    le16_to_cpu(mbx->mb1));
2121 
2122 		data[0] = MBS_COMMAND_COMPLETE;
2123 		if (sp->type == SRB_LOGIN_CMD) {
2124 			fcport->port_type = FCT_TARGET;
2125 			if (le16_to_cpu(mbx->mb1) & BIT_0)
2126 				fcport->port_type = FCT_INITIATOR;
2127 			else if (le16_to_cpu(mbx->mb1) & BIT_1)
2128 				fcport->flags |= FCF_FCP2_DEVICE;
2129 		}
2130 		goto logio_done;
2131 	}
2132 
2133 	data[0] = le16_to_cpu(mbx->mb0);
2134 	switch (data[0]) {
2135 	case MBS_PORT_ID_USED:
2136 		data[1] = le16_to_cpu(mbx->mb1);
2137 		break;
2138 	case MBS_LOOP_ID_USED:
2139 		break;
2140 	default:
2141 		data[0] = MBS_COMMAND_ERROR;
2142 		break;
2143 	}
2144 
2145 	ql_log(ql_log_warn, vha, 0x5046,
2146 	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
2147 	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
2148 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
2149 	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
2150 	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
2151 	    le16_to_cpu(mbx->mb7));
2152 
2153 logio_done:
2154 	sp->done(sp, 0);
2155 }
2156 
2157 static void
2158 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2159     struct mbx_24xx_entry *pkt)
2160 {
2161 	const char func[] = "MBX-IOCB2";
2162 	struct qla_hw_data *ha = vha->hw;
2163 	srb_t *sp;
2164 	struct srb_iocb *si;
2165 	u16 sz, i;
2166 	int res;
2167 
2168 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2169 	if (!sp)
2170 		return;
2171 
2172 	if (sp->type == SRB_SCSI_CMD ||
2173 	    sp->type == SRB_NVME_CMD ||
2174 	    sp->type == SRB_TM_CMD) {
2175 		ql_log(ql_log_warn, vha, 0x509d,
2176 			"Inconsistent event entry type %d\n", sp->type);
2177 		if (IS_P3P_TYPE(ha))
2178 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2179 		else
2180 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2181 		return;
2182 	}
2183 
2184 	si = &sp->u.iocb_cmd;
2185 	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
2186 
2187 	for (i = 0; i < sz; i++)
2188 		si->u.mbx.in_mb[i] = pkt->mb[i];
2189 
2190 	res = (si->u.mbx.in_mb[0] & MBS_MASK);
2191 
2192 	sp->done(sp, res);
2193 }
2194 
2195 static void
2196 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2197     struct nack_to_isp *pkt)
2198 {
2199 	const char func[] = "nack";
2200 	srb_t *sp;
2201 	int res = 0;
2202 
2203 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2204 	if (!sp)
2205 		return;
2206 
2207 	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
2208 		res = QLA_FUNCTION_FAILED;
2209 
2210 	sp->done(sp, res);
2211 }
2212 
2213 static void
2214 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
2215     sts_entry_t *pkt, int iocb_type)
2216 {
2217 	const char func[] = "CT_IOCB";
2218 	const char *type;
2219 	srb_t *sp;
2220 	struct bsg_job *bsg_job;
2221 	struct fc_bsg_reply *bsg_reply;
2222 	uint16_t comp_status;
2223 	int res = 0;
2224 
2225 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2226 	if (!sp)
2227 		return;
2228 
2229 	switch (sp->type) {
2230 	case SRB_CT_CMD:
2231 	    bsg_job = sp->u.bsg_job;
2232 	    bsg_reply = bsg_job->reply;
2233 
2234 	    type = "ct pass-through";
2235 
2236 	    comp_status = le16_to_cpu(pkt->comp_status);
2237 
2238 	    /*
2239 	     * Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
2240 	     * FC payload to the caller.
2241 	     */
2242 	    bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2243 	    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2244 
2245 	    if (comp_status != CS_COMPLETE) {
2246 		    if (comp_status == CS_DATA_UNDERRUN) {
2247 			    res = DID_OK << 16;
2248 			    bsg_reply->reply_payload_rcv_len =
2249 				le16_to_cpu(pkt->rsp_info_len);
2250 
2251 			    ql_log(ql_log_warn, vha, 0x5048,
2252 				"CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
2253 				type, comp_status,
2254 				bsg_reply->reply_payload_rcv_len);
2255 		    } else {
2256 			    ql_log(ql_log_warn, vha, 0x5049,
2257 				"CT pass-through-%s error comp_status=0x%x.\n",
2258 				type, comp_status);
2259 			    res = DID_ERROR << 16;
2260 			    bsg_reply->reply_payload_rcv_len = 0;
2261 		    }
2262 		    ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
2263 			pkt, sizeof(*pkt));
2264 	    } else {
2265 		    res = DID_OK << 16;
2266 		    bsg_reply->reply_payload_rcv_len =
2267 			bsg_job->reply_payload.payload_len;
2268 		    bsg_job->reply_len = 0;
2269 	    }
2270 	    break;
2271 	case SRB_CT_PTHRU_CMD:
2272 	    /*
2273 	     * Borrowing sts_entry_24xx.comp_status; it is at the
2274 	     * same location as ct_entry_24xx.comp_status.
2275 	     */
2276 	     res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
2277 		 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2278 		 sp->name);
2279 	     break;
2280 	}
2281 
2282 	sp->done(sp, res);
2283 }
2284 
2285 static void
2286 qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
2287     struct sts_entry_24xx *pkt, int iocb_type)
2288 {
2289 	struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
2290 	const char func[] = "ELS_CT_IOCB";
2291 	const char *type;
2292 	srb_t *sp;
2293 	struct bsg_job *bsg_job;
2294 	struct fc_bsg_reply *bsg_reply;
2295 	uint16_t comp_status;
2296 	uint32_t fw_status[3];
2297 	int res, logit = 1;
2298 	struct srb_iocb *els;
2299 	uint n;
2300 	scsi_qla_host_t *vha;
2301 	struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
2302 
2303 	sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
2304 	if (!sp)
2305 		return;
2306 	bsg_job = sp->u.bsg_job;
2307 	vha = sp->vha;
2308 
2309 	type = NULL;
2310 
2311 	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
2312 	fw_status[1] = le32_to_cpu(ese->error_subcode_1);
2313 	fw_status[2] = le32_to_cpu(ese->error_subcode_2);
2314 
2315 	switch (sp->type) {
2316 	case SRB_ELS_CMD_RPT:
2317 	case SRB_ELS_CMD_HST:
2318 		type = "rpt hst";
2319 		break;
2320 	case SRB_ELS_CMD_HST_NOLOGIN:
2321 		type = "els";
2322 		{
2323 			struct els_entry_24xx *els = (void *)pkt;
2324 			struct qla_bsg_auth_els_request *p =
2325 				(struct qla_bsg_auth_els_request *)bsg_job->request;
2326 
2327 			ql_dbg(ql_dbg_user, vha, 0x700f,
2328 			     "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
2329 			     __func__, sc_to_str(p->e.sub_cmd),
2330 			     e->d_id[2], e->d_id[1], e->d_id[0],
2331 			     comp_status, p->e.extra_rx_xchg_address, bsg_job);
2332 
2333 			if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
2334 				if (sp->remap.remapped) {
2335 					n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2336 						bsg_job->reply_payload.sg_cnt,
2337 						sp->remap.rsp.buf,
2338 						sp->remap.rsp.len);
2339 					ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
2340 					   "%s: SG copied %x of %x\n",
2341 					   __func__, n, sp->remap.rsp.len);
2342 				} else {
2343 					ql_dbg(ql_dbg_user, vha, 0x700f,
2344 					   "%s: response buffer not remapped (error)\n",
2345 					   __func__);
2346 				}
2347 			}
2348 		}
2349 		break;
2350 	case SRB_CT_CMD:
2351 		type = "ct pass-through";
2352 		break;
2353 	case SRB_ELS_DCMD:
2354 		type = "Driver ELS logo";
2355 		if (iocb_type != ELS_IOCB_TYPE) {
2356 			ql_dbg(ql_dbg_user, vha, 0x5047,
2357 			    "Completing %s: (%p) type=%d.\n",
2358 			    type, sp, sp->type);
2359 			sp->done(sp, 0);
2360 			return;
2361 		}
2362 		break;
2363 	case SRB_CT_PTHRU_CMD:
2364 		/* Borrowing sts_entry_24xx.comp_status; it is at the
2365 		 * same location as ct_entry_24xx.comp_status.
2366 		 */
2367 		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
2368 			(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2369 			sp->name);
2370 		sp->done(sp, res);
2371 		return;
2372 	default:
2373 		ql_dbg(ql_dbg_user, vha, 0x503e,
2374 		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
2375 		return;
2376 	}
2377 
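	/*
	 * For ELS completions, copy the firmware status words back into
	 * the SRB before mapping the completion status to a Linux
	 * result code.
	 */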
2378 	if (iocb_type == ELS_IOCB_TYPE) {
2379 		els = &sp->u.iocb_cmd;
2380 		els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
2381 		els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
2382 		els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
2383 		els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
2384 		if (comp_status == CS_COMPLETE) {
2385 			res =  DID_OK << 16;
2386 		} else {
2387 			if (comp_status == CS_DATA_UNDERRUN) {
2388 				res =  DID_OK << 16;
2389 				els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
2390 					ese->total_byte_count));
2391 
2392 				if (sp->remap.remapped &&
2393 				    ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
2394 					ql_dbg(ql_dbg_user, vha, 0x503f,
2395 					    "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x\n",
2396 					    __func__, e->s_id[0], e->s_id[2], e->s_id[1],
2397 					    e->d_id[2], e->d_id[1], e->d_id[0]);
2398 					logit = 0;
2399 				}
2400 
2401 			} else if (comp_status == CS_PORT_LOGGED_OUT) {
2402 				ql_dbg(ql_dbg_disc, vha, 0x911e,
2403 				       "%s %d schedule session deletion\n",
2404 				       __func__, __LINE__);
2405 
2406 				els->u.els_plogi.len = 0;
2407 				res = DID_IMM_RETRY << 16;
2408 				qlt_schedule_sess_for_deletion(sp->fcport);
2409 			} else {
2410 				els->u.els_plogi.len = 0;
2411 				res = DID_ERROR << 16;
2412 			}
2413 
2414 			if (sp->remap.remapped &&
2415 			    ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
2416 				if (logit) {
2417 					ql_dbg(ql_dbg_user, vha, 0x503f,
2418 					    "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
2419 					    type, sp->handle, comp_status);
2420 
2421 					ql_dbg(ql_dbg_user, vha, 0x503f,
2422 					    "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2423 					    fw_status[1], fw_status[2],
2424 					    le32_to_cpu(((struct els_sts_entry_24xx *)
2425 						pkt)->total_byte_count),
2426 					    e->s_id[0], e->s_id[2], e->s_id[1],
2427 					    e->d_id[2], e->d_id[1], e->d_id[0]);
2428 				}
2429 				if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
2430 				    sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
2431 					ql_dbg(ql_dbg_edif, vha, 0x911e,
2432 					    "%s rcv reject. Sched delete\n", __func__);
2433 					qlt_schedule_sess_for_deletion(sp->fcport);
2434 				}
2435 			} else if (logit) {
2436 				ql_log(ql_log_info, vha, 0x503f,
2437 				    "%s IOCB Done hdl=%x comp_status=0x%x\n",
2438 				    type, sp->handle, comp_status);
2439 				ql_log(ql_log_info, vha, 0x503f,
2440 				    "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2441 				    fw_status[1], fw_status[2],
2442 				    le32_to_cpu(((struct els_sts_entry_24xx *)
2443 				    pkt)->total_byte_count),
2444 				    e->s_id[0], e->s_id[2], e->s_id[1],
2445 				    e->d_id[2], e->d_id[1], e->d_id[0]);
2446 			}
2447 		}
2448 		goto els_ct_done;
2449 	}
2450 
2451 	/* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
2452 	 * FC payload to the caller.
2453 	 */
2454 	bsg_job = sp->u.bsg_job;
2455 	bsg_reply = bsg_job->reply;
2456 	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2457 	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
2458 
2459 	if (comp_status != CS_COMPLETE) {
2460 		if (comp_status == CS_DATA_UNDERRUN) {
2461 			res = DID_OK << 16;
2462 			bsg_reply->reply_payload_rcv_len =
2463 				le32_to_cpu(ese->total_byte_count);
2464 
2465 			ql_dbg(ql_dbg_user, vha, 0x503f,
2466 			    "ELS-CT pass-through-%s error hdl=%x comp_status=0x%x "
2467 			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
2468 			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
2469 			    le32_to_cpu(ese->total_byte_count));
2470 		} else {
2471 			ql_dbg(ql_dbg_user, vha, 0x5040,
2472 			    "ELS-CT pass-through-%s error hdl=%x comp_status=0x%x "
2473 			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
2474 			    type, sp->handle, comp_status,
2475 			    le32_to_cpu(ese->error_subcode_1),
2476 			    le32_to_cpu(ese->error_subcode_2));
2477 			res = DID_ERROR << 16;
2478 			bsg_reply->reply_payload_rcv_len = 0;
2479 		}
2480 		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
2481 		       fw_status, sizeof(fw_status));
2482 		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
2483 		    pkt, sizeof(*pkt));
2484 	} else {
2486 		res =  DID_OK << 16;
2487 		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2488 		bsg_job->reply_len = 0;
2489 	}
2490 els_ct_done:
2491 
2492 	sp->done(sp, res);
2493 }
2494 
2495 static void
2496 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
2497     struct logio_entry_24xx *logio)
2498 {
2499 	const char func[] = "LOGIO-IOCB";
2500 	const char *type;
2501 	fc_port_t *fcport;
2502 	srb_t *sp;
2503 	struct srb_iocb *lio;
2504 	uint16_t *data;
2505 	uint32_t iop[2];
2506 	int logit = 1;
2507 
2508 	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
2509 	if (!sp)
2510 		return;
2511 
2512 	lio = &sp->u.iocb_cmd;
2513 	type = sp->name;
2514 	fcport = sp->fcport;
2515 	data = lio->u.logio.data;
2516 
2517 	data[0] = MBS_COMMAND_ERROR;
2518 	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2519 		QLA_LOGIO_LOGIN_RETRIED : 0;
2520 	if (logio->entry_status) {
2521 		ql_log(ql_log_warn, fcport->vha, 0x5034,
2522 		    "Async-%s error entry - %8phC hdl=%x "
2523 		    "portid=%02x%02x%02x entry-status=%x.\n",
2524 		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2525 		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
2526 		    logio->entry_status);
2527 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2528 		    logio, sizeof(*logio));
2529 
2530 		goto logio_done;
2531 	}
2532 
2533 	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2534 		ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2535 		    "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2536 		    type, sp->handle, fcport->d_id.b24, fcport->port_name,
2537 		    le32_to_cpu(logio->io_parameter[0]));
2538 
2539 		vha->hw->exch_starvation = 0;
2540 		data[0] = MBS_COMMAND_COMPLETE;
2541 
2542 		if (sp->type == SRB_PRLI_CMD) {
2543 			lio->u.logio.iop[0] =
2544 			    le32_to_cpu(logio->io_parameter[0]);
2545 			lio->u.logio.iop[1] =
2546 			    le32_to_cpu(logio->io_parameter[1]);
2547 			goto logio_done;
2548 		}
2549 
2550 		if (sp->type != SRB_LOGIN_CMD)
2551 			goto logio_done;
2552 
2553 		lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
2554 		if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
2555 			fcport->flags |= FCF_FCSP_DEVICE;
2556 
2557 		iop[0] = le32_to_cpu(logio->io_parameter[0]);
2558 		if (iop[0] & BIT_4) {
2559 			fcport->port_type = FCT_TARGET;
2560 			if (iop[0] & BIT_8)
2561 				fcport->flags |= FCF_FCP2_DEVICE;
2562 		} else if (iop[0] & BIT_5)
2563 			fcport->port_type = FCT_INITIATOR;
2564 
2565 		if (iop[0] & BIT_7)
2566 			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2567 
2568 		if (logio->io_parameter[7] || logio->io_parameter[8])
2569 			fcport->supported_classes |= FC_COS_CLASS2;
2570 		if (logio->io_parameter[9] || logio->io_parameter[10])
2571 			fcport->supported_classes |= FC_COS_CLASS3;
2572 
2573 		goto logio_done;
2574 	}
2575 
2576 	iop[0] = le32_to_cpu(logio->io_parameter[0]);
2577 	iop[1] = le32_to_cpu(logio->io_parameter[1]);
2578 	lio->u.logio.iop[0] = iop[0];
2579 	lio->u.logio.iop[1] = iop[1];
2580 	switch (iop[0]) {
2581 	case LSC_SCODE_PORTID_USED:
2582 		data[0] = MBS_PORT_ID_USED;
2583 		data[1] = LSW(iop[1]);
2584 		logit = 0;
2585 		break;
2586 	case LSC_SCODE_NPORT_USED:
2587 		data[0] = MBS_LOOP_ID_USED;
2588 		logit = 0;
2589 		break;
2590 	case LSC_SCODE_CMD_FAILED:
2591 		if (iop[1] == 0x0606) {
2592 			/*
2593 			 * PLOGI/PRLI completed: we must have received a
2594 			 * PLOGI/PRLI that the target side acked.
2595 			 */
2596 			data[0] = MBS_COMMAND_COMPLETE;
2597 			goto logio_done;
2598 		}
2599 		data[0] = MBS_COMMAND_ERROR;
2600 		break;
2601 	case LSC_SCODE_NOXCB:
2602 		vha->hw->exch_starvation++;
2603 		if (vha->hw->exch_starvation > 5) {
2604 			ql_log(ql_log_warn, vha, 0xd046,
2605 			    "Exchange starvation. Resetting RISC\n");
2606 
2607 			vha->hw->exch_starvation = 0;
2608 
2609 			if (IS_P3P_TYPE(vha->hw))
2610 				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2611 			else
2612 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2613 			qla2xxx_wake_dpc(vha);
2614 		}
2615 		fallthrough;
2616 	default:
2617 		data[0] = MBS_COMMAND_ERROR;
2618 		break;
2619 	}
2620 
2621 	if (logit)
2622 		ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
2623 		       "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2624 		       type, sp->handle, fcport->d_id.b24, fcport->port_name,
2625 		       le16_to_cpu(logio->comp_status),
2626 		       le32_to_cpu(logio->io_parameter[0]),
2627 		       le32_to_cpu(logio->io_parameter[1]));
2628 	else
2629 		ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
2630 		       "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2631 		       type, sp->handle, fcport->d_id.b24, fcport->port_name,
2632 		       le16_to_cpu(logio->comp_status),
2633 		       le32_to_cpu(logio->io_parameter[0]),
2634 		       le32_to_cpu(logio->io_parameter[1]));
2635 
2636 logio_done:
2637 	sp->done(sp, 0);
2638 }
2639 
2640 static void
2641 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2642 {
2643 	const char func[] = "TMF-IOCB";
2644 	const char *type;
2645 	fc_port_t *fcport;
2646 	srb_t *sp;
2647 	struct srb_iocb *iocb;
2648 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2649 	u16 comp_status;
2650 
2651 	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2652 	if (!sp)
2653 		return;
2654 
2655 	comp_status = le16_to_cpu(sts->comp_status);
2656 	iocb = &sp->u.iocb_cmd;
2657 	type = sp->name;
2658 	fcport = sp->fcport;
2659 	iocb->u.tmf.data = QLA_SUCCESS;
2660 
2661 	if (sts->entry_status) {
2662 		ql_log(ql_log_warn, fcport->vha, 0x5038,
2663 		    "Async-%s error - hdl=%x entry-status(%x).\n",
2664 		    type, sp->handle, sts->entry_status);
2665 		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2666 	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2667 		ql_log(ql_log_warn, fcport->vha, 0x5039,
2668 		    "Async-%s error - hdl=%x completion status(%x).\n",
2669 		    type, sp->handle, comp_status);
2670 		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2671 	} else if ((le16_to_cpu(sts->scsi_status) &
2672 	    SS_RESPONSE_INFO_LEN_VALID)) {
2673 		host_to_fcp_swap(sts->data, sizeof(sts->data));
2674 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
2675 			ql_log(ql_log_warn, fcport->vha, 0x503b,
2676 			    "Async-%s error - hdl=%x not enough response(%d).\n",
2677 			    type, sp->handle, le32_to_cpu(sts->rsp_data_len));
2678 		} else if (sts->data[3]) {
2679 			ql_log(ql_log_warn, fcport->vha, 0x503c,
2680 			    "Async-%s error - hdl=%x response(%x).\n",
2681 			    type, sp->handle, sts->data[3]);
2682 			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2683 		}
2684 	}
2685 
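	/*
	 * On connectivity-loss completion statuses, tear down the
	 * session so that a fresh login can be attempted.
	 */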
2686 	switch (comp_status) {
2687 	case CS_PORT_LOGGED_OUT:
2688 	case CS_PORT_CONFIG_CHG:
2689 	case CS_PORT_BUSY:
2690 	case CS_INCOMPLETE:
2691 	case CS_PORT_UNAVAILABLE:
2692 	case CS_RESET:
2693 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
2694 			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2695 			       "Port to be marked lost on fcport=%02x%02x%02x, current port state=%s comp_status=%x.\n",
2696 			       fcport->d_id.b.domain, fcport->d_id.b.area,
2697 			       fcport->d_id.b.al_pa,
2698 			       port_state_str[FCS_ONLINE],
2699 			       comp_status);
2700 
2701 			qlt_schedule_sess_for_deletion(fcport);
2702 		}
2703 		break;
2704 
2705 	default:
2706 		break;
2707 	}
2708 
2709 	if (iocb->u.tmf.data != QLA_SUCCESS)
2710 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
2711 		    sts, sizeof(*sts));
2712 
2713 	sp->done(sp, 0);
2714 }
2715 
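/*
 * Complete an FC-NVMe command: translate the firmware completion status
 * and any NVMe ERSP payload into the form expected by the FC-NVMe
 * transport.
 */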
2716 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2717     void *tsk, srb_t *sp)
2718 {
2719 	fc_port_t *fcport;
2720 	struct srb_iocb *iocb;
2721 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2722 	uint16_t        state_flags;
2723 	struct nvmefc_fcp_req *fd;
2724 	uint16_t        ret = QLA_SUCCESS;
2725 	__le16		comp_status = sts->comp_status;
2726 	int		logit = 0;
2727 
2728 	iocb = &sp->u.iocb_cmd;
2729 	fcport = sp->fcport;
2730 	iocb->u.nvme.comp_status = comp_status;
2731 	state_flags  = le16_to_cpu(sts->state_flags);
2732 	fd = iocb->u.nvme.desc;
2733 
2734 	if (unlikely(iocb->u.nvme.aen_op))
2735 		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2736 	else
2737 		sp->qpair->cmd_completion_cnt++;
2738 
2739 	if (unlikely(comp_status != CS_COMPLETE))
2740 		logit = 1;
2741 
2742 	fd->transferred_length = fd->payload_length -
2743 	    le32_to_cpu(sts->residual_len);
2744 
2745 	/*
2746 	 * State flags: SF_NVME_ERSP (bit 6) and SF_FCP_RSP_DMA (bit 0).
2747 	 * If both are clear, this is the good path with no response
2748 	 * payload. If both are set, the response was already DMA'd to
2749 	 * the host buffer. If only SF_FCP_RSP_DMA is set, treat the
2750 	 * NVMe_RSP IU as an error. If only SF_NVME_ERSP is set, copy
2751 	 * the response data from the status IOCB to the response buffer.
2752 	 */
2753 	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2754 		iocb->u.nvme.rsp_pyld_len = 0;
2755 	} else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2756 			(SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2757 		/* Response already DMA'd to fd->rspaddr. */
2758 		iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2759 	} else if ((state_flags & SF_FCP_RSP_DMA)) {
2760 		/*
2761 		 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
2762 		 * as an error.
2763 		 */
2764 		iocb->u.nvme.rsp_pyld_len = 0;
2765 		fd->transferred_length = 0;
2766 		ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2767 			"Unexpected values in NVMe_RSP IU.\n");
2768 		logit = 1;
2769 	} else if (state_flags & SF_NVME_ERSP) {
2770 		uint32_t *inbuf, *outbuf;
2771 		uint16_t iter;
2772 
2773 		inbuf = (uint32_t *)&sts->nvme_ersp_data;
2774 		outbuf = (uint32_t *)fd->rspaddr;
2775 		iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2776 		if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2777 		    sizeof(struct nvme_fc_ersp_iu))) {
2778 			if (ql_mask_match(ql_dbg_io)) {
2779 				WARN_ONCE(1, "Unexpected response payload length %u.\n",
2780 				    le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2781 				ql_log(ql_log_warn, fcport->vha, 0x5100,
2782 				    "Unexpected response payload length %u.\n",
2783 				    le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2784 			}
2785 			iocb->u.nvme.rsp_pyld_len =
2786 				cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
2787 		}
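		/*
		 * Copy the ERSP IU out of the status IOCB, byte-swapping
		 * each dword into the transport's response buffer.
		 */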
2788 		iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2789 		for (; iter; iter--)
2790 			*outbuf++ = swab32(*inbuf++);
2791 	}
2792 
2793 	if (state_flags & SF_NVME_ERSP) {
2794 		struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2795 		u32 tgt_xfer_len;
2796 
2797 		tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2798 		if (fd->transferred_length != tgt_xfer_len) {
2799 			ql_log(ql_log_warn, fcport->vha, 0x3079,
2800 			       "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2801 			       tgt_xfer_len, fd->transferred_length);
2802 			logit = 1;
2803 		} else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2804 			/*
2805 			 * Do not log if this is just an underflow and there
2806 			 * is no data loss.
2807 			 */
2808 			logit = 0;
2809 		}
2810 	}
2811 
2812 	if (unlikely(logit))
2813 		ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
2814 		   "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2815 		   sp->name, sp->handle, le16_to_cpu(comp_status),
2816 		   fd->transferred_length, le32_to_cpu(sts->residual_len),
2817 		   le16_to_cpu(sts->ox_id));
2818 
2819 	/*
2820 	 * If transport error then Failure (HBA rejects request)
2821 	 * otherwise transport will handle.
2822 	 */
2823 	switch (le16_to_cpu(comp_status)) {
2824 	case CS_COMPLETE:
2825 		break;
2826 
2827 	case CS_RESET:
2828 	case CS_PORT_UNAVAILABLE:
2829 	case CS_PORT_LOGGED_OUT:
2830 		fcport->nvme_flag |= NVME_FLAG_RESETTING;
2831 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
2832 			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2833 			       "Port to be marked lost on fcport=%06x, current "
2834 			       "port state=%s comp_status=%x.\n",
2835 			       fcport->d_id.b24, port_state_str[FCS_ONLINE],
2836 			       le16_to_cpu(comp_status));
2837 
2838 			qlt_schedule_sess_for_deletion(fcport);
2839 		}
2840 		fallthrough;
2841 	case CS_ABORTED:
2842 	case CS_PORT_BUSY:
2843 		fd->transferred_length = 0;
2844 		iocb->u.nvme.rsp_pyld_len = 0;
2845 		ret = QLA_ABORTED;
2846 		break;
2847 	case CS_DATA_UNDERRUN:
2848 		break;
2849 	default:
2850 		ret = QLA_FUNCTION_FAILED;
2851 		break;
2852 	}
2853 	sp->done(sp, ret);
2854 }
2855 
2856 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2857     struct vp_ctrl_entry_24xx *vce)
2858 {
2859 	const char func[] = "CTRLVP-IOCB";
2860 	srb_t *sp;
2861 	int rval = QLA_SUCCESS;
2862 
2863 	sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2864 	if (!sp)
2865 		return;
2866 
2867 	if (vce->entry_status != 0) {
2868 		ql_dbg(ql_dbg_vport, vha, 0x10c4,
2869 		    "%s: Failed to complete IOCB -- error status (%x)\n",
2870 		    sp->name, vce->entry_status);
2871 		rval = QLA_FUNCTION_FAILED;
2872 	} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2873 		ql_dbg(ql_dbg_vport, vha, 0x10c5,
2874 		    "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2875 		    sp->name, le16_to_cpu(vce->comp_status),
2876 		    le16_to_cpu(vce->vp_idx_failed));
2877 		rval = QLA_FUNCTION_FAILED;
2878 	} else {
2879 		ql_dbg(ql_dbg_vport, vha, 0x10c6,
2880 		    "Done %s.\n", __func__);
2881 	}
2882 
2883 	sp->rc = rval;
2884 	sp->done(sp, rval);
2885 }
2886 
2887 /* Process a single response queue entry. */
2888 static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2889 					   struct rsp_que *rsp,
2890 					   sts_entry_t *pkt)
2891 {
2892 	sts21_entry_t *sts21_entry;
2893 	sts22_entry_t *sts22_entry;
2894 	uint16_t handle_cnt;
2895 	uint16_t cnt;
2896 
2897 	switch (pkt->entry_type) {
2898 	case STATUS_TYPE:
2899 		qla2x00_status_entry(vha, rsp, pkt);
2900 		break;
2901 	case STATUS_TYPE_21:
2902 		sts21_entry = (sts21_entry_t *)pkt;
2903 		handle_cnt = sts21_entry->handle_count;
2904 		for (cnt = 0; cnt < handle_cnt; cnt++)
2905 			qla2x00_process_completed_request(vha, rsp->req,
2906 						sts21_entry->handle[cnt]);
2907 		break;
2908 	case STATUS_TYPE_22:
2909 		sts22_entry = (sts22_entry_t *)pkt;
2910 		handle_cnt = sts22_entry->handle_count;
2911 		for (cnt = 0; cnt < handle_cnt; cnt++)
2912 			qla2x00_process_completed_request(vha, rsp->req,
2913 						sts22_entry->handle[cnt]);
2914 		break;
2915 	case STATUS_CONT_TYPE:
2916 		qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2917 		break;
2918 	case MBX_IOCB_TYPE:
2919 		qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2920 		break;
2921 	case CT_IOCB_TYPE:
2922 		qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2923 		break;
2924 	default:
2925 		/* Type Not Supported. */
2926 		ql_log(ql_log_warn, vha, 0x504a,
2927 		       "Received unknown response pkt type %x entry status=%x.\n",
2928 		       pkt->entry_type, pkt->entry_status);
2929 		break;
2930 	}
2931 }
2932 
2933 /**
2934  * qla2x00_process_response_queue() - Process response queue entries.
2935  * @rsp: response queue
2936  */
2937 void
2938 qla2x00_process_response_queue(struct rsp_que *rsp)
2939 {
2940 	struct scsi_qla_host *vha;
2941 	struct qla_hw_data *ha = rsp->hw;
2942 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2943 	sts_entry_t	*pkt;
2944 
2945 	vha = pci_get_drvdata(ha->pdev);
2946 
2947 	if (!vha->flags.online)
2948 		return;
2949 
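	/*
	 * Walk the response ring until we reach an entry that has
	 * already been marked RESPONSE_PROCESSED.
	 */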
2950 	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2951 		pkt = (sts_entry_t *)rsp->ring_ptr;
2952 
2953 		rsp->ring_index++;
2954 		if (rsp->ring_index == rsp->length) {
2955 			rsp->ring_index = 0;
2956 			rsp->ring_ptr = rsp->ring;
2957 		} else {
2958 			rsp->ring_ptr++;
2959 		}
2960 
2961 		if (pkt->entry_status != 0) {
2962 			qla2x00_error_entry(vha, rsp, pkt);
2963 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2964 			wmb();
2965 			continue;
2966 		}
2967 
2968 		qla2x00_process_response_entry(vha, rsp, pkt);
2969 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2970 		wmb();
2971 	}
2972 
2973 	/* Adjust ring index */
2974 	wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
2975 }
2976 
2977 static inline void
2978 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2979 		     uint32_t sense_len, struct rsp_que *rsp, int res)
2980 {
2981 	struct scsi_qla_host *vha = sp->vha;
2982 	struct scsi_cmnd *cp = GET_CMD_SP(sp);
2983 	uint32_t track_sense_len;
2984 
2985 	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2986 		sense_len = SCSI_SENSE_BUFFERSIZE;
2987 
2988 	SET_CMD_SENSE_LEN(sp, sense_len);
2989 	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2990 	track_sense_len = sense_len;
2991 
2992 	if (sense_len > par_sense_len)
2993 		sense_len = par_sense_len;
2994 
2995 	memcpy(cp->sense_buffer, sense_data, sense_len);
2996 
2997 	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2998 	track_sense_len -= sense_len;
2999 	SET_CMD_SENSE_LEN(sp, track_sense_len);
3000 
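	/*
	 * If sense data remains, stash the SRB so the remainder can be
	 * gathered from status continuation entries.
	 */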
3001 	if (track_sense_len != 0) {
3002 		rsp->status_srb = sp;
3003 		cp->result = res;
3004 	}
3005 
3006 	if (sense_len) {
3007 		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
3008 		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
3009 		    sp->vha->host_no, cp->device->id, cp->device->lun,
3010 		    cp);
3011 		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
3012 		    cp->sense_buffer, sense_len);
3013 	}
3014 }
3015 
3016 /*
3017  * Checks the guard or meta-data for the type of error
3018  * detected by the HBA. In case of errors, we set the
3019  * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
3020  * to indicate to the kernel that the HBA detected an error.
3021  */
3022 static inline int
3023 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
3024 {
3025 	struct scsi_qla_host *vha = sp->vha;
3026 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
3027 	uint8_t		*ap = &sts24->data[12];
3028 	uint8_t		*ep = &sts24->data[20];
3029 	uint32_t	e_ref_tag, a_ref_tag;
3030 	uint16_t	e_app_tag, a_app_tag;
3031 	uint16_t	e_guard, a_guard;
3032 
3033 	/*
3034 	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
3035 	 * would make guard field appear at offset 2
3036 	 */
3037 	a_guard   = get_unaligned_le16(ap + 2);
3038 	a_app_tag = get_unaligned_le16(ap + 0);
3039 	a_ref_tag = get_unaligned_le32(ap + 4);
3040 	e_guard   = get_unaligned_le16(ep + 2);
3041 	e_app_tag = get_unaligned_le16(ep + 0);
3042 	e_ref_tag = get_unaligned_le32(ep + 4);
3043 
3044 	ql_dbg(ql_dbg_io, vha, 0x3023,
3045 	    "iocb(s) %p Returned STATUS.\n", sts24);
3046 
3047 	ql_dbg(ql_dbg_io, vha, 0x3024,
3048 	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
3049 	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
3050 	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
3051 	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
3052 	    a_app_tag, e_app_tag, a_guard, e_guard);
3053 
3054 	/*
3055 	 * Ignore sector if:
3056 	 * For type     3: ref & app tag is all 'f's
3057 	 * For type 0,1,2: app tag is all 'f's
3058 	 */
3059 	if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
3060 	    (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
3061 	     a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
3062 		uint32_t blocks_done, resid;
3063 		sector_t lba_s = scsi_get_lba(cmd);
3064 
3065 		/* 2TB boundary case covered automatically with this */
3066 		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
3067 
3068 		resid = scsi_bufflen(cmd) - (blocks_done *
3069 		    cmd->device->sector_size);
3070 
3071 		scsi_set_resid(cmd, resid);
3072 		cmd->result = DID_OK << 16;
3073 
3074 		/* Update protection tag */
3075 		if (scsi_prot_sg_count(cmd)) {
3076 			uint32_t i, j = 0, k = 0, num_ent;
3077 			struct scatterlist *sg;
3078 			struct t10_pi_tuple *spt;
3079 
3080 			/* Patch the corresponding protection tags */
3081 			scsi_for_each_prot_sg(cmd, sg,
3082 			    scsi_prot_sg_count(cmd), i) {
3083 				num_ent = sg_dma_len(sg) / 8;
3084 				if (k + num_ent < blocks_done) {
3085 					k += num_ent;
3086 					continue;
3087 				}
3088 				j = blocks_done - k - 1;
3089 				k = blocks_done;
3090 				break;
3091 			}
3092 
3093 			if (k != blocks_done) {
3094 				ql_log(ql_log_warn, vha, 0x302f,
3095 				    "unexpected tag values (tag:lba=%x:%llx)\n",
3096 				    e_ref_tag, (unsigned long long)lba_s);
3097 				return 1;
3098 			}
3099 
3100 			spt = page_address(sg_page(sg)) + sg->offset;
3101 			spt += j;
3102 
3103 			spt->app_tag = T10_PI_APP_ESCAPE;
3104 			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
3105 				spt->ref_tag = T10_PI_REF_ESCAPE;
3106 		}
3107 
3108 		return 0;
3109 	}
3110 
3111 	/* check guard */
3112 	if (e_guard != a_guard) {
3113 		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
3114 		set_host_byte(cmd, DID_ABORT);
3115 		return 1;
3116 	}
3117 
3118 	/* check ref tag */
3119 	if (e_ref_tag != a_ref_tag) {
3120 		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
3121 		set_host_byte(cmd, DID_ABORT);
3122 		return 1;
3123 	}
3124 
3125 	/* check appl tag */
3126 	if (e_app_tag != a_app_tag) {
3127 		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
3128 		set_host_byte(cmd, DID_ABORT);
3129 		return 1;
3130 	}
3131 
3132 	return 1;
3133 }
3134 
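/*
 * Complete a bidirectional (BSG pass-through) command: map the firmware
 * completion status to an EXT_STATUS code in the vendor-specific reply.
 */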
3135 static void
3136 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
3137 				  struct req_que *req, uint32_t index)
3138 {
3139 	struct qla_hw_data *ha = vha->hw;
3140 	srb_t *sp;
3141 	uint16_t	comp_status;
3142 	uint16_t	scsi_status;
3143 	uint16_t thread_id;
3144 	uint32_t rval = EXT_STATUS_OK;
3145 	struct bsg_job *bsg_job = NULL;
3146 	struct fc_bsg_request *bsg_request;
3147 	struct fc_bsg_reply *bsg_reply;
3148 	sts_entry_t *sts = pkt;
3149 	struct sts_entry_24xx *sts24 = pkt;
3150 
3151 	/* Validate handle. */
3152 	if (index >= req->num_outstanding_cmds) {
3153 		ql_log(ql_log_warn, vha, 0x70af,
3154 		    "Invalid SCSI completion handle 0x%x.\n", index);
3155 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3156 		return;
3157 	}
3158 
3159 	sp = req->outstanding_cmds[index];
3160 	if (!sp) {
3161 		ql_log(ql_log_warn, vha, 0x70b0,
3162 		    "Req:%d: Invalid ISP SCSI completion handle (0x%x)\n",
3163 		    req->id, index);
3164 
3165 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3166 		return;
3167 	}
3168 
3169 	/* Free outstanding command slot. */
3170 	req->outstanding_cmds[index] = NULL;
3171 	bsg_job = sp->u.bsg_job;
3172 	bsg_request = bsg_job->request;
3173 	bsg_reply = bsg_job->reply;
3174 
3175 	if (IS_FWI2_CAPABLE(ha)) {
3176 		comp_status = le16_to_cpu(sts24->comp_status);
3177 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3178 	} else {
3179 		comp_status = le16_to_cpu(sts->comp_status);
3180 		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3181 	}
3182 
3183 	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3184 	switch (comp_status) {
3185 	case CS_COMPLETE:
3186 		if (scsi_status == 0) {
3187 			bsg_reply->reply_payload_rcv_len =
3188 					bsg_job->reply_payload.payload_len;
3189 			vha->qla_stats.input_bytes +=
3190 				bsg_reply->reply_payload_rcv_len;
3191 			vha->qla_stats.input_requests++;
3192 			rval = EXT_STATUS_OK;
3193 		}
3194 		goto done;
3195 
3196 	case CS_DATA_OVERRUN:
3197 		ql_dbg(ql_dbg_user, vha, 0x70b1,
3198 		    "Command completed with data overrun thread_id=%d\n",
3199 		    thread_id);
3200 		rval = EXT_STATUS_DATA_OVERRUN;
3201 		break;
3202 
3203 	case CS_DATA_UNDERRUN:
3204 		ql_dbg(ql_dbg_user, vha, 0x70b2,
3205 		    "Command completed with data underrun thread_id=%d\n",
3206 		    thread_id);
3207 		rval = EXT_STATUS_DATA_UNDERRUN;
3208 		break;
3209 	case CS_BIDIR_RD_OVERRUN:
3210 		ql_dbg(ql_dbg_user, vha, 0x70b3,
3211 		    "Command completed with read data overrun thread_id=%d\n",
3212 		    thread_id);
3213 		rval = EXT_STATUS_DATA_OVERRUN;
3214 		break;
3215 
3216 	case CS_BIDIR_RD_WR_OVERRUN:
3217 		ql_dbg(ql_dbg_user, vha, 0x70b4,
3218 		    "Command completed with read and write data overrun "
3219 		    "thread_id=%d\n", thread_id);
3220 		rval = EXT_STATUS_DATA_OVERRUN;
3221 		break;
3222 
3223 	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
3224 		ql_dbg(ql_dbg_user, vha, 0x70b5,
3225 		    "Command completed with read data over and write data "
3226 		    "underrun thread_id=%d\n", thread_id);
3227 		rval = EXT_STATUS_DATA_OVERRUN;
3228 		break;
3229 
3230 	case CS_BIDIR_RD_UNDERRUN:
3231 		ql_dbg(ql_dbg_user, vha, 0x70b6,
3232 		    "Command completed with read data underrun "
3233 		    "thread_id=%d\n", thread_id);
3234 		rval = EXT_STATUS_DATA_UNDERRUN;
3235 		break;
3236 
3237 	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
3238 		ql_dbg(ql_dbg_user, vha, 0x70b7,
3239 		    "Command completed with read data under and write data "
3240 		    "overrun thread_id=%d\n", thread_id);
3241 		rval = EXT_STATUS_DATA_UNDERRUN;
3242 		break;
3243 
3244 	case CS_BIDIR_RD_WR_UNDERRUN:
3245 		ql_dbg(ql_dbg_user, vha, 0x70b8,
3246 		    "Command completed with read and write data underrun "
3247 		    "thread_id=%d\n", thread_id);
3248 		rval = EXT_STATUS_DATA_UNDERRUN;
3249 		break;
3250 
3251 	case CS_BIDIR_DMA:
3252 		ql_dbg(ql_dbg_user, vha, 0x70b9,
3253 		    "Command completed with data DMA error thread_id=%d\n",
3254 		    thread_id);
3255 		rval = EXT_STATUS_DMA_ERR;
3256 		break;
3257 
3258 	case CS_TIMEOUT:
3259 		ql_dbg(ql_dbg_user, vha, 0x70ba,
3260 		    "Command completed with timeout thread_id=%d\n",
3261 		    thread_id);
3262 		rval = EXT_STATUS_TIMEOUT;
3263 		break;
3264 	default:
3265 		ql_dbg(ql_dbg_user, vha, 0x70bb,
3266 		    "Command completed with completion status=0x%x "
3267 		    "thread_id=%d\n", comp_status, thread_id);
3268 		rval = EXT_STATUS_ERR;
3269 		break;
3270 	}
3271 	bsg_reply->reply_payload_rcv_len = 0;
3272 
3273 done:
3274 	/* Return the vendor specific reply to API */
3275 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
3276 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
3277 	/* Always return DID_OK; bsg will send the vendor-specific
3278 	 * response in this case only. */
3279 	sp->done(sp, DID_OK << 16);
3280 
3281 }
3282 
3283 /**
3284  * qla2x00_status_entry() - Process a Status IOCB entry.
3285  * @vha: SCSI driver HA context
3286  * @rsp: response queue
3287  * @pkt: Entry pointer
3288  */
3289 static void
3290 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3291 {
3292 	srb_t		*sp;
3293 	fc_port_t	*fcport;
3294 	struct scsi_cmnd *cp;
3295 	sts_entry_t *sts = pkt;
3296 	struct sts_entry_24xx *sts24 = pkt;
3297 	uint16_t	comp_status;
3298 	uint16_t	scsi_status;
3299 	uint16_t	ox_id;
3300 	uint8_t		lscsi_status;
3301 	int32_t		resid;
3302 	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
3303 	    fw_resid_len;
3304 	uint8_t		*rsp_info, *sense_data;
3305 	struct qla_hw_data *ha = vha->hw;
3306 	uint32_t handle;
3307 	uint16_t que;
3308 	struct req_que *req;
3309 	int logit = 1;
3310 	int res = 0;
3311 	uint16_t state_flags = 0;
3312 	uint16_t sts_qual = 0;
3313 
3314 	if (IS_FWI2_CAPABLE(ha)) {
3315 		comp_status = le16_to_cpu(sts24->comp_status);
3316 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3317 		state_flags = le16_to_cpu(sts24->state_flags);
3318 	} else {
3319 		comp_status = le16_to_cpu(sts->comp_status);
3320 		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3321 	}
3322 	handle = (uint32_t) LSW(sts->handle);
3323 	que = MSW(sts->handle);
3324 	req = ha->req_q_map[que];
3325 
3326 	/* Check for invalid queue pointer */
3327 	if (req == NULL ||
3328 	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
3329 		ql_dbg(ql_dbg_io, vha, 0x3059,
3330 		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
3331 		    "que=%u.\n", sts->handle, req, que);
3332 		return;
3333 	}
3334 
3335 	/* Validate handle. */
3336 	if (handle < req->num_outstanding_cmds) {
3337 		sp = req->outstanding_cmds[handle];
3338 		if (!sp) {
3339 			ql_dbg(ql_dbg_io, vha, 0x3075,
3340 			    "%s(%ld): Already returned command for status handle (0x%x).\n",
3341 			    __func__, vha->host_no, sts->handle);
3342 			return;
3343 		}
3344 	} else {
3345 		ql_dbg(ql_dbg_io, vha, 0x3017,
3346 		    "Invalid status handle, out of range (0x%x).\n",
3347 		    sts->handle);
3348 
3349 		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3350 			if (IS_P3P_TYPE(ha))
3351 				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3352 			else
3353 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3354 			qla2xxx_wake_dpc(vha);
3355 		}
3356 		return;
3357 	}
3358 	qla_put_fw_resources(sp->qpair, &sp->iores);
3359 
3360 	if (sp->cmd_type != TYPE_SRB) {
3361 		req->outstanding_cmds[handle] = NULL;
3362 		ql_dbg(ql_dbg_io, vha, 0x3015,
3363 		    "Unknown sp->cmd_type %x (%p).\n",
3364 		    sp->cmd_type, sp);
3365 		return;
3366 	}
3367 
3368 	/* NVME completion. */
3369 	if (sp->type == SRB_NVME_CMD) {
3370 		req->outstanding_cmds[handle] = NULL;
3371 		qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
3372 		return;
3373 	}
3374 
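	/* Bidirectional command completion. */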
3375 	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
3376 		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
3377 		return;
3378 	}
3379 
3380 	/* Task Management completion. */
3381 	if (sp->type == SRB_TM_CMD) {
3382 		qla24xx_tm_iocb_entry(vha, req, pkt);
3383 		return;
3384 	}
3385 
3386 	/* Fast path completion. */
3387 	qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
3388 	sp->qpair->cmd_completion_cnt++;
3389 
3390 	if (comp_status == CS_COMPLETE && scsi_status == 0) {
3391 		qla2x00_process_completed_request(vha, req, handle);
3392 
3393 		return;
3394 	}
3395 
3396 	cp = GET_CMD_SP(sp);
3397 	if (cp == NULL) {
3398 		ql_dbg(ql_dbg_io, vha, 0x3018,
3399 		    "Command already returned (0x%x/%p).\n",
3400 		    sts->handle, sp);
3401 
3402 		req->outstanding_cmds[handle] = NULL;
3403 		return;
3404 	}
3405 
3406 	lscsi_status = scsi_status & STATUS_MASK;
3407 
3408 	fcport = sp->fcport;
3409 
3410 	ox_id = 0;
3411 	sense_len = par_sense_len = rsp_info_len = resid_len =
3412 	    fw_resid_len = 0;
3413 	if (IS_FWI2_CAPABLE(ha)) {
3414 		if (scsi_status & SS_SENSE_LEN_VALID)
3415 			sense_len = le32_to_cpu(sts24->sense_len);
3416 		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3417 			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
3418 		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
3419 			resid_len = le32_to_cpu(sts24->rsp_residual_count);
3420 		if (comp_status == CS_DATA_UNDERRUN)
3421 			fw_resid_len = le32_to_cpu(sts24->residual_len);
3422 		rsp_info = sts24->data;
3423 		sense_data = sts24->data;
3424 		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
3425 		ox_id = le16_to_cpu(sts24->ox_id);
3426 		par_sense_len = sizeof(sts24->data);
3427 		sts_qual = le16_to_cpu(sts24->status_qualifier);
3428 	} else {
3429 		if (scsi_status & SS_SENSE_LEN_VALID)
3430 			sense_len = le16_to_cpu(sts->req_sense_length);
3431 		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3432 			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3433 		resid_len = le32_to_cpu(sts->residual_length);
3434 		rsp_info = sts->rsp_info;
3435 		sense_data = sts->req_sense_data;
3436 		par_sense_len = sizeof(sts->req_sense_data);
3437 	}
3438 
3439 	/* Check for any FCP transport errors. */
3440 	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
3441 		/* Sense data lies beyond any FCP RESPONSE data. */
3442 		if (IS_FWI2_CAPABLE(ha)) {
3443 			sense_data += rsp_info_len;
3444 			par_sense_len -= rsp_info_len;
3445 		}
3446 		if (rsp_info_len > 3 && rsp_info[3]) {
3447 			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
3448 			    "FCP I/O protocol failure (0x%x/0x%x).\n",
3449 			    rsp_info_len, rsp_info[3]);
3450 
3451 			res = DID_BUS_BUSY << 16;
3452 			goto out;
3453 		}
3454 	}
3455 
3456 	/* Check for overrun. */
3457 	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3458 	    scsi_status & SS_RESIDUAL_OVER)
3459 		comp_status = CS_DATA_OVERRUN;
3460 
3461 	/*
3462 	 * Check retry_delay_timer value if we receive a busy or
3463 	 * queue full.
3464 	 */
3465 	if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3466 		     lscsi_status == SAM_STAT_BUSY))
3467 		qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
3468 
3469 	/*
3470 	 * Based on host and SCSI status, generate the Linux status code.
3471 	 */
3472 	switch (comp_status) {
3473 	case CS_COMPLETE:
3474 	case CS_QUEUE_FULL:
3475 		if (scsi_status == 0) {
3476 			res = DID_OK << 16;
3477 			break;
3478 		}
3479 		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3480 			resid = resid_len;
3481 			scsi_set_resid(cp, resid);
3482 
3483 			if (!lscsi_status &&
3484 			    ((unsigned)(scsi_bufflen(cp) - resid) <
3485 			     cp->underflow)) {
3486 				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3487 				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3488 				    resid, scsi_bufflen(cp));
3489 
3490 				res = DID_ERROR << 16;
3491 				break;
3492 			}
3493 		}
3494 		res = DID_OK << 16 | lscsi_status;
3495 
3496 		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3497 			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3498 			    "QUEUE FULL detected.\n");
3499 			break;
3500 		}
3501 		logit = 0;
3502 		if (lscsi_status != SS_CHECK_CONDITION)
3503 			break;
3504 
3505 		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3506 		if (!(scsi_status & SS_SENSE_LEN_VALID))
3507 			break;
3508 
3509 		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3510 		    rsp, res);
3511 		break;
3512 
3513 	case CS_DATA_UNDERRUN:
3514 		/* Use F/W calculated residual length. */
3515 		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3516 		scsi_set_resid(cp, resid);
3517 		if (scsi_status & SS_RESIDUAL_UNDER) {
3518 			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3519 				ql_log(ql_log_warn, fcport->vha, 0x301d,
3520 				       "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3521 				       resid, scsi_bufflen(cp));
3522 
3523 				res = DID_ERROR << 16 | lscsi_status;
3524 				goto check_scsi_status;
3525 			}
3526 
3527 			if (!lscsi_status &&
3528 			    ((unsigned)(scsi_bufflen(cp) - resid) <
3529 			    cp->underflow)) {
3530 				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3531 				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3532 				    resid, scsi_bufflen(cp));
3533 
3534 				res = DID_ERROR << 16;
3535 				break;
3536 			}
3537 		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3538 			    lscsi_status != SAM_STAT_BUSY) {
3539 			/*
3540 			 * A SCSI status of TASK SET FULL or BUSY indicates the
3541 			 * task was not completed.
3542 			 */
3543 
3544 			ql_log(ql_log_warn, fcport->vha, 0x301f,
3545 			       "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3546 			       resid, scsi_bufflen(cp));
3547 
3548 			vha->interface_err_cnt++;
3549 
3550 			res = DID_ERROR << 16 | lscsi_status;
3551 			goto check_scsi_status;
3552 		} else {
3553 			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3554 			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3555 			    scsi_status, lscsi_status);
3556 		}
3557 
3558 		res = DID_OK << 16 | lscsi_status;
3559 		logit = 0;
3560 
3561 check_scsi_status:
3562 		/*
3563 		 * Check to see if the SCSI status is non-zero. If so, report
3564 		 * the SCSI status.
3565 		 */
3566 		if (lscsi_status != 0) {
3567 			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3568 				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3569 				    "QUEUE FULL detected.\n");
3570 				logit = 1;
3571 				break;
3572 			}
3573 			if (lscsi_status != SS_CHECK_CONDITION)
3574 				break;
3575 
3576 			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3577 			if (!(scsi_status & SS_SENSE_LEN_VALID))
3578 				break;
3579 
3580 			qla2x00_handle_sense(sp, sense_data, par_sense_len,
3581 			    sense_len, rsp, res);
3582 		}
3583 		break;
3584 
3585 	case CS_PORT_LOGGED_OUT:
3586 	case CS_PORT_CONFIG_CHG:
3587 	case CS_PORT_BUSY:
3588 	case CS_INCOMPLETE:
3589 	case CS_PORT_UNAVAILABLE:
3590 	case CS_TIMEOUT:
3591 	case CS_RESET:
3592 	case CS_EDIF_INV_REQ:
3593 
3594 		/*
3595 		 * We are going to have the FC class block the rport
3596 		 * while we try to recover, so instruct the mid-layer
3597 		 * to requeue until the class decides how to handle this.
3598 		 */
3599 		res = DID_TRANSPORT_DISRUPTED << 16;
3600 
3601 		if (comp_status == CS_TIMEOUT) {
3602 			if (IS_FWI2_CAPABLE(ha))
3603 				break;
3604 			else if ((le16_to_cpu(sts->status_flags) &
3605 			    SF_LOGOUT_SENT) == 0)
3606 				break;
3607 		}
3608 
3609 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
3610 			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
3611 				"Port to be marked lost on fcport=%02x%02x%02x, current "
3612 				"port state=%s, comp_status=%x.\n", fcport->d_id.b.domain,
3613 				fcport->d_id.b.area, fcport->d_id.b.al_pa,
3614 				port_state_str[FCS_ONLINE],
3615 				comp_status);
3616 
3617 			qlt_schedule_sess_for_deletion(fcport);
3618 		}
3619 
3620 		break;
3621 
3622 	case CS_ABORTED:
3623 		res = DID_RESET << 16;
3624 		break;
3625 
3626 	case CS_DIF_ERROR:
3627 		logit = qla2x00_handle_dif_error(sp, sts24);
3628 		res = cp->result;
3629 		break;
3630 
3631 	case CS_TRANSPORT:
3632 		res = DID_ERROR << 16;
3633 		vha->hw_err_cnt++;
3634 
3635 		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3636 			break;
3637 
3638 		if (state_flags & BIT_4)
3639 			scmd_printk(KERN_WARNING, cp,
3640 			    "Unsupported device '%s' found.\n",
3641 			    cp->device->vendor);
3642 		break;
3643 
3644 	case CS_DMA:
3645 		ql_log(ql_log_info, fcport->vha, 0x3022,
3646 		    "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3647 		    comp_status, scsi_status, res, vha->host_no,
3648 		    cp->device->id, cp->device->lun, fcport->d_id.b24,
3649 		    ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3650 		    resid_len, fw_resid_len, sp, cp);
3651 		ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3652 		    pkt, sizeof(*sts24));
3653 		res = DID_ERROR << 16;
3654 		vha->hw_err_cnt++;
3655 		break;
3656 	default:
3657 		res = DID_ERROR << 16;
3658 		break;
3659 	}
3660 
3661 out:
3662 	if (logit)
3663 		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
3664 		       "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3665 		       comp_status, scsi_status, res, vha->host_no,
3666 		       cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3667 		       fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3668 		       cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3669 		       resid_len, fw_resid_len, sp, cp);
3670 
3671 	if (rsp->status_srb == NULL)
3672 		sp->done(sp, res);
3673 
3674 	/* For I/Os, clearing outstanding_cmds[handle] means scsi_done was called. */
3675 	req->outstanding_cmds[handle] = NULL;
3676 }
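
/*
 * Summary of the comp_status -> Linux result mapping implemented above
 * (a sketch; the switch is authoritative, including the underrun and
 * sense-data special cases):
 *
 *   CS_COMPLETE / CS_QUEUE_FULL        -> DID_OK (+ SCSI status/sense)
 *   CS_DATA_UNDERRUN                   -> DID_OK, or DID_ERROR on
 *                                         dropped frames
 *   CS_PORT_* / CS_INCOMPLETE / CS_TIMEOUT / CS_RESET / CS_EDIF_INV_REQ
 *                                      -> DID_TRANSPORT_DISRUPTED
 *   CS_ABORTED                         -> DID_RESET
 *   CS_DIF_ERROR                       -> result set by
 *                                         qla2x00_handle_dif_error()
 *   CS_TRANSPORT / CS_DMA / default    -> DID_ERROR
 */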
3677 
3678 /**
3679  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
3680  * @rsp: response queue
3681  * @pkt: Entry pointer
3682  *
3683  * Extended sense data.
3684  */
3685 static void
3686 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3687 {
3688 	uint8_t	sense_sz = 0;
3689 	struct qla_hw_data *ha = rsp->hw;
3690 	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3691 	srb_t *sp = rsp->status_srb;
3692 	struct scsi_cmnd *cp;
3693 	uint32_t sense_len;
3694 	uint8_t *sense_ptr;
3695 
3696 	if (!sp || !GET_CMD_SENSE_LEN(sp))
3697 		return;
3698 
3699 	sense_len = GET_CMD_SENSE_LEN(sp);
3700 	sense_ptr = GET_CMD_SENSE_PTR(sp);
3701 
3702 	cp = GET_CMD_SP(sp);
3703 	if (cp == NULL) {
3704 		ql_log(ql_log_warn, vha, 0x3025,
3705 		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3706 
3707 		rsp->status_srb = NULL;
3708 		return;
3709 	}
3710 
3711 	if (sense_len > sizeof(pkt->data))
3712 		sense_sz = sizeof(pkt->data);
3713 	else
3714 		sense_sz = sense_len;
3715 
3716 	/* Move sense data. */
3717 	if (IS_FWI2_CAPABLE(ha))
3718 		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3719 	memcpy(sense_ptr, pkt->data, sense_sz);
3720 	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3721 		sense_ptr, sense_sz);
3722 
3723 	sense_len -= sense_sz;
3724 	sense_ptr += sense_sz;
3725 
3726 	SET_CMD_SENSE_PTR(sp, sense_ptr);
3727 	SET_CMD_SENSE_LEN(sp, sense_len);
3728 
3729 	/* Place command on done queue. */
3730 	if (sense_len == 0) {
3731 		rsp->status_srb = NULL;
3732 		sp->done(sp, cp->result);
3733 	}
3734 }
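
/*
 * Worked example of the sense-data chaining above, assuming 96 bytes
 * of sense remain and sizeof(pkt->data) is 60 (illustrative figures):
 * the first continuation entry copies 60 bytes and leaves sense_len =
 * 36, the next copies the final 36 bytes, sense_len reaches 0,
 * rsp->status_srb is cleared and sp->done() completes the command.
 */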
3735 
3736 /**
3737  * qla2x00_error_entry() - Process an error entry.
3738  * @vha: SCSI driver HA context
3739  * @rsp: response queue
3740  * @pkt: Entry pointer
3741  * Return: 1 to allow further error analysis; 0 for no additional error analysis.
3742  */
3743 static int
3744 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3745 {
3746 	srb_t *sp;
3747 	struct qla_hw_data *ha = vha->hw;
3748 	const char func[] = "ERROR-IOCB";
3749 	uint16_t que = MSW(pkt->handle);
3750 	struct req_que *req = NULL;
3751 	int res = DID_ERROR << 16;
3752 	u16 index;
3753 
3754 	ql_dbg(ql_dbg_async, vha, 0x502a,
3755 	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3756 	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3757 
3758 	if (que >= ha->max_req_queues || !ha->req_q_map[que])
3759 		goto fatal;
3760 
3761 	req = ha->req_q_map[que];
3762 
3763 	if (pkt->entry_status & RF_BUSY)
3764 		res = DID_BUS_BUSY << 16;
3765 
3766 	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3767 		return 0;
3768 
3769 	switch (pkt->entry_type) {
3770 	case NOTIFY_ACK_TYPE:
3771 	case STATUS_CONT_TYPE:
3772 	case LOGINOUT_PORT_IOCB_TYPE:
3773 	case CT_IOCB_TYPE:
3774 	case ELS_IOCB_TYPE:
3775 	case ABORT_IOCB_TYPE:
3776 	case MBX_IOCB_TYPE:
3777 	default:
3778 		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3779 		if (sp) {
3780 			sp->done(sp, res);
3781 			return 0;
3782 		}
3783 		break;
3784 
3785 	case SA_UPDATE_IOCB_TYPE:
3786 	case ABTS_RESP_24XX:
3787 	case CTIO_TYPE7:
3788 	case CTIO_CRC2:
3789 		return 1;
3790 	case STATUS_TYPE:
3791 		sp = qla_get_sp_from_handle(vha, func, req, pkt, &index);
3792 		if (sp) {
3793 			sp->done(sp, res);
3794 			req->outstanding_cmds[index] = NULL;
3795 			return 0;
3796 		}
3797 		break;
3798 	}
3799 fatal:
3800 	ql_log(ql_log_warn, vha, 0x5030,
3801 	    "Error entry - invalid handle/queue (%04x).\n", que);
3802 	return 0;
3803 }
3804 
3805 /**
3806  * qla24xx_mbx_completion() - Process mailbox command completions.
3807  * @vha: SCSI driver HA context
3808  * @mb0: Mailbox0 register
3809  */
3810 static void
3811 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3812 {
3813 	uint16_t	cnt;
3814 	uint32_t	mboxes;
3815 	__le16 __iomem *wptr;
3816 	struct qla_hw_data *ha = vha->hw;
3817 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3818 
3819 	/* Read all mbox registers? */
3820 	WARN_ON_ONCE(ha->mbx_count > 32);
3821 	mboxes = (1ULL << ha->mbx_count) - 1;
3822 	if (!ha->mcp)
3823 		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3824 	else
3825 		mboxes = ha->mcp->in_mb;
3826 
3827 	/* Load return mailbox registers. */
3828 	ha->flags.mbox_int = 1;
3829 	ha->mailbox_out[0] = mb0;
3830 	mboxes >>= 1;
3831 	wptr = &reg->mailbox1;
3832 
3833 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3834 		if (mboxes & BIT_0)
3835 			ha->mailbox_out[cnt] = rd_reg_word(wptr);
3836 
3837 		mboxes >>= 1;
3838 		wptr++;
3839 	}
3840 }
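
/*
 * Illustration of the harvesting loop above: with, say, mcp->in_mb =
 * MBX_0|MBX_1|MBX_3, the mask is shifted once per register, so
 * mailbox1 and mailbox3 are read into mailbox_out[] while mailbox2 is
 * skipped (wptr still advances). mailbox_out[0] always comes from the
 * mb0 value captured out of the host status word.
 */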
3841 
3842 static void
3843 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3844 	struct abort_entry_24xx *pkt)
3845 {
3846 	const char func[] = "ABT_IOCB";
3847 	srb_t *sp;
3848 	srb_t *orig_sp = NULL;
3849 	struct srb_iocb *abt;
3850 
3851 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3852 	if (!sp)
3853 		return;
3854 
3855 	abt = &sp->u.iocb_cmd;
3856 	abt->u.abt.comp_status = pkt->comp_status;
3857 	orig_sp = sp->cmd_sp;
3858 	/* Need to pass original sp */
3859 	if (orig_sp)
3860 		qla_nvme_abort_process_comp_status(pkt, orig_sp);
3861 
3862 	sp->done(sp, 0);
3863 }
3864 
3865 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3866     struct pt_ls4_request *pkt, struct req_que *req)
3867 {
3868 	srb_t *sp;
3869 	const char func[] = "LS4_IOCB";
3870 	uint16_t comp_status;
3871 
3872 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3873 	if (!sp)
3874 		return;
3875 
3876 	comp_status = le16_to_cpu(pkt->status);
3877 	sp->done(sp, comp_status);
3878 }
3879 
3880 /**
3881  * qla_chk_cont_iocb_avail - check that all continuation IOCBs are available
3882  *   before IOCB processing can start.
3883  * @vha: host adapter pointer
3884  * @rsp: response queue
3885  * @pkt: head IOCB describing how many continuation IOCBs follow
 * @rsp_q_in: response queue in pointer
3886  * Return: 0 if all IOCBs have arrived; non-zero if they have not.
3887  */
3888 static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
3889 	struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
3890 {
3891 	int start_pkt_ring_index;
3892 	u32 iocb_cnt = 0;
3893 	int rc = 0;
3894 
3895 	if (pkt->entry_count == 1)
3896 		return rc;
3897 
3898 	/* ring_index was pre-incremented; set it back to the current pkt. */
3899 	if (rsp->ring_index == 0)
3900 		start_pkt_ring_index = rsp->length - 1;
3901 	else
3902 		start_pkt_ring_index = rsp->ring_index - 1;
3903 
3904 	if (rsp_q_in < start_pkt_ring_index)
3905 		/* queue in-pointer has wrapped */
3906 		iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
3907 	else
3908 		iocb_cnt = rsp_q_in - start_pkt_ring_index;
3909 
3910 	if (iocb_cnt < pkt->entry_count)
3911 		rc = -EIO;
3912 
3913 	ql_dbg(ql_dbg_init, vha, 0x5091,
3914 	       "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
3915 	       __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
3916 
3917 	return rc;
3918 }
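
/*
 * Worked example of the wrap arithmetic above, assuming a response
 * ring of length 128: with the head IOCB at ring index 126
 * (start_pkt_ring_index) and rsp_q_in wrapped around to 1, iocb_cnt =
 * 128 - 126 + 1 = 3 entries are on the ring, so a head IOCB with
 * entry_count = 4 returns -EIO and is re-processed on a later
 * interrupt once the stragglers arrive.
 */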
3919 
3920 static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3921 	struct mrk_entry_24xx *pkt)
3922 {
3923 	const char func[] = "MRK-IOCB";
3924 	srb_t *sp;
3925 	int res = QLA_SUCCESS;
3926 
3927 	if (!IS_FWI2_CAPABLE(vha->hw))
3928 		return;
3929 
3930 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3931 	if (!sp)
3932 		return;
3933 
3934 	if (pkt->entry_status) {
3935 		ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n");
3936 		res = QLA_COMMAND_ERROR;
3937 	}
3938 	sp->u.iocb_cmd.u.tmf.data = res;
3939 	sp->done(sp, res);
3940 }
3941 
3942 /**
3943  * qla24xx_process_response_queue() - Process response queue entries.
3944  * @vha: SCSI driver HA context
3945  * @rsp: response queue
3946  */
3947 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3948 	struct rsp_que *rsp)
3949 {
3950 	struct sts_entry_24xx *pkt;
3951 	struct qla_hw_data *ha = vha->hw;
3952 	struct purex_entry_24xx *purex_entry;
3953 	struct purex_item *pure_item;
3954 	struct pt_ls4_rx_unsol *p;
3955 	u16 rsp_in = 0, cur_ring_index;
3956 	int is_shadow_hba;
3957 
3958 	if (!ha->flags.fw_started)
3959 		return;
3960 
3961 	if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
3962 		rsp->qpair->rcv_intr = 1;
3963 
3964 		if (!rsp->qpair->cpu_mapped)
3965 			qla_cpu_update(rsp->qpair, raw_smp_processor_id());
3966 	}
3967 
3968 #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in)			\
3969 	do {								\
3970 		_rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr :		\
3971 				rd_reg_dword_relaxed((_rsp)->rsp_q_in);	\
3972 	} while (0)
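
	/*
	 * On shadow-register capable ISPs the firmware DMAs the
	 * response-queue in-pointer into host memory (*rsp->in_ptr), so
	 * the macro above can poll it without the cost of an MMIO read;
	 * otherwise the in-pointer register itself is read.
	 */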
3973 
3974 	is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
3975 
3976 	__update_rsp_in(is_shadow_hba, rsp, rsp_in);
3977 
3978 	while (rsp->ring_index != rsp_in &&
3979 		       rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3980 		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3981 		cur_ring_index = rsp->ring_index;
3982 
3983 		rsp->ring_index++;
3984 		if (rsp->ring_index == rsp->length) {
3985 			rsp->ring_index = 0;
3986 			rsp->ring_ptr = rsp->ring;
3987 		} else {
3988 			rsp->ring_ptr++;
3989 		}
3990 
3991 		if (pkt->entry_status != 0) {
3992 			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3993 				goto process_err;
3994 
3995 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3996 			wmb();
3997 			continue;
3998 		}
3999 process_err:
4000 
4001 		switch (pkt->entry_type) {
4002 		case STATUS_TYPE:
4003 			qla2x00_status_entry(vha, rsp, pkt);
4004 			break;
4005 		case STATUS_CONT_TYPE:
4006 			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
4007 			break;
4008 		case VP_RPT_ID_IOCB_TYPE:
4009 			qla24xx_report_id_acquisition(vha,
4010 			    (struct vp_rpt_id_entry_24xx *)pkt);
4011 			break;
4012 		case LOGINOUT_PORT_IOCB_TYPE:
4013 			qla24xx_logio_entry(vha, rsp->req,
4014 			    (struct logio_entry_24xx *)pkt);
4015 			break;
4016 		case CT_IOCB_TYPE:
4017 			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
4018 			break;
4019 		case ELS_IOCB_TYPE:
4020 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
4021 			break;
4022 		case ABTS_RECV_24XX:
4023 			if (qla_ini_mode_enabled(vha)) {
4024 				pure_item = qla24xx_copy_std_pkt(vha, pkt);
4025 				if (!pure_item)
4026 					break;
4027 				qla24xx_queue_purex_item(vha, pure_item,
4028 							 qla24xx_process_abts);
4029 				break;
4030 			}
4031 			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4032 			    IS_QLA28XX(ha)) {
4033 				/* ensure that the ATIO queue is empty */
4034 				qlt_handle_abts_recv(vha, rsp,
4035 				    (response_t *)pkt);
4036 				break;
4037 			} else {
4038 				qlt_24xx_process_atio_queue(vha, 1);
4039 			}
4040 			fallthrough;
4041 		case ABTS_RESP_24XX:
4042 		case CTIO_TYPE7:
4043 		case CTIO_CRC2:
4044 			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
4045 			break;
4046 		case PT_LS4_REQUEST:
4047 			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
4048 			    rsp->req);
4049 			break;
4050 		case NOTIFY_ACK_TYPE:
4051 			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
4052 				qlt_response_pkt_all_vps(vha, rsp,
4053 				    (response_t *)pkt);
4054 			else
4055 				qla24xxx_nack_iocb_entry(vha, rsp->req,
4056 					(struct nack_to_isp *)pkt);
4057 			break;
4058 		case MARKER_TYPE:
4059 			qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt);
4060 			break;
4061 		case ABORT_IOCB_TYPE:
4062 			qla24xx_abort_iocb_entry(vha, rsp->req,
4063 			    (struct abort_entry_24xx *)pkt);
4064 			break;
4065 		case MBX_IOCB_TYPE:
4066 			qla24xx_mbx_iocb_entry(vha, rsp->req,
4067 			    (struct mbx_24xx_entry *)pkt);
4068 			break;
4069 		case VP_CTRL_IOCB_TYPE:
4070 			qla_ctrlvp_completed(vha, rsp->req,
4071 			    (struct vp_ctrl_entry_24xx *)pkt);
4072 			break;
4073 		case PUREX_IOCB_TYPE:
4074 			purex_entry = (void *)pkt;
4075 			switch (purex_entry->els_frame_payload[3]) {
4076 			case ELS_RDP:
4077 				pure_item = qla24xx_copy_std_pkt(vha, pkt);
4078 				if (!pure_item)
4079 					break;
4080 				qla24xx_queue_purex_item(vha, pure_item,
4081 						 qla24xx_process_purex_rdp);
4082 				break;
4083 			case ELS_FPIN:
4084 				if (!vha->hw->flags.scm_enabled) {
4085 					ql_log(ql_log_warn, vha, 0x5094,
4086 					       "SCM not active for this port\n");
4087 					break;
4088 				}
4089 				pure_item = qla27xx_copy_fpin_pkt(vha,
4090 							  (void **)&pkt, &rsp);
4091 				__update_rsp_in(is_shadow_hba, rsp, rsp_in);
4092 				if (!pure_item)
4093 					break;
4094 				qla24xx_queue_purex_item(vha, pure_item,
4095 						 qla27xx_process_purex_fpin);
4096 				break;
4097 
4098 			case ELS_AUTH_ELS:
4099 				if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
4100 					/*
4101 					 * ring_ptr and ring_index were
4102 					 * pre-incremented above. Reset them
4103 					 * back to current. Wait for next
4104 					 * interrupt with all IOCBs to arrive
4105 					 * and re-process.
4106 					 */
4107 					rsp->ring_ptr = (response_t *)pkt;
4108 					rsp->ring_index = cur_ring_index;
4109 
4110 					ql_dbg(ql_dbg_init, vha, 0x5091,
4111 					    "Defer processing ELS opcode %#x...\n",
4112 					    purex_entry->els_frame_payload[3]);
4113 					return;
4114 				}
4115 				qla24xx_auth_els(vha, (void **)&pkt, &rsp);
4116 				break;
4117 			default:
4118 				ql_log(ql_log_warn, vha, 0x509c,
4119 				       "Discarding ELS Request opcode 0x%x\n",
4120 				       purex_entry->els_frame_payload[3]);
4121 			}
4122 			break;
4123 		case SA_UPDATE_IOCB_TYPE:
4124 			qla28xx_sa_update_iocb_entry(vha, rsp->req,
4125 				(struct sa_update_28xx *)pkt);
4126 			break;
4127 		case PT_LS4_UNSOL:
4128 			p = (void *)pkt;
4129 			if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
4130 				rsp->ring_ptr = (response_t *)pkt;
4131 				rsp->ring_index = cur_ring_index;
4132 
4133 				ql_dbg(ql_dbg_init, vha, 0x2124,
4134 				       "Defer processing UNSOL LS req opcode %#x...\n",
4135 				       p->payload[0]);
4136 				return;
4137 			}
4138 			qla2xxx_process_purls_iocb((void **)&pkt, &rsp);
4139 			break;
4140 		default:
4141 			/* Type Not Supported. */
4142 			ql_dbg(ql_dbg_async, vha, 0x5042,
4143 			       "Received unknown response pkt type 0x%x entry status=%x.\n",
4144 			       pkt->entry_type, pkt->entry_status);
4145 			break;
4146 		}
4147 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
4148 		wmb();
4149 	}
4150 
4151 	/* Adjust ring index */
4152 	if (IS_P3P_TYPE(ha)) {
4153 		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
4154 
4155 		wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
4156 	} else {
4157 		wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
4158 	}
4159 }
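
/*
 * A note on the ordering above: each consumed entry is stamped
 * RESPONSE_PROCESSED, and the wmb() orders that signature store ahead
 * of the final ring-out pointer update, so an entry handed back to the
 * firmware is never mistaken for unprocessed work on a later pass.
 */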
4160 
4161 static void
4162 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
4163 {
4164 	int rval;
4165 	uint32_t cnt;
4166 	struct qla_hw_data *ha = vha->hw;
4167 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4168 
4169 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4170 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4171 		return;
4172 
4173 	rval = QLA_SUCCESS;
4174 	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
4175 	rd_reg_dword(&reg->iobase_addr);
4176 	wrt_reg_dword(&reg->iobase_window, 0x0001);
4177 	for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
4178 	    rval == QLA_SUCCESS; cnt--) {
4179 		if (cnt) {
4180 			wrt_reg_dword(&reg->iobase_window, 0x0001);
4181 			udelay(10);
4182 		} else
4183 			rval = QLA_FUNCTION_TIMEOUT;
4184 	}
4185 	if (rval == QLA_SUCCESS)
4186 		goto next_test;
4187 
4188 	rval = QLA_SUCCESS;
4189 	wrt_reg_dword(&reg->iobase_window, 0x0003);
4190 	for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
4191 	    rval == QLA_SUCCESS; cnt--) {
4192 		if (cnt) {
4193 			wrt_reg_dword(&reg->iobase_window, 0x0003);
4194 			udelay(10);
4195 		} else
4196 			rval = QLA_FUNCTION_TIMEOUT;
4197 	}
4198 	if (rval != QLA_SUCCESS)
4199 		goto done;
4200 
4201 next_test:
4202 	if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
4203 		ql_log(ql_log_info, vha, 0x504c,
4204 		    "Additional code -- 0x55AA.\n");
4205 
4206 done:
4207 	wrt_reg_dword(&reg->iobase_window, 0x0000);
4208 	rd_reg_dword(&reg->iobase_window);
4209 }
4210 
4211 /**
4212  * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
4213  * @irq: interrupt number
4214  * @dev_id: SCSI driver HA context
4215  *
4216  * Called by system whenever the host adapter generates an interrupt.
4217  *
4218  * Returns handled flag.
4219  */
4220 irqreturn_t
4221 qla24xx_intr_handler(int irq, void *dev_id)
4222 {
4223 	scsi_qla_host_t	*vha;
4224 	struct qla_hw_data *ha;
4225 	struct device_reg_24xx __iomem *reg;
4226 	int		status;
4227 	unsigned long	iter;
4228 	uint32_t	stat;
4229 	uint32_t	hccr;
4230 	uint16_t	mb[8];
4231 	struct rsp_que *rsp;
4232 	unsigned long	flags;
4233 	bool process_atio = false;
4234 
4235 	rsp = (struct rsp_que *) dev_id;
4236 	if (!rsp) {
4237 		ql_log(ql_log_info, NULL, 0x5059,
4238 		    "%s: NULL response queue pointer.\n", __func__);
4239 		return IRQ_NONE;
4240 	}
4241 
4242 	ha = rsp->hw;
4243 	reg = &ha->iobase->isp24;
4244 	status = 0;
4245 
4246 	if (unlikely(pci_channel_offline(ha->pdev)))
4247 		return IRQ_HANDLED;
4248 
4249 	spin_lock_irqsave(&ha->hardware_lock, flags);
4250 	vha = pci_get_drvdata(ha->pdev);
4251 	for (iter = 50; iter--; ) {
4252 		stat = rd_reg_dword(&reg->host_status);
4253 		if (qla2x00_check_reg32_for_disconnect(vha, stat))
4254 			break;
4255 		if (stat & HSRX_RISC_PAUSED) {
4256 			if (unlikely(pci_channel_offline(ha->pdev)))
4257 				break;
4258 
4259 			hccr = rd_reg_dword(&reg->hccr);
4260 
4261 			ql_log(ql_log_warn, vha, 0x504b,
4262 			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
4263 			    hccr);
4264 
4265 			qla2xxx_check_risc_status(vha);
4266 
4267 			ha->isp_ops->fw_dump(vha);
4268 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4269 			break;
4270 		} else if ((stat & HSRX_RISC_INT) == 0)
4271 			break;
4272 
4273 		switch (stat & 0xff) {
4274 		case INTR_ROM_MB_SUCCESS:
4275 		case INTR_ROM_MB_FAILED:
4276 		case INTR_MB_SUCCESS:
4277 		case INTR_MB_FAILED:
4278 			qla24xx_mbx_completion(vha, MSW(stat));
4279 			status |= MBX_INTERRUPT;
4280 
4281 			break;
4282 		case INTR_ASYNC_EVENT:
4283 			mb[0] = MSW(stat);
4284 			mb[1] = rd_reg_word(&reg->mailbox1);
4285 			mb[2] = rd_reg_word(&reg->mailbox2);
4286 			mb[3] = rd_reg_word(&reg->mailbox3);
4287 			qla2x00_async_event(vha, rsp, mb);
4288 			break;
4289 		case INTR_RSP_QUE_UPDATE:
4290 		case INTR_RSP_QUE_UPDATE_83XX:
4291 			qla24xx_process_response_queue(vha, rsp);
4292 			break;
4293 		case INTR_ATIO_QUE_UPDATE_27XX:
4294 		case INTR_ATIO_QUE_UPDATE:
4295 			process_atio = true;
4296 			break;
4297 		case INTR_ATIO_RSP_QUE_UPDATE:
4298 			process_atio = true;
4299 			qla24xx_process_response_queue(vha, rsp);
4300 			break;
4301 		default:
4302 			ql_dbg(ql_dbg_async, vha, 0x504f,
4303 			    "Unrecognized interrupt type (%d).\n", stat * 0xff);
4304 			break;
4305 		}
4306 		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4307 		rd_reg_dword_relaxed(&reg->hccr);
4308 		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
4309 			ndelay(3500);
4310 	}
4311 	qla2x00_handle_mbx_completion(ha, status);
4312 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4313 
4314 	if (process_atio) {
4315 		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4316 		qlt_24xx_process_atio_queue(vha, 0);
4317 		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4318 	}
4319 
4320 	return IRQ_HANDLED;
4321 }
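
/*
 * A note on the handler above: ATIO work is only flagged
 * (process_atio) while hardware_lock is held and is drained afterwards
 * under tgt.atio_lock, avoiding nesting of the two locks; the
 * 50-iteration bound keeps a misbehaving interrupt source from
 * monopolizing the CPU inside this handler.
 */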
4322 
4323 static irqreturn_t
4324 qla24xx_msix_rsp_q(int irq, void *dev_id)
4325 {
4326 	struct qla_hw_data *ha;
4327 	struct rsp_que *rsp;
4328 	struct device_reg_24xx __iomem *reg;
4329 	struct scsi_qla_host *vha;
4330 	unsigned long flags;
4331 
4332 	rsp = (struct rsp_que *) dev_id;
4333 	if (!rsp) {
4334 		ql_log(ql_log_info, NULL, 0x505a,
4335 		    "%s: NULL response queue pointer.\n", __func__);
4336 		return IRQ_NONE;
4337 	}
4338 	ha = rsp->hw;
4339 	reg = &ha->iobase->isp24;
4340 
4341 	spin_lock_irqsave(&ha->hardware_lock, flags);
4342 
4343 	vha = pci_get_drvdata(ha->pdev);
4344 	qla24xx_process_response_queue(vha, rsp);
4345 	if (!ha->flags.disable_msix_handshake) {
4346 		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4347 		rd_reg_dword_relaxed(&reg->hccr);
4348 	}
4349 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4350 
4351 	return IRQ_HANDLED;
4352 }
4353 
4354 static irqreturn_t
4355 qla24xx_msix_default(int irq, void *dev_id)
4356 {
4357 	scsi_qla_host_t	*vha;
4358 	struct qla_hw_data *ha;
4359 	struct rsp_que *rsp;
4360 	struct device_reg_24xx __iomem *reg;
4361 	int		status;
4362 	uint32_t	stat;
4363 	uint32_t	hccr;
4364 	uint16_t	mb[8];
4365 	unsigned long flags;
4366 	bool process_atio = false;
4367 
4368 	rsp = (struct rsp_que *) dev_id;
4369 	if (!rsp) {
4370 		ql_log(ql_log_info, NULL, 0x505c,
4371 		    "%s: NULL response queue pointer.\n", __func__);
4372 		return IRQ_NONE;
4373 	}
4374 	ha = rsp->hw;
4375 	reg = &ha->iobase->isp24;
4376 	status = 0;
4377 
4378 	spin_lock_irqsave(&ha->hardware_lock, flags);
4379 	vha = pci_get_drvdata(ha->pdev);
4380 	do {
4381 		stat = rd_reg_dword(&reg->host_status);
4382 		if (qla2x00_check_reg32_for_disconnect(vha, stat))
4383 			break;
4384 		if (stat & HSRX_RISC_PAUSED) {
4385 			if (unlikely(pci_channel_offline(ha->pdev)))
4386 				break;
4387 
4388 			hccr = rd_reg_dword(&reg->hccr);
4389 
4390 			ql_log(ql_log_info, vha, 0x5050,
4391 			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
4392 			    hccr);
4393 
4394 			qla2xxx_check_risc_status(vha);
4395 			vha->hw_err_cnt++;
4396 
4397 			ha->isp_ops->fw_dump(vha);
4398 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4399 			break;
4400 		} else if ((stat & HSRX_RISC_INT) == 0)
4401 			break;
4402 
4403 		switch (stat & 0xff) {
4404 		case INTR_ROM_MB_SUCCESS:
4405 		case INTR_ROM_MB_FAILED:
4406 		case INTR_MB_SUCCESS:
4407 		case INTR_MB_FAILED:
4408 			qla24xx_mbx_completion(vha, MSW(stat));
4409 			status |= MBX_INTERRUPT;
4410 
4411 			break;
4412 		case INTR_ASYNC_EVENT:
4413 			mb[0] = MSW(stat);
4414 			mb[1] = rd_reg_word(&reg->mailbox1);
4415 			mb[2] = rd_reg_word(&reg->mailbox2);
4416 			mb[3] = rd_reg_word(&reg->mailbox3);
4417 			qla2x00_async_event(vha, rsp, mb);
4418 			break;
4419 		case INTR_RSP_QUE_UPDATE:
4420 		case INTR_RSP_QUE_UPDATE_83XX:
4421 			qla24xx_process_response_queue(vha, rsp);
4422 			break;
4423 		case INTR_ATIO_QUE_UPDATE_27XX:
4424 		case INTR_ATIO_QUE_UPDATE:
4425 			process_atio = true;
4426 			break;
4427 		case INTR_ATIO_RSP_QUE_UPDATE:
4428 			process_atio = true;
4429 			qla24xx_process_response_queue(vha, rsp);
4430 			break;
4431 		default:
4432 			ql_dbg(ql_dbg_async, vha, 0x5051,
4433 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
4434 			break;
4435 		}
4436 		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4437 	} while (0);
4438 	qla2x00_handle_mbx_completion(ha, status);
4439 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4440 
4441 	if (process_atio) {
4442 		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4443 		qlt_24xx_process_atio_queue(vha, 0);
4444 		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4445 	}
4446 
4447 	return IRQ_HANDLED;
4448 }
4449 
4450 irqreturn_t
4451 qla2xxx_msix_rsp_q(int irq, void *dev_id)
4452 {
4453 	struct qla_hw_data *ha;
4454 	struct qla_qpair *qpair;
4455 
4456 	qpair = dev_id;
4457 	if (!qpair) {
4458 		ql_log(ql_log_info, NULL, 0x505b,
4459 		    "%s: NULL response queue pointer.\n", __func__);
4460 		return IRQ_NONE;
4461 	}
4462 	ha = qpair->hw;
4463 
4464 	queue_work(ha->wq, &qpair->q_work);
4465 
4466 	return IRQ_HANDLED;
4467 }
4468 
4469 irqreturn_t
4470 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
4471 {
4472 	struct qla_hw_data *ha;
4473 	struct qla_qpair *qpair;
4474 	struct device_reg_24xx __iomem *reg;
4475 	unsigned long flags;
4476 
4477 	qpair = dev_id;
4478 	if (!qpair) {
4479 		ql_log(ql_log_info, NULL, 0x505b,
4480 		    "%s: NULL response queue pointer.\n", __func__);
4481 		return IRQ_NONE;
4482 	}
4483 	ha = qpair->hw;
4484 
4485 	reg = &ha->iobase->isp24;
4486 	spin_lock_irqsave(&ha->hardware_lock, flags);
4487 	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4488 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4489 
4490 	queue_work(ha->wq, &qpair->q_work);
4491 
4492 	return IRQ_HANDLED;
4493 }
4494 
4495 /* Interrupt handling helpers. */
4496 
4497 struct qla_init_msix_entry {
4498 	const char *name;
4499 	irq_handler_t handler;
4500 };
4501 
4502 static const struct qla_init_msix_entry msix_entries[] = {
4503 	{ "default", qla24xx_msix_default },
4504 	{ "rsp_q", qla24xx_msix_rsp_q },
4505 	{ "atio_q", qla83xx_msix_atio_q },
4506 	{ "qpair_multiq", qla2xxx_msix_rsp_q },
4507 	{ "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
4508 };
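
/*
 * Layout assumed by the request loops below: entries 0 ("default") and
 * 1 ("rsp_q") are the QLA_BASE_VECTORS pre-vectors, "atio_q" is
 * selected via QLA_ATIO_VECTOR when target mode is MSI-X capable, and
 * the two qpair handlers service per-queue-pair vectors with and
 * without the interrupt handshake.
 */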
4509 
4510 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
4511 	{ "qla2xxx (default)", qla82xx_msix_default },
4512 	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
4513 };
4514 
4515 static int
4516 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
4517 {
4518 	int i, ret;
4519 	struct qla_msix_entry *qentry;
4520 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4521 	int min_vecs = QLA_BASE_VECTORS;
4522 	struct irq_affinity desc = {
4523 		.pre_vectors = QLA_BASE_VECTORS,
4524 	};
4525 
4526 	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4527 	    IS_ATIO_MSIX_CAPABLE(ha)) {
4528 		desc.pre_vectors++;
4529 		min_vecs++;
4530 	}
4531 
4532 	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
4533 		/* User wants to control IRQs for target mode, or there is no MQ I/O base. */
4534 		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
4535 			blk_mq_num_online_queues(ha->msix_count) + min_vecs,
4536 			PCI_IRQ_MSIX);
4537 	} else
4538 		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
4539 			blk_mq_num_online_queues(ha->msix_count) + min_vecs,
4540 			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
4541 			&desc);
4542 
4543 	if (ret < 0) {
4544 		ql_log(ql_log_fatal, vha, 0x00c7,
4545 		    "MSI-X: Failed to enable support, "
4546 		    "giving   up -- %d/%d.\n",
4547 		    ha->msix_count, ret);
4548 		goto msix_out;
4549 	} else if (ret < ha->msix_count) {
4550 		ql_log(ql_log_info, vha, 0x00c6,
4551 		    "MSI-X: Using %d vectors\n", ret);
4552 		ha->msix_count = ret;
4553 		/* Recalculate queue values */
4554 		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
4555 			ha->max_req_queues = ha->msix_count - 1;
4556 
4557 			/* ATIOQ needs 1 vector. That's 1 less QPair */
4558 			if (QLA_TGT_MODE_ENABLED())
4559 				ha->max_req_queues--;
4560 
4561 			ha->max_rsp_queues = ha->max_req_queues;
4562 
4563 			ha->max_qpairs = ha->max_req_queues - 1;
4564 			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
4565 			    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
4566 		}
4567 	}
4568 	vha->irq_offset = desc.pre_vectors;
4569 	ha->msix_entries = kcalloc(ha->msix_count,
4570 				   sizeof(struct qla_msix_entry),
4571 				   GFP_KERNEL);
4572 	if (!ha->msix_entries) {
4573 		ql_log(ql_log_fatal, vha, 0x00c8,
4574 		    "Failed to allocate memory for ha->msix_entries.\n");
4575 		ret = -ENOMEM;
4576 		goto free_irqs;
4577 	}
4578 	ha->flags.msix_enabled = 1;
4579 
4580 	for (i = 0; i < ha->msix_count; i++) {
4581 		qentry = &ha->msix_entries[i];
4582 		qentry->vector = pci_irq_vector(ha->pdev, i);
4583 		qentry->vector_base0 = i;
4584 		qentry->entry = i;
4585 		qentry->have_irq = 0;
4586 		qentry->in_use = 0;
4587 		qentry->handle = NULL;
4588 	}
4589 
4590 	/* Enable MSI-X vectors for the base queue */
4591 	for (i = 0; i < QLA_BASE_VECTORS; i++) {
4592 		qentry = &ha->msix_entries[i];
4593 		qentry->handle = rsp;
4594 		rsp->msix = qentry;
4595 		scnprintf(qentry->name, sizeof(qentry->name),
4596 		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
4597 		if (IS_P3P_TYPE(ha))
4598 			ret = request_irq(qentry->vector,
4599 				qla82xx_msix_entries[i].handler,
4600 				0, qla82xx_msix_entries[i].name, rsp);
4601 		else
4602 			ret = request_irq(qentry->vector,
4603 				msix_entries[i].handler,
4604 				0, qentry->name, rsp);
4605 		if (ret)
4606 			goto msix_register_fail;
4607 		qentry->have_irq = 1;
4608 		qentry->in_use = 1;
4609 	}
4610 
4611 	/*
4612 	 * If target mode is enabled, also request the vector for the ATIO
4613 	 * queue.
4614 	 */
4615 	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4616 	    IS_ATIO_MSIX_CAPABLE(ha)) {
4617 		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
4618 		rsp->msix = qentry;
4619 		qentry->handle = rsp;
4620 		scnprintf(qentry->name, sizeof(qentry->name),
4621 		    "qla2xxx%lu_%s", vha->host_no,
4622 		    msix_entries[QLA_ATIO_VECTOR].name);
4623 		qentry->in_use = 1;
4624 		ret = request_irq(qentry->vector,
4625 			msix_entries[QLA_ATIO_VECTOR].handler,
4626 			0, qentry->name, rsp);
4627 		qentry->have_irq = 1;
4628 	}
4629 
4630 msix_register_fail:
4631 	if (ret) {
4632 		ql_log(ql_log_fatal, vha, 0x00cb,
4633 		    "MSI-X: unable to register handler -- %x/%d.\n",
4634 		    qentry->vector, ret);
4635 		qla2x00_free_irqs(vha);
4636 		ha->mqenable = 0;
4637 		goto msix_out;
4638 	}
4639 
4640 	/* Enable MSI-X vector for response queue update for queue 0 */
4641 	if (IS_MQUE_CAPABLE(ha) &&
4642 	    (ha->msixbase && ha->mqiobase && ha->max_qpairs))
4643 		ha->mqenable = 1;
4644 	else
4645 		ha->mqenable = 0;
4646 
4647 	ql_dbg(ql_dbg_multiq, vha, 0xc005,
4648 	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4649 	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4650 	ql_dbg(ql_dbg_init, vha, 0x0055,
4651 	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4652 	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4653 
4654 msix_out:
4655 	return ret;
4656 
4657 free_irqs:
4658 	pci_free_irq_vectors(ha->pdev);
4659 	goto msix_out;
4660 }
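
/*
 * Example of the vector budgeting above, using illustrative numbers:
 * if only 8 MSI-X vectors are granted, max_req_queues becomes
 * 8 - 1 = 7, target mode takes one more for the ATIO queue (6), and
 * max_qpairs = max_req_queues - 1 = 5, queue 0 being the base queue.
 */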
4661 
4662 int
4663 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
4664 {
4665 	int ret = QLA_FUNCTION_FAILED;
4666 	device_reg_t *reg = ha->iobase;
4667 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4668 
4669 	/* If possible, enable MSI-X. */
4670 	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
4671 	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
4672 	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
4673 		goto skip_msi;
4674 
4675 	if (ql2xenablemsix == 2)
4676 		goto skip_msix;
4677 
4678 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4679 		(ha->pdev->subsystem_device == 0x7040 ||
4680 		ha->pdev->subsystem_device == 0x7041 ||
4681 		ha->pdev->subsystem_device == 0x1705)) {
4682 		ql_log(ql_log_warn, vha, 0x0034,
4683 		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
4684 			ha->pdev->subsystem_vendor,
4685 			ha->pdev->subsystem_device);
4686 		goto skip_msi;
4687 	}
4688 
4689 	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
4690 		ql_log(ql_log_warn, vha, 0x0035,
4691 		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
4692 		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
4693 		goto skip_msix;
4694 	}
4695 
4696 	ret = qla24xx_enable_msix(ha, rsp);
4697 	if (!ret) {
4698 		ql_dbg(ql_dbg_init, vha, 0x0036,
4699 		    "MSI-X: Enabled (0x%X, 0x%X).\n",
4700 		    ha->chip_revision, ha->fw_attributes);
4701 		goto clear_risc_ints;
4702 	}
4703 
4704 skip_msix:
4705 
4706 	ql_log(ql_log_info, vha, 0x0037,
4707 	    "Falling back-to MSI mode -- ret=%d.\n", ret);
4708 
4709 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
4710 	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
4711 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4712 		goto skip_msi;
4713 
4714 	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
4715 	if (ret > 0) {
4716 		ql_dbg(ql_dbg_init, vha, 0x0038,
4717 		    "MSI: Enabled.\n");
4718 		ha->flags.msi_enabled = 1;
4719 	} else
4720 		ql_log(ql_log_warn, vha, 0x0039,
4721 		    "Falling back-to INTa mode -- ret=%d.\n", ret);
4722 skip_msi:
4723 
4724 	/* Skip INTx on ISP82xx. */
4725 	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
4726 		return QLA_FUNCTION_FAILED;
4727 
4728 	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
4729 	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
4730 	    QLA2XXX_DRIVER_NAME, rsp);
4731 	if (ret) {
4732 		ql_log(ql_log_warn, vha, 0x003a,
4733 		    "Failed to reserve interrupt %d already in use.\n",
4734 		    ha->pdev->irq);
4735 		goto fail;
4736 	} else if (!ha->flags.msi_enabled) {
4737 		ql_dbg(ql_dbg_init, vha, 0x0125,
4738 		    "INTa mode: Enabled.\n");
4739 		ha->flags.mr_intr_valid = 1;
4740 		/* Set max_qpair to 0, as MSI-X and MSI in not enabled */
4741 		ha->max_qpairs = 0;
4742 	}
4743 
4744 clear_risc_ints:
4745 	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
4746 		goto fail;
4747 
4748 	spin_lock_irq(&ha->hardware_lock);
4749 	wrt_reg_word(&reg->isp.semaphore, 0);
4750 	spin_unlock_irq(&ha->hardware_lock);
4751 
4752 fail:
4753 	return ret;
4754 }
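
/*
 * The fallback ladder implemented above: MSI-X is attempted first on
 * capable ISPs, then plain MSI, then a shared INTx line (except on
 * ISP82xx, which requires message-signaled interrupts). The
 * ql2xenablemsix module parameter short-circuits the ladder: 0 skips
 * straight to INTx, 2 skips MSI-X but still tries MSI.
 */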
4755 
4756 void
4757 qla2x00_free_irqs(scsi_qla_host_t *vha)
4758 {
4759 	struct qla_hw_data *ha = vha->hw;
4760 	struct rsp_que *rsp;
4761 	struct qla_msix_entry *qentry;
4762 	int i;
4763 
4764 	/*
4765 	 * We need to check that ha->rsp_q_map is valid in case we are called
4766 	 * from a probe failure context.
4767 	 */
4768 	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
4769 		goto free_irqs;
4770 	rsp = ha->rsp_q_map[0];
4771 
4772 	if (ha->flags.msix_enabled) {
4773 		for (i = 0; i < ha->msix_count; i++) {
4774 			qentry = &ha->msix_entries[i];
4775 			if (qentry->have_irq) {
4776 				irq_set_affinity_notifier(qentry->vector, NULL);
4777 				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
4778 			}
4779 		}
4780 		kfree(ha->msix_entries);
4781 		ha->msix_entries = NULL;
4782 		ha->flags.msix_enabled = 0;
4783 		ql_dbg(ql_dbg_init, vha, 0x0042,
4784 			"Disabled MSI-X.\n");
4785 	} else {
4786 		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
4787 	}
4788 
4789 free_irqs:
4790 	pci_free_irq_vectors(ha->pdev);
4791 }
4792 
4793 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
4794 	struct qla_msix_entry *msix, int vector_type)
4795 {
4796 	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
4797 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4798 	int ret;
4799 
4800 	scnprintf(msix->name, sizeof(msix->name),
4801 	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
4802 	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
4803 	if (ret) {
4804 		ql_log(ql_log_fatal, vha, 0x00e6,
4805 		    "MSI-X: Unable to register handler -- %x/%d.\n",
4806 		    msix->vector, ret);
4807 		return ret;
4808 	}
4809 	msix->have_irq = 1;
4810 	msix->handle = qpair;
4811 	qla_mapq_init_qp_cpu_map(ha, msix, qpair);
4812 	return ret;
4813 }
4814