xref: /linux/drivers/scsi/qla2xxx/qla_mbx.c (revision 662fa3d6099374c4615bf64d06895e3573b935b2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 #include "qla_def.h"
7 #include "qla_target.h"
8 
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
11 
12 static struct mb_cmd_name {
13 	uint16_t cmd;
14 	const char *str;
15 } mb_str[] = {
16 	{MBC_GET_PORT_DATABASE,		"GPDB"},
17 	{MBC_GET_ID_LIST,		"GIDList"},
18 	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
19 	{MBC_GET_RESOURCE_COUNTS,	"ResCnt"},
20 };
21 
22 static const char *mb_to_str(uint16_t cmd)
23 {
24 	int i;
25 	struct mb_cmd_name *e;
26 
27 	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
28 		e = mb_str + i;
29 		if (cmd == e->cmd)
30 			return e->str;
31 	}
32 	return "unknown";
33 }
34 
35 static struct rom_cmd {
36 	uint16_t cmd;
37 } rom_cmds[] = {
38 	{ MBC_LOAD_RAM },
39 	{ MBC_EXECUTE_FIRMWARE },
40 	{ MBC_READ_RAM_WORD },
41 	{ MBC_MAILBOX_REGISTER_TEST },
42 	{ MBC_VERIFY_CHECKSUM },
43 	{ MBC_GET_FIRMWARE_VERSION },
44 	{ MBC_LOAD_RISC_RAM },
45 	{ MBC_DUMP_RISC_RAM },
46 	{ MBC_LOAD_RISC_RAM_EXTENDED },
47 	{ MBC_DUMP_RISC_RAM_EXTENDED },
48 	{ MBC_WRITE_RAM_WORD_EXTENDED },
49 	{ MBC_READ_RAM_EXTENDED },
50 	{ MBC_GET_RESOURCE_COUNTS },
51 	{ MBC_SET_FIRMWARE_OPTION },
52 	{ MBC_MID_INITIALIZE_FIRMWARE },
53 	{ MBC_GET_FIRMWARE_STATE },
54 	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
55 	{ MBC_GET_RETRY_COUNT },
56 	{ MBC_TRACE_CONTROL },
57 	{ MBC_INITIALIZE_MULTIQ },
58 	{ MBC_IOCB_COMMAND_A64 },
59 	{ MBC_GET_ADAPTER_LOOP_ID },
60 	{ MBC_READ_SFP },
61 	{ MBC_SET_RNID_PARAMS },
62 	{ MBC_GET_RNID_PARAMS },
63 	{ MBC_GET_SET_ZIO_THRESHOLD },
64 };
65 
66 static int is_rom_cmd(uint16_t cmd)
67 {
68 	int i;
69 	struct  rom_cmd *wc;
70 
71 	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
72 		wc = rom_cmds + i;
73 		if (wc->cmd == cmd)
74 			return 1;
75 	}
76 
77 	return 0;
78 }
79 
80 /*
81  * qla2x00_mailbox_command
82  *	Issue mailbox command and waits for completion.
83  *
84  * Input:
85  *	ha = adapter block pointer.
86  *	mcp = driver internal mbx struct pointer.
87  *
88  * Output:
89  *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
90  *
91  * Returns:
92  *	0 : QLA_SUCCESS = cmd performed success
93  *	1 : QLA_FUNCTION_FAILED   (error encountered)
94  *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
95  *
96  * Context:
97  *	Kernel context.
98  */
99 static int
100 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
101 {
102 	int		rval, i;
103 	unsigned long    flags = 0;
104 	device_reg_t *reg;
105 	uint8_t		abort_active, eeh_delay;
106 	uint8_t		io_lock_on;
107 	uint16_t	command = 0;
108 	uint16_t	*iptr;
109 	__le16 __iomem  *optr;
110 	uint32_t	cnt;
111 	uint32_t	mboxes;
112 	unsigned long	wait_time;
113 	struct qla_hw_data *ha = vha->hw;
114 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
115 	u32 chip_reset;
116 
117 
118 	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
119 
120 	if (ha->pdev->error_state == pci_channel_io_perm_failure) {
121 		ql_log(ql_log_warn, vha, 0x1001,
122 		    "PCI channel failed permanently, exiting.\n");
123 		return QLA_FUNCTION_TIMEOUT;
124 	}
125 
126 	if (vha->device_flags & DFLG_DEV_FAILED) {
127 		ql_log(ql_log_warn, vha, 0x1002,
128 		    "Device in failed state, exiting.\n");
129 		return QLA_FUNCTION_TIMEOUT;
130 	}
131 
132 	/* if PCI error, then avoid mbx processing.*/
133 	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
134 	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
135 		ql_log(ql_log_warn, vha, 0xd04e,
136 		    "PCI error, exiting.\n");
137 		return QLA_FUNCTION_TIMEOUT;
138 	}
139 	eeh_delay = 0;
140 	reg = ha->iobase;
141 	io_lock_on = base_vha->flags.init_done;
142 
143 	rval = QLA_SUCCESS;
144 	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
145 	chip_reset = ha->chip_reset;
146 
147 	if (ha->flags.pci_channel_io_perm_failure) {
148 		ql_log(ql_log_warn, vha, 0x1003,
149 		    "Perm failure on EEH timeout MBX, exiting.\n");
150 		return QLA_FUNCTION_TIMEOUT;
151 	}
152 
153 	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
154 		/* Setting Link-Down error */
155 		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
156 		ql_log(ql_log_warn, vha, 0x1004,
157 		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
158 		return QLA_FUNCTION_TIMEOUT;
159 	}
160 
161 	/* check if ISP abort is active and return cmd with timeout */
162 	if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
163 	      test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
164 	      test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
165 	      !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
166 		ql_log(ql_log_info, vha, 0x1005,
167 		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
168 		    mcp->mb[0]);
169 		return QLA_FUNCTION_TIMEOUT;
170 	}
171 
172 	atomic_inc(&ha->num_pend_mbx_stage1);
173 	/*
174 	 * Wait for active mailbox commands to finish by waiting at most tov
175 	 * seconds. This is to serialize actual issuing of mailbox cmds during
176 	 * non ISP abort time.
177 	 */
178 	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
179 		/* Timeout occurred. Return error. */
180 		ql_log(ql_log_warn, vha, 0xd035,
181 		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
182 		    mcp->mb[0]);
183 		vha->hw_err_cnt++;
184 		atomic_dec(&ha->num_pend_mbx_stage1);
185 		return QLA_FUNCTION_TIMEOUT;
186 	}
187 	atomic_dec(&ha->num_pend_mbx_stage1);
188 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
189 	    ha->flags.eeh_busy) {
190 		ql_log(ql_log_warn, vha, 0xd035,
191 		       "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
192 		       ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
193 		rval = QLA_ABORTED;
194 		goto premature_exit;
195 	}
196 
197 
198 	/* Save mailbox command for debug */
199 	ha->mcp = mcp;
200 
201 	ql_dbg(ql_dbg_mbx, vha, 0x1006,
202 	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
203 
204 	spin_lock_irqsave(&ha->hardware_lock, flags);
205 
206 	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
207 	    ha->flags.mbox_busy) {
208 		rval = QLA_ABORTED;
209 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
210 		goto premature_exit;
211 	}
212 	ha->flags.mbox_busy = 1;
213 
214 	/* Load mailbox registers. */
215 	if (IS_P3P_TYPE(ha))
216 		optr = &reg->isp82.mailbox_in[0];
217 	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
218 		optr = &reg->isp24.mailbox0;
219 	else
220 		optr = MAILBOX_REG(ha, &reg->isp, 0);
221 
222 	iptr = mcp->mb;
223 	command = mcp->mb[0];
224 	mboxes = mcp->out_mb;
225 
226 	ql_dbg(ql_dbg_mbx, vha, 0x1111,
227 	    "Mailbox registers (OUT):\n");
228 	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
229 		if (IS_QLA2200(ha) && cnt == 8)
230 			optr = MAILBOX_REG(ha, &reg->isp, 8);
231 		if (mboxes & BIT_0) {
232 			ql_dbg(ql_dbg_mbx, vha, 0x1112,
233 			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
234 			wrt_reg_word(optr, *iptr);
235 		}
236 
237 		mboxes >>= 1;
238 		optr++;
239 		iptr++;
240 	}
241 
242 	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
243 	    "I/O Address = %p.\n", optr);
244 
245 	/* Issue set host interrupt command to send cmd out. */
246 	ha->flags.mbox_int = 0;
247 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
248 
249 	/* Unlock mbx registers and wait for interrupt */
250 	ql_dbg(ql_dbg_mbx, vha, 0x100f,
251 	    "Going to unlock irq & waiting for interrupts. "
252 	    "jiffies=%lx.\n", jiffies);
253 
254 	/* Wait for mbx cmd completion until timeout */
255 	atomic_inc(&ha->num_pend_mbx_stage2);
256 	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
257 		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
258 
259 		if (IS_P3P_TYPE(ha))
260 			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
261 		else if (IS_FWI2_CAPABLE(ha))
262 			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
263 		else
264 			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
265 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
266 
267 		wait_time = jiffies;
268 		atomic_inc(&ha->num_pend_mbx_stage3);
269 		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
270 		    mcp->tov * HZ)) {
271 			if (chip_reset != ha->chip_reset) {
272 				eeh_delay = ha->flags.eeh_busy ? 1 : 0;
273 
274 				spin_lock_irqsave(&ha->hardware_lock, flags);
275 				ha->flags.mbox_busy = 0;
276 				spin_unlock_irqrestore(&ha->hardware_lock,
277 				    flags);
278 				atomic_dec(&ha->num_pend_mbx_stage2);
279 				atomic_dec(&ha->num_pend_mbx_stage3);
280 				rval = QLA_ABORTED;
281 				goto premature_exit;
282 			}
283 			ql_dbg(ql_dbg_mbx, vha, 0x117a,
284 			    "cmd=%x Timeout.\n", command);
285 			spin_lock_irqsave(&ha->hardware_lock, flags);
286 			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
287 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
288 
289 		} else if (ha->flags.purge_mbox ||
290 		    chip_reset != ha->chip_reset) {
291 			eeh_delay = ha->flags.eeh_busy ? 1 : 0;
292 
293 			spin_lock_irqsave(&ha->hardware_lock, flags);
294 			ha->flags.mbox_busy = 0;
295 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
296 			atomic_dec(&ha->num_pend_mbx_stage2);
297 			atomic_dec(&ha->num_pend_mbx_stage3);
298 			rval = QLA_ABORTED;
299 			goto premature_exit;
300 		}
301 		atomic_dec(&ha->num_pend_mbx_stage3);
302 
303 		if (time_after(jiffies, wait_time + 5 * HZ))
304 			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
305 			    command, jiffies_to_msecs(jiffies - wait_time));
306 	} else {
307 		ql_dbg(ql_dbg_mbx, vha, 0x1011,
308 		    "Cmd=%x Polling Mode.\n", command);
309 
310 		if (IS_P3P_TYPE(ha)) {
311 			if (rd_reg_dword(&reg->isp82.hint) &
312 				HINT_MBX_INT_PENDING) {
313 				ha->flags.mbox_busy = 0;
314 				spin_unlock_irqrestore(&ha->hardware_lock,
315 					flags);
316 				atomic_dec(&ha->num_pend_mbx_stage2);
317 				ql_dbg(ql_dbg_mbx, vha, 0x1012,
318 				    "Pending mailbox timeout, exiting.\n");
319 				vha->hw_err_cnt++;
320 				rval = QLA_FUNCTION_TIMEOUT;
321 				goto premature_exit;
322 			}
323 			wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
324 		} else if (IS_FWI2_CAPABLE(ha))
325 			wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
326 		else
327 			wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
328 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
329 
330 		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
331 		while (!ha->flags.mbox_int) {
332 			if (ha->flags.purge_mbox ||
333 			    chip_reset != ha->chip_reset) {
334 				eeh_delay = ha->flags.eeh_busy ? 1 : 0;
335 
336 				spin_lock_irqsave(&ha->hardware_lock, flags);
337 				ha->flags.mbox_busy = 0;
338 				spin_unlock_irqrestore(&ha->hardware_lock,
339 				    flags);
340 				atomic_dec(&ha->num_pend_mbx_stage2);
341 				rval = QLA_ABORTED;
342 				goto premature_exit;
343 			}
344 
345 			if (time_after(jiffies, wait_time))
346 				break;
347 
348 			/* Check for pending interrupts. */
349 			qla2x00_poll(ha->rsp_q_map[0]);
350 
351 			if (!ha->flags.mbox_int &&
352 			    !(IS_QLA2200(ha) &&
353 			    command == MBC_LOAD_RISC_RAM_EXTENDED))
354 				msleep(10);
355 		} /* while */
356 		ql_dbg(ql_dbg_mbx, vha, 0x1013,
357 		    "Waited %d sec.\n",
358 		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
359 	}
360 	atomic_dec(&ha->num_pend_mbx_stage2);
361 
362 	/* Check whether we timed out */
363 	if (ha->flags.mbox_int) {
364 		uint16_t *iptr2;
365 
366 		ql_dbg(ql_dbg_mbx, vha, 0x1014,
367 		    "Cmd=%x completed.\n", command);
368 
369 		/* Got interrupt. Clear the flag. */
370 		ha->flags.mbox_int = 0;
371 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
372 
373 		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
374 			spin_lock_irqsave(&ha->hardware_lock, flags);
375 			ha->flags.mbox_busy = 0;
376 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
377 
378 			/* Setting Link-Down error */
379 			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
380 			ha->mcp = NULL;
381 			rval = QLA_FUNCTION_FAILED;
382 			ql_log(ql_log_warn, vha, 0xd048,
383 			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
384 			goto premature_exit;
385 		}
386 
387 		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
388 			ql_dbg(ql_dbg_mbx, vha, 0x11ff,
389 			       "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
390 			       MBS_COMMAND_COMPLETE);
391 			rval = QLA_FUNCTION_FAILED;
392 		}
393 
394 		/* Load return mailbox registers. */
395 		iptr2 = mcp->mb;
396 		iptr = (uint16_t *)&ha->mailbox_out[0];
397 		mboxes = mcp->in_mb;
398 
399 		ql_dbg(ql_dbg_mbx, vha, 0x1113,
400 		    "Mailbox registers (IN):\n");
401 		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
402 			if (mboxes & BIT_0) {
403 				*iptr2 = *iptr;
404 				ql_dbg(ql_dbg_mbx, vha, 0x1114,
405 				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
406 			}
407 
408 			mboxes >>= 1;
409 			iptr2++;
410 			iptr++;
411 		}
412 	} else {
413 
414 		uint16_t mb[8];
415 		uint32_t ictrl, host_status, hccr;
416 		uint16_t        w;
417 
418 		if (IS_FWI2_CAPABLE(ha)) {
419 			mb[0] = rd_reg_word(&reg->isp24.mailbox0);
420 			mb[1] = rd_reg_word(&reg->isp24.mailbox1);
421 			mb[2] = rd_reg_word(&reg->isp24.mailbox2);
422 			mb[3] = rd_reg_word(&reg->isp24.mailbox3);
423 			mb[7] = rd_reg_word(&reg->isp24.mailbox7);
424 			ictrl = rd_reg_dword(&reg->isp24.ictrl);
425 			host_status = rd_reg_dword(&reg->isp24.host_status);
426 			hccr = rd_reg_dword(&reg->isp24.hccr);
427 
428 			ql_log(ql_log_warn, vha, 0xd04c,
429 			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
430 			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
431 			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
432 			    mb[7], host_status, hccr);
433 			vha->hw_err_cnt++;
434 
435 		} else {
436 			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
437 			ictrl = rd_reg_word(&reg->isp.ictrl);
438 			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
439 			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
440 			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
441 			vha->hw_err_cnt++;
442 		}
443 		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
444 
445 		/* Capture FW dump only, if PCI device active */
446 		if (!pci_channel_offline(vha->hw->pdev)) {
447 			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
448 			if (w == 0xffff || ictrl == 0xffffffff ||
449 			    (chip_reset != ha->chip_reset)) {
450 				/* This is special case if there is unload
451 				 * of driver happening and if PCI device go
452 				 * into bad state due to PCI error condition
453 				 * then only PCI ERR flag would be set.
454 				 * we will do premature exit for above case.
455 				 */
456 				spin_lock_irqsave(&ha->hardware_lock, flags);
457 				ha->flags.mbox_busy = 0;
458 				spin_unlock_irqrestore(&ha->hardware_lock,
459 				    flags);
460 				rval = QLA_FUNCTION_TIMEOUT;
461 				goto premature_exit;
462 			}
463 
464 			/* Attempt to capture firmware dump for further
465 			 * anallysis of the current formware state. we do not
466 			 * need to do this if we are intentionally generating
467 			 * a dump
468 			 */
469 			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
470 				qla2xxx_dump_fw(vha);
471 			rval = QLA_FUNCTION_TIMEOUT;
472 		 }
473 	}
474 	spin_lock_irqsave(&ha->hardware_lock, flags);
475 	ha->flags.mbox_busy = 0;
476 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
477 
478 	/* Clean up */
479 	ha->mcp = NULL;
480 
481 	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
482 		ql_dbg(ql_dbg_mbx, vha, 0x101a,
483 		    "Checking for additional resp interrupt.\n");
484 
485 		/* polling mode for non isp_abort commands. */
486 		qla2x00_poll(ha->rsp_q_map[0]);
487 	}
488 
489 	if (rval == QLA_FUNCTION_TIMEOUT &&
490 	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
491 		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
492 		    ha->flags.eeh_busy) {
493 			/* not in dpc. schedule it for dpc to take over. */
494 			ql_dbg(ql_dbg_mbx, vha, 0x101b,
495 			    "Timeout, schedule isp_abort_needed.\n");
496 
497 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
498 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
499 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
500 				if (IS_QLA82XX(ha)) {
501 					ql_dbg(ql_dbg_mbx, vha, 0x112a,
502 					    "disabling pause transmit on port "
503 					    "0 & 1.\n");
504 					qla82xx_wr_32(ha,
505 					    QLA82XX_CRB_NIU + 0x98,
506 					    CRB_NIU_XG_PAUSE_CTL_P0|
507 					    CRB_NIU_XG_PAUSE_CTL_P1);
508 				}
509 				ql_log(ql_log_info, base_vha, 0x101c,
510 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
511 				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
512 				    "abort.\n", command, mcp->mb[0],
513 				    ha->flags.eeh_busy);
514 				vha->hw_err_cnt++;
515 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
516 				qla2xxx_wake_dpc(vha);
517 			}
518 		} else if (current == ha->dpc_thread) {
519 			/* call abort directly since we are in the DPC thread */
520 			ql_dbg(ql_dbg_mbx, vha, 0x101d,
521 			    "Timeout, calling abort_isp.\n");
522 
523 			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
524 			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
525 			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
526 				if (IS_QLA82XX(ha)) {
527 					ql_dbg(ql_dbg_mbx, vha, 0x112b,
528 					    "disabling pause transmit on port "
529 					    "0 & 1.\n");
530 					qla82xx_wr_32(ha,
531 					    QLA82XX_CRB_NIU + 0x98,
532 					    CRB_NIU_XG_PAUSE_CTL_P0|
533 					    CRB_NIU_XG_PAUSE_CTL_P1);
534 				}
535 				ql_log(ql_log_info, base_vha, 0x101e,
536 				    "Mailbox cmd timeout occurred, cmd=0x%x, "
537 				    "mb[0]=0x%x. Scheduling ISP abort ",
538 				    command, mcp->mb[0]);
539 				vha->hw_err_cnt++;
540 				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
541 				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
542 				/* Allow next mbx cmd to come in. */
543 				complete(&ha->mbx_cmd_comp);
544 				if (ha->isp_ops->abort_isp(vha) &&
545 				    !ha->flags.eeh_busy) {
546 					/* Failed. retry later. */
547 					set_bit(ISP_ABORT_NEEDED,
548 					    &vha->dpc_flags);
549 				}
550 				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
551 				ql_dbg(ql_dbg_mbx, vha, 0x101f,
552 				    "Finished abort_isp.\n");
553 				goto mbx_done;
554 			}
555 		}
556 	}
557 
558 premature_exit:
559 	/* Allow next mbx cmd to come in. */
560 	complete(&ha->mbx_cmd_comp);
561 
562 mbx_done:
563 	if (rval == QLA_ABORTED) {
564 		ql_log(ql_log_info, vha, 0xd035,
565 		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
566 		    mcp->mb[0]);
567 	} else if (rval) {
568 		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
569 			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
570 			    dev_name(&ha->pdev->dev), 0x1020+0x800,
571 			    vha->host_no, rval);
572 			mboxes = mcp->in_mb;
573 			cnt = 4;
574 			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
575 				if (mboxes & BIT_0) {
576 					printk(" mb[%u]=%x", i, mcp->mb[i]);
577 					cnt--;
578 				}
579 			pr_warn(" cmd=%x ****\n", command);
580 		}
581 		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
582 			ql_dbg(ql_dbg_mbx, vha, 0x1198,
583 			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
584 			    rd_reg_dword(&reg->isp24.host_status),
585 			    rd_reg_dword(&reg->isp24.ictrl),
586 			    rd_reg_dword(&reg->isp24.istatus));
587 		} else {
588 			ql_dbg(ql_dbg_mbx, vha, 0x1206,
589 			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
590 			    rd_reg_word(&reg->isp.ctrl_status),
591 			    rd_reg_word(&reg->isp.ictrl),
592 			    rd_reg_word(&reg->isp.istatus));
593 		}
594 	} else {
595 		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
596 	}
597 
598 	i = 500;
599 	while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
600 		/*
601 		 * The caller of this mailbox encounter pci error.
602 		 * Hold the thread until PCIE link reset complete to make
603 		 * sure caller does not unmap dma while recovery is
604 		 * in progress.
605 		 */
606 		msleep(1);
607 		i--;
608 	}
609 	return rval;
610 }
611 
612 int
613 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
614     uint32_t risc_code_size)
615 {
616 	int rval;
617 	struct qla_hw_data *ha = vha->hw;
618 	mbx_cmd_t mc;
619 	mbx_cmd_t *mcp = &mc;
620 
621 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
622 	    "Entered %s.\n", __func__);
623 
624 	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
625 		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
626 		mcp->mb[8] = MSW(risc_addr);
627 		mcp->out_mb = MBX_8|MBX_0;
628 	} else {
629 		mcp->mb[0] = MBC_LOAD_RISC_RAM;
630 		mcp->out_mb = MBX_0;
631 	}
632 	mcp->mb[1] = LSW(risc_addr);
633 	mcp->mb[2] = MSW(req_dma);
634 	mcp->mb[3] = LSW(req_dma);
635 	mcp->mb[6] = MSW(MSD(req_dma));
636 	mcp->mb[7] = LSW(MSD(req_dma));
637 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
638 	if (IS_FWI2_CAPABLE(ha)) {
639 		mcp->mb[4] = MSW(risc_code_size);
640 		mcp->mb[5] = LSW(risc_code_size);
641 		mcp->out_mb |= MBX_5|MBX_4;
642 	} else {
643 		mcp->mb[4] = LSW(risc_code_size);
644 		mcp->out_mb |= MBX_4;
645 	}
646 
647 	mcp->in_mb = MBX_1|MBX_0;
648 	mcp->tov = MBX_TOV_SECONDS;
649 	mcp->flags = 0;
650 	rval = qla2x00_mailbox_command(vha, mcp);
651 
652 	if (rval != QLA_SUCCESS) {
653 		ql_dbg(ql_dbg_mbx, vha, 0x1023,
654 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
655 		    rval, mcp->mb[0], mcp->mb[1]);
656 		vha->hw_err_cnt++;
657 	} else {
658 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
659 		    "Done %s.\n", __func__);
660 	}
661 
662 	return rval;
663 }
664 
665 #define	NVME_ENABLE_FLAG	BIT_3
666 
667 /*
668  * qla2x00_execute_fw
669  *     Start adapter firmware.
670  *
671  * Input:
672  *     ha = adapter block pointer.
673  *     TARGET_QUEUE_LOCK must be released.
674  *     ADAPTER_STATE_LOCK must be released.
675  *
676  * Returns:
677  *     qla2x00 local function return status code.
678  *
679  * Context:
680  *     Kernel context.
681  */
682 int
683 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
684 {
685 	int rval;
686 	struct qla_hw_data *ha = vha->hw;
687 	mbx_cmd_t mc;
688 	mbx_cmd_t *mcp = &mc;
689 	u8 semaphore = 0;
690 #define EXE_FW_FORCE_SEMAPHORE BIT_7
691 	u8 retry = 3;
692 
693 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
694 	    "Entered %s.\n", __func__);
695 
696 again:
697 	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
698 	mcp->out_mb = MBX_0;
699 	mcp->in_mb = MBX_0;
700 	if (IS_FWI2_CAPABLE(ha)) {
701 		mcp->mb[1] = MSW(risc_addr);
702 		mcp->mb[2] = LSW(risc_addr);
703 		mcp->mb[3] = 0;
704 		mcp->mb[4] = 0;
705 		mcp->mb[11] = 0;
706 
707 		/* Enable BPM? */
708 		if (ha->flags.lr_detected) {
709 			mcp->mb[4] = BIT_0;
710 			if (IS_BPM_RANGE_CAPABLE(ha))
711 				mcp->mb[4] |=
712 				    ha->lr_distance << LR_DIST_FW_POS;
713 		}
714 
715 		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
716 			mcp->mb[4] |= NVME_ENABLE_FLAG;
717 
718 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
719 			struct nvram_81xx *nv = ha->nvram;
720 			/* set minimum speed if specified in nvram */
721 			if (nv->min_supported_speed >= 2 &&
722 			    nv->min_supported_speed <= 5) {
723 				mcp->mb[4] |= BIT_4;
724 				mcp->mb[11] |= nv->min_supported_speed & 0xF;
725 				mcp->out_mb |= MBX_11;
726 				mcp->in_mb |= BIT_5;
727 				vha->min_supported_speed =
728 				    nv->min_supported_speed;
729 			}
730 		}
731 
732 		if (ha->flags.exlogins_enabled)
733 			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
734 
735 		if (ha->flags.exchoffld_enabled)
736 			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
737 
738 		if (semaphore)
739 			mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
740 
741 		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
742 		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
743 	} else {
744 		mcp->mb[1] = LSW(risc_addr);
745 		mcp->out_mb |= MBX_1;
746 		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
747 			mcp->mb[2] = 0;
748 			mcp->out_mb |= MBX_2;
749 		}
750 	}
751 
752 	mcp->tov = MBX_TOV_SECONDS;
753 	mcp->flags = 0;
754 	rval = qla2x00_mailbox_command(vha, mcp);
755 
756 	if (rval != QLA_SUCCESS) {
757 		if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
758 		    mcp->mb[1] == 0x27 && retry) {
759 			semaphore = 1;
760 			retry--;
761 			ql_dbg(ql_dbg_async, vha, 0x1026,
762 			    "Exe FW: force semaphore.\n");
763 			goto again;
764 		}
765 
766 		ql_dbg(ql_dbg_mbx, vha, 0x1026,
767 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
768 		vha->hw_err_cnt++;
769 		return rval;
770 	}
771 
772 	if (!IS_FWI2_CAPABLE(ha))
773 		goto done;
774 
775 	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
776 	ql_dbg(ql_dbg_mbx, vha, 0x119a,
777 	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
778 	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
779 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
780 		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
781 		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
782 		    ha->max_supported_speed == 0 ? "16Gps" :
783 		    ha->max_supported_speed == 1 ? "32Gps" :
784 		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
785 		if (vha->min_supported_speed) {
786 			ha->min_supported_speed = mcp->mb[5] &
787 			    (BIT_0 | BIT_1 | BIT_2);
788 			ql_dbg(ql_dbg_mbx, vha, 0x119c,
789 			    "min_supported_speed=%s.\n",
790 			    ha->min_supported_speed == 6 ? "64Gps" :
791 			    ha->min_supported_speed == 5 ? "32Gps" :
792 			    ha->min_supported_speed == 4 ? "16Gps" :
793 			    ha->min_supported_speed == 3 ? "8Gps" :
794 			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");
795 		}
796 	}
797 
798 done:
799 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
800 	    "Done %s.\n", __func__);
801 
802 	return rval;
803 }
804 
805 /*
806  * qla_get_exlogin_status
807  *	Get extended login status
808  *	uses the memory offload control/status Mailbox
809  *
810  * Input:
811  *	ha:		adapter state pointer.
812  *	fwopt:		firmware options
813  *
814  * Returns:
815  *	qla2x00 local function status
816  *
817  * Context:
818  *	Kernel context.
819  */
820 #define	FETCH_XLOGINS_STAT	0x8
821 int
822 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
823 	uint16_t *ex_logins_cnt)
824 {
825 	int rval;
826 	mbx_cmd_t	mc;
827 	mbx_cmd_t	*mcp = &mc;
828 
829 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
830 	    "Entered %s\n", __func__);
831 
832 	memset(mcp->mb, 0 , sizeof(mcp->mb));
833 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
834 	mcp->mb[1] = FETCH_XLOGINS_STAT;
835 	mcp->out_mb = MBX_1|MBX_0;
836 	mcp->in_mb = MBX_10|MBX_4|MBX_0;
837 	mcp->tov = MBX_TOV_SECONDS;
838 	mcp->flags = 0;
839 
840 	rval = qla2x00_mailbox_command(vha, mcp);
841 	if (rval != QLA_SUCCESS) {
842 		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
843 	} else {
844 		*buf_sz = mcp->mb[4];
845 		*ex_logins_cnt = mcp->mb[10];
846 
847 		ql_log(ql_log_info, vha, 0x1190,
848 		    "buffer size 0x%x, exchange login count=%d\n",
849 		    mcp->mb[4], mcp->mb[10]);
850 
851 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
852 		    "Done %s.\n", __func__);
853 	}
854 
855 	return rval;
856 }
857 
858 /*
859  * qla_set_exlogin_mem_cfg
860  *	set extended login memory configuration
861  *	Mbx needs to be issues before init_cb is set
862  *
863  * Input:
864  *	ha:		adapter state pointer.
865  *	buffer:		buffer pointer
866  *	phys_addr:	physical address of buffer
867  *	size:		size of buffer
868  *	TARGET_QUEUE_LOCK must be released
869  *	ADAPTER_STATE_LOCK must be release
870  *
871  * Returns:
872  *	qla2x00 local funxtion status code.
873  *
874  * Context:
875  *	Kernel context.
876  */
877 #define CONFIG_XLOGINS_MEM	0x9
878 int
879 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
880 {
881 	int		rval;
882 	mbx_cmd_t	mc;
883 	mbx_cmd_t	*mcp = &mc;
884 	struct qla_hw_data *ha = vha->hw;
885 
886 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
887 	    "Entered %s.\n", __func__);
888 
889 	memset(mcp->mb, 0 , sizeof(mcp->mb));
890 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
891 	mcp->mb[1] = CONFIG_XLOGINS_MEM;
892 	mcp->mb[2] = MSW(phys_addr);
893 	mcp->mb[3] = LSW(phys_addr);
894 	mcp->mb[6] = MSW(MSD(phys_addr));
895 	mcp->mb[7] = LSW(MSD(phys_addr));
896 	mcp->mb[8] = MSW(ha->exlogin_size);
897 	mcp->mb[9] = LSW(ha->exlogin_size);
898 	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
899 	mcp->in_mb = MBX_11|MBX_0;
900 	mcp->tov = MBX_TOV_SECONDS;
901 	mcp->flags = 0;
902 	rval = qla2x00_mailbox_command(vha, mcp);
903 	if (rval != QLA_SUCCESS) {
904 		ql_dbg(ql_dbg_mbx, vha, 0x111b,
905 		       "EXlogin Failed=%x. MB0=%x MB11=%x\n",
906 		       rval, mcp->mb[0], mcp->mb[11]);
907 	} else {
908 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
909 		    "Done %s.\n", __func__);
910 	}
911 
912 	return rval;
913 }
914 
915 /*
916  * qla_get_exchoffld_status
917  *	Get exchange offload status
918  *	uses the memory offload control/status Mailbox
919  *
920  * Input:
921  *	ha:		adapter state pointer.
922  *	fwopt:		firmware options
923  *
924  * Returns:
925  *	qla2x00 local function status
926  *
927  * Context:
928  *	Kernel context.
929  */
930 #define	FETCH_XCHOFFLD_STAT	0x2
931 int
932 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
933 	uint16_t *ex_logins_cnt)
934 {
935 	int rval;
936 	mbx_cmd_t	mc;
937 	mbx_cmd_t	*mcp = &mc;
938 
939 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
940 	    "Entered %s\n", __func__);
941 
942 	memset(mcp->mb, 0 , sizeof(mcp->mb));
943 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
944 	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
945 	mcp->out_mb = MBX_1|MBX_0;
946 	mcp->in_mb = MBX_10|MBX_4|MBX_0;
947 	mcp->tov = MBX_TOV_SECONDS;
948 	mcp->flags = 0;
949 
950 	rval = qla2x00_mailbox_command(vha, mcp);
951 	if (rval != QLA_SUCCESS) {
952 		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
953 	} else {
954 		*buf_sz = mcp->mb[4];
955 		*ex_logins_cnt = mcp->mb[10];
956 
957 		ql_log(ql_log_info, vha, 0x118e,
958 		    "buffer size 0x%x, exchange offload count=%d\n",
959 		    mcp->mb[4], mcp->mb[10]);
960 
961 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
962 		    "Done %s.\n", __func__);
963 	}
964 
965 	return rval;
966 }
967 
968 /*
969  * qla_set_exchoffld_mem_cfg
970  *	Set exchange offload memory configuration
971  *	Mbx needs to be issues before init_cb is set
972  *
973  * Input:
974  *	ha:		adapter state pointer.
975  *	buffer:		buffer pointer
976  *	phys_addr:	physical address of buffer
977  *	size:		size of buffer
978  *	TARGET_QUEUE_LOCK must be released
979  *	ADAPTER_STATE_LOCK must be release
980  *
981  * Returns:
982  *	qla2x00 local funxtion status code.
983  *
984  * Context:
985  *	Kernel context.
986  */
987 #define CONFIG_XCHOFFLD_MEM	0x3
988 int
989 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
990 {
991 	int		rval;
992 	mbx_cmd_t	mc;
993 	mbx_cmd_t	*mcp = &mc;
994 	struct qla_hw_data *ha = vha->hw;
995 
996 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
997 	    "Entered %s.\n", __func__);
998 
999 	memset(mcp->mb, 0 , sizeof(mcp->mb));
1000 	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1001 	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1002 	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1003 	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1004 	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1005 	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1006 	mcp->mb[8] = MSW(ha->exchoffld_size);
1007 	mcp->mb[9] = LSW(ha->exchoffld_size);
1008 	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1009 	mcp->in_mb = MBX_11|MBX_0;
1010 	mcp->tov = MBX_TOV_SECONDS;
1011 	mcp->flags = 0;
1012 	rval = qla2x00_mailbox_command(vha, mcp);
1013 	if (rval != QLA_SUCCESS) {
1014 		/*EMPTY*/
1015 		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1016 	} else {
1017 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1018 		    "Done %s.\n", __func__);
1019 	}
1020 
1021 	return rval;
1022 }
1023 
1024 /*
1025  * qla2x00_get_fw_version
1026  *	Get firmware version.
1027  *
1028  * Input:
1029  *	ha:		adapter state pointer.
1030  *	major:		pointer for major number.
1031  *	minor:		pointer for minor number.
1032  *	subminor:	pointer for subminor number.
1033  *
1034  * Returns:
1035  *	qla2x00 local function return status code.
1036  *
1037  * Context:
1038  *	Kernel context.
1039  */
1040 int
1041 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1042 {
1043 	int		rval;
1044 	mbx_cmd_t	mc;
1045 	mbx_cmd_t	*mcp = &mc;
1046 	struct qla_hw_data *ha = vha->hw;
1047 
1048 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1049 	    "Entered %s.\n", __func__);
1050 
1051 	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1052 	mcp->out_mb = MBX_0;
1053 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1054 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1055 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1056 	if (IS_FWI2_CAPABLE(ha))
1057 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1058 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1059 		mcp->in_mb |=
1060 		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1061 		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1062 
1063 	mcp->flags = 0;
1064 	mcp->tov = MBX_TOV_SECONDS;
1065 	rval = qla2x00_mailbox_command(vha, mcp);
1066 	if (rval != QLA_SUCCESS)
1067 		goto failed;
1068 
1069 	/* Return mailbox data. */
1070 	ha->fw_major_version = mcp->mb[1];
1071 	ha->fw_minor_version = mcp->mb[2];
1072 	ha->fw_subminor_version = mcp->mb[3];
1073 	ha->fw_attributes = mcp->mb[6];
1074 	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1075 		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
1076 	else
1077 		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1078 
1079 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1080 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
1081 		ha->mpi_version[1] = mcp->mb[11] >> 8;
1082 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
1083 		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1084 		ha->phy_version[0] = mcp->mb[8] & 0xff;
1085 		ha->phy_version[1] = mcp->mb[9] >> 8;
1086 		ha->phy_version[2] = mcp->mb[9] & 0xff;
1087 	}
1088 
1089 	if (IS_FWI2_CAPABLE(ha)) {
1090 		ha->fw_attributes_h = mcp->mb[15];
1091 		ha->fw_attributes_ext[0] = mcp->mb[16];
1092 		ha->fw_attributes_ext[1] = mcp->mb[17];
1093 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1094 		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1095 		    __func__, mcp->mb[15], mcp->mb[6]);
1096 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1097 		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1098 		    __func__, mcp->mb[17], mcp->mb[16]);
1099 
1100 		if (ha->fw_attributes_h & 0x4)
1101 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1102 			    "%s: Firmware supports Extended Login 0x%x\n",
1103 			    __func__, ha->fw_attributes_h);
1104 
1105 		if (ha->fw_attributes_h & 0x8)
1106 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1107 			    "%s: Firmware supports Exchange Offload 0x%x\n",
1108 			    __func__, ha->fw_attributes_h);
1109 
1110 		/*
1111 		 * FW supports nvme and driver load parameter requested nvme.
1112 		 * BIT 26 of fw_attributes indicates NVMe support.
1113 		 */
1114 		if ((ha->fw_attributes_h &
1115 		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1116 			ql2xnvmeenable) {
1117 			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1118 				vha->flags.nvme_first_burst = 1;
1119 
1120 			vha->flags.nvme_enabled = 1;
1121 			ql_log(ql_log_info, vha, 0xd302,
1122 			    "%s: FC-NVMe is Enabled (0x%x)\n",
1123 			     __func__, ha->fw_attributes_h);
1124 		}
1125 
1126 		/* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1127 		if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1128 			ql_log(ql_log_info, vha, 0xd302,
1129 			       "Firmware supports NVMe2 0x%x\n",
1130 			       ha->fw_attributes_ext[0]);
1131 			vha->flags.nvme2_enabled = 1;
1132 		}
1133 	}
1134 
1135 	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1136 		ha->serdes_version[0] = mcp->mb[7] & 0xff;
1137 		ha->serdes_version[1] = mcp->mb[8] >> 8;
1138 		ha->serdes_version[2] = mcp->mb[8] & 0xff;
1139 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
1140 		ha->mpi_version[1] = mcp->mb[11] >> 8;
1141 		ha->mpi_version[2] = mcp->mb[11] & 0xff;
1142 		ha->pep_version[0] = mcp->mb[13] & 0xff;
1143 		ha->pep_version[1] = mcp->mb[14] >> 8;
1144 		ha->pep_version[2] = mcp->mb[14] & 0xff;
1145 		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1146 		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1147 		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1148 		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1149 		if (IS_QLA28XX(ha)) {
1150 			if (mcp->mb[16] & BIT_10)
1151 				ha->flags.secure_fw = 1;
1152 
1153 			ql_log(ql_log_info, vha, 0xffff,
1154 			    "Secure Flash Update in FW: %s\n",
1155 			    (ha->flags.secure_fw) ? "Supported" :
1156 			    "Not Supported");
1157 		}
1158 
1159 		if (ha->flags.scm_supported_a &&
1160 		    (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1161 			ha->flags.scm_supported_f = 1;
1162 			ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1163 		}
1164 		ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
1165 		       (ha->flags.scm_supported_f) ? "Supported" :
1166 		       "Not Supported");
1167 
1168 		if (vha->flags.nvme2_enabled) {
1169 			/* set BIT_15 of special feature control block for SLER */
1170 			ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1171 			/* set BIT_14 of special feature control block for PI CTRL*/
1172 			ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1173 		}
1174 	}
1175 
1176 failed:
1177 	if (rval != QLA_SUCCESS) {
1178 		/*EMPTY*/
1179 		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1180 	} else {
1181 		/*EMPTY*/
1182 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1183 		    "Done %s.\n", __func__);
1184 	}
1185 	return rval;
1186 }
1187 
1188 /*
1189  * qla2x00_get_fw_options
1190  *	Set firmware options.
1191  *
1192  * Input:
1193  *	ha = adapter block pointer.
1194  *	fwopt = pointer for firmware options.
1195  *
1196  * Returns:
1197  *	qla2x00 local function return status code.
1198  *
1199  * Context:
1200  *	Kernel context.
1201  */
1202 int
1203 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1204 {
1205 	int rval;
1206 	mbx_cmd_t mc;
1207 	mbx_cmd_t *mcp = &mc;
1208 
1209 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1210 	    "Entered %s.\n", __func__);
1211 
1212 	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1213 	mcp->out_mb = MBX_0;
1214 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1215 	mcp->tov = MBX_TOV_SECONDS;
1216 	mcp->flags = 0;
1217 	rval = qla2x00_mailbox_command(vha, mcp);
1218 
1219 	if (rval != QLA_SUCCESS) {
1220 		/*EMPTY*/
1221 		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1222 	} else {
1223 		fwopts[0] = mcp->mb[0];
1224 		fwopts[1] = mcp->mb[1];
1225 		fwopts[2] = mcp->mb[2];
1226 		fwopts[3] = mcp->mb[3];
1227 
1228 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1229 		    "Done %s.\n", __func__);
1230 	}
1231 
1232 	return rval;
1233 }
1234 
1235 
1236 /*
1237  * qla2x00_set_fw_options
1238  *	Set firmware options.
1239  *
1240  * Input:
1241  *	ha = adapter block pointer.
1242  *	fwopt = pointer for firmware options.
1243  *
1244  * Returns:
1245  *	qla2x00 local function return status code.
1246  *
1247  * Context:
1248  *	Kernel context.
1249  */
1250 int
1251 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1252 {
1253 	int rval;
1254 	mbx_cmd_t mc;
1255 	mbx_cmd_t *mcp = &mc;
1256 
1257 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1258 	    "Entered %s.\n", __func__);
1259 
1260 	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1261 	mcp->mb[1] = fwopts[1];
1262 	mcp->mb[2] = fwopts[2];
1263 	mcp->mb[3] = fwopts[3];
1264 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1265 	mcp->in_mb = MBX_0;
1266 	if (IS_FWI2_CAPABLE(vha->hw)) {
1267 		mcp->in_mb |= MBX_1;
1268 		mcp->mb[10] = fwopts[10];
1269 		mcp->out_mb |= MBX_10;
1270 	} else {
1271 		mcp->mb[10] = fwopts[10];
1272 		mcp->mb[11] = fwopts[11];
1273 		mcp->mb[12] = 0;	/* Undocumented, but used */
1274 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1275 	}
1276 	mcp->tov = MBX_TOV_SECONDS;
1277 	mcp->flags = 0;
1278 	rval = qla2x00_mailbox_command(vha, mcp);
1279 
1280 	fwopts[0] = mcp->mb[0];
1281 
1282 	if (rval != QLA_SUCCESS) {
1283 		/*EMPTY*/
1284 		ql_dbg(ql_dbg_mbx, vha, 0x1030,
1285 		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1286 	} else {
1287 		/*EMPTY*/
1288 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1289 		    "Done %s.\n", __func__);
1290 	}
1291 
1292 	return rval;
1293 }
1294 
1295 /*
1296  * qla2x00_mbx_reg_test
1297  *	Mailbox register wrap test.
1298  *
1299  * Input:
1300  *	ha = adapter block pointer.
1301  *	TARGET_QUEUE_LOCK must be released.
1302  *	ADAPTER_STATE_LOCK must be released.
1303  *
1304  * Returns:
1305  *	qla2x00 local function return status code.
1306  *
1307  * Context:
1308  *	Kernel context.
1309  */
1310 int
1311 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1312 {
1313 	int rval;
1314 	mbx_cmd_t mc;
1315 	mbx_cmd_t *mcp = &mc;
1316 
1317 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1318 	    "Entered %s.\n", __func__);
1319 
1320 	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1321 	mcp->mb[1] = 0xAAAA;
1322 	mcp->mb[2] = 0x5555;
1323 	mcp->mb[3] = 0xAA55;
1324 	mcp->mb[4] = 0x55AA;
1325 	mcp->mb[5] = 0xA5A5;
1326 	mcp->mb[6] = 0x5A5A;
1327 	mcp->mb[7] = 0x2525;
1328 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1329 	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1330 	mcp->tov = MBX_TOV_SECONDS;
1331 	mcp->flags = 0;
1332 	rval = qla2x00_mailbox_command(vha, mcp);
1333 
1334 	if (rval == QLA_SUCCESS) {
1335 		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1336 		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1337 			rval = QLA_FUNCTION_FAILED;
1338 		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1339 		    mcp->mb[7] != 0x2525)
1340 			rval = QLA_FUNCTION_FAILED;
1341 	}
1342 
1343 	if (rval != QLA_SUCCESS) {
1344 		/*EMPTY*/
1345 		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1346 		vha->hw_err_cnt++;
1347 	} else {
1348 		/*EMPTY*/
1349 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1350 		    "Done %s.\n", __func__);
1351 	}
1352 
1353 	return rval;
1354 }
1355 
1356 /*
1357  * qla2x00_verify_checksum
1358  *	Verify firmware checksum.
1359  *
1360  * Input:
1361  *	ha = adapter block pointer.
1362  *	TARGET_QUEUE_LOCK must be released.
1363  *	ADAPTER_STATE_LOCK must be released.
1364  *
1365  * Returns:
1366  *	qla2x00 local function return status code.
1367  *
1368  * Context:
1369  *	Kernel context.
1370  */
1371 int
1372 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1373 {
1374 	int rval;
1375 	mbx_cmd_t mc;
1376 	mbx_cmd_t *mcp = &mc;
1377 
1378 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1379 	    "Entered %s.\n", __func__);
1380 
1381 	mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1382 	mcp->out_mb = MBX_0;
1383 	mcp->in_mb = MBX_0;
1384 	if (IS_FWI2_CAPABLE(vha->hw)) {
1385 		mcp->mb[1] = MSW(risc_addr);
1386 		mcp->mb[2] = LSW(risc_addr);
1387 		mcp->out_mb |= MBX_2|MBX_1;
1388 		mcp->in_mb |= MBX_2|MBX_1;
1389 	} else {
1390 		mcp->mb[1] = LSW(risc_addr);
1391 		mcp->out_mb |= MBX_1;
1392 		mcp->in_mb |= MBX_1;
1393 	}
1394 
1395 	mcp->tov = MBX_TOV_SECONDS;
1396 	mcp->flags = 0;
1397 	rval = qla2x00_mailbox_command(vha, mcp);
1398 
1399 	if (rval != QLA_SUCCESS) {
1400 		ql_dbg(ql_dbg_mbx, vha, 0x1036,
1401 		    "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1402 		    (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1403 	} else {
1404 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1405 		    "Done %s.\n", __func__);
1406 	}
1407 
1408 	return rval;
1409 }
1410 
1411 /*
1412  * qla2x00_issue_iocb
1413  *	Issue IOCB using mailbox command
1414  *
1415  * Input:
1416  *	ha = adapter state pointer.
1417  *	buffer = buffer pointer.
1418  *	phys_addr = physical address of buffer.
1419  *	size = size of buffer.
1420  *	TARGET_QUEUE_LOCK must be released.
1421  *	ADAPTER_STATE_LOCK must be released.
1422  *
1423  * Returns:
1424  *	qla2x00 local function return status code.
1425  *
1426  * Context:
1427  *	Kernel context.
1428  */
1429 int
1430 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1431     dma_addr_t phys_addr, size_t size, uint32_t tov)
1432 {
1433 	int		rval;
1434 	mbx_cmd_t	mc;
1435 	mbx_cmd_t	*mcp = &mc;
1436 
1437 	if (!vha->hw->flags.fw_started)
1438 		return QLA_INVALID_COMMAND;
1439 
1440 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1441 	    "Entered %s.\n", __func__);
1442 
1443 	mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1444 	mcp->mb[1] = 0;
1445 	mcp->mb[2] = MSW(LSD(phys_addr));
1446 	mcp->mb[3] = LSW(LSD(phys_addr));
1447 	mcp->mb[6] = MSW(MSD(phys_addr));
1448 	mcp->mb[7] = LSW(MSD(phys_addr));
1449 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1450 	mcp->in_mb = MBX_1|MBX_0;
1451 	mcp->tov = tov;
1452 	mcp->flags = 0;
1453 	rval = qla2x00_mailbox_command(vha, mcp);
1454 
1455 	if (rval != QLA_SUCCESS) {
1456 		/*EMPTY*/
1457 		ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1458 	} else {
1459 		sts_entry_t *sts_entry = buffer;
1460 
1461 		/* Mask reserved bits. */
1462 		sts_entry->entry_status &=
1463 		    IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1464 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1465 		    "Done %s (status=%x).\n", __func__,
1466 		    sts_entry->entry_status);
1467 	}
1468 
1469 	return rval;
1470 }
1471 
1472 int
1473 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1474     size_t size)
1475 {
1476 	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1477 	    MBX_TOV_SECONDS);
1478 }
1479 
1480 /*
1481  * qla2x00_abort_command
1482  *	Abort command aborts a specified IOCB.
1483  *
1484  * Input:
1485  *	ha = adapter block pointer.
1486  *	sp = SB structure pointer.
1487  *
1488  * Returns:
1489  *	qla2x00 local function return status code.
1490  *
1491  * Context:
1492  *	Kernel context.
1493  */
1494 int
1495 qla2x00_abort_command(srb_t *sp)
1496 {
1497 	unsigned long   flags = 0;
1498 	int		rval;
1499 	uint32_t	handle = 0;
1500 	mbx_cmd_t	mc;
1501 	mbx_cmd_t	*mcp = &mc;
1502 	fc_port_t	*fcport = sp->fcport;
1503 	scsi_qla_host_t *vha = fcport->vha;
1504 	struct qla_hw_data *ha = vha->hw;
1505 	struct req_que *req;
1506 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1507 
1508 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1509 	    "Entered %s.\n", __func__);
1510 
1511 	if (sp->qpair)
1512 		req = sp->qpair->req;
1513 	else
1514 		req = vha->req;
1515 
1516 	spin_lock_irqsave(&ha->hardware_lock, flags);
1517 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1518 		if (req->outstanding_cmds[handle] == sp)
1519 			break;
1520 	}
1521 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1522 
1523 	if (handle == req->num_outstanding_cmds) {
1524 		/* command not found */
1525 		return QLA_FUNCTION_FAILED;
1526 	}
1527 
1528 	mcp->mb[0] = MBC_ABORT_COMMAND;
1529 	if (HAS_EXTENDED_IDS(ha))
1530 		mcp->mb[1] = fcport->loop_id;
1531 	else
1532 		mcp->mb[1] = fcport->loop_id << 8;
1533 	mcp->mb[2] = (uint16_t)handle;
1534 	mcp->mb[3] = (uint16_t)(handle >> 16);
1535 	mcp->mb[6] = (uint16_t)cmd->device->lun;
1536 	mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1537 	mcp->in_mb = MBX_0;
1538 	mcp->tov = MBX_TOV_SECONDS;
1539 	mcp->flags = 0;
1540 	rval = qla2x00_mailbox_command(vha, mcp);
1541 
1542 	if (rval != QLA_SUCCESS) {
1543 		ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1544 	} else {
1545 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1546 		    "Done %s.\n", __func__);
1547 	}
1548 
1549 	return rval;
1550 }
1551 
1552 int
1553 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1554 {
1555 	int rval, rval2;
1556 	mbx_cmd_t  mc;
1557 	mbx_cmd_t  *mcp = &mc;
1558 	scsi_qla_host_t *vha;
1559 
1560 	vha = fcport->vha;
1561 
1562 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1563 	    "Entered %s.\n", __func__);
1564 
1565 	mcp->mb[0] = MBC_ABORT_TARGET;
1566 	mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1567 	if (HAS_EXTENDED_IDS(vha->hw)) {
1568 		mcp->mb[1] = fcport->loop_id;
1569 		mcp->mb[10] = 0;
1570 		mcp->out_mb |= MBX_10;
1571 	} else {
1572 		mcp->mb[1] = fcport->loop_id << 8;
1573 	}
1574 	mcp->mb[2] = vha->hw->loop_reset_delay;
1575 	mcp->mb[9] = vha->vp_idx;
1576 
1577 	mcp->in_mb = MBX_0;
1578 	mcp->tov = MBX_TOV_SECONDS;
1579 	mcp->flags = 0;
1580 	rval = qla2x00_mailbox_command(vha, mcp);
1581 	if (rval != QLA_SUCCESS) {
1582 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1583 		    "Failed=%x.\n", rval);
1584 	}
1585 
1586 	/* Issue marker IOCB. */
1587 	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1588 							MK_SYNC_ID);
1589 	if (rval2 != QLA_SUCCESS) {
1590 		ql_dbg(ql_dbg_mbx, vha, 0x1040,
1591 		    "Failed to issue marker IOCB (%x).\n", rval2);
1592 	} else {
1593 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1594 		    "Done %s.\n", __func__);
1595 	}
1596 
1597 	return rval;
1598 }
1599 
1600 int
1601 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1602 {
1603 	int rval, rval2;
1604 	mbx_cmd_t  mc;
1605 	mbx_cmd_t  *mcp = &mc;
1606 	scsi_qla_host_t *vha;
1607 
1608 	vha = fcport->vha;
1609 
1610 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1611 	    "Entered %s.\n", __func__);
1612 
1613 	mcp->mb[0] = MBC_LUN_RESET;
1614 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1615 	if (HAS_EXTENDED_IDS(vha->hw))
1616 		mcp->mb[1] = fcport->loop_id;
1617 	else
1618 		mcp->mb[1] = fcport->loop_id << 8;
1619 	mcp->mb[2] = (u32)l;
1620 	mcp->mb[3] = 0;
1621 	mcp->mb[9] = vha->vp_idx;
1622 
1623 	mcp->in_mb = MBX_0;
1624 	mcp->tov = MBX_TOV_SECONDS;
1625 	mcp->flags = 0;
1626 	rval = qla2x00_mailbox_command(vha, mcp);
1627 	if (rval != QLA_SUCCESS) {
1628 		ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1629 	}
1630 
1631 	/* Issue marker IOCB. */
1632 	rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1633 								MK_SYNC_ID_LUN);
1634 	if (rval2 != QLA_SUCCESS) {
1635 		ql_dbg(ql_dbg_mbx, vha, 0x1044,
1636 		    "Failed to issue marker IOCB (%x).\n", rval2);
1637 	} else {
1638 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1639 		    "Done %s.\n", __func__);
1640 	}
1641 
1642 	return rval;
1643 }
1644 
1645 /*
1646  * qla2x00_get_adapter_id
1647  *	Get adapter ID and topology.
1648  *
1649  * Input:
1650  *	ha = adapter block pointer.
1651  *	id = pointer for loop ID.
1652  *	al_pa = pointer for AL_PA.
1653  *	area = pointer for area.
1654  *	domain = pointer for domain.
1655  *	top = pointer for topology.
1656  *	TARGET_QUEUE_LOCK must be released.
1657  *	ADAPTER_STATE_LOCK must be released.
1658  *
1659  * Returns:
1660  *	qla2x00 local function return status code.
1661  *
1662  * Context:
1663  *	Kernel context.
1664  */
1665 int
1666 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1667     uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1668 {
1669 	int rval;
1670 	mbx_cmd_t mc;
1671 	mbx_cmd_t *mcp = &mc;
1672 
1673 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1674 	    "Entered %s.\n", __func__);
1675 
1676 	mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1677 	mcp->mb[9] = vha->vp_idx;
1678 	mcp->out_mb = MBX_9|MBX_0;
1679 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1680 	if (IS_CNA_CAPABLE(vha->hw))
1681 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1682 	if (IS_FWI2_CAPABLE(vha->hw))
1683 		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1684 	if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1685 		mcp->in_mb |= MBX_15;
1686 		mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
1687 	}
1688 
1689 	mcp->tov = MBX_TOV_SECONDS;
1690 	mcp->flags = 0;
1691 	rval = qla2x00_mailbox_command(vha, mcp);
1692 	if (mcp->mb[0] == MBS_COMMAND_ERROR)
1693 		rval = QLA_COMMAND_ERROR;
1694 	else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1695 		rval = QLA_INVALID_COMMAND;
1696 
1697 	/* Return data. */
1698 	*id = mcp->mb[1];
1699 	*al_pa = LSB(mcp->mb[2]);
1700 	*area = MSB(mcp->mb[2]);
1701 	*domain	= LSB(mcp->mb[3]);
1702 	*top = mcp->mb[6];
1703 	*sw_cap = mcp->mb[7];
1704 
1705 	if (rval != QLA_SUCCESS) {
1706 		/*EMPTY*/
1707 		ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1708 	} else {
1709 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1710 		    "Done %s.\n", __func__);
1711 
1712 		if (IS_CNA_CAPABLE(vha->hw)) {
1713 			vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1714 			vha->fcoe_fcf_idx = mcp->mb[10];
1715 			vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1716 			vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1717 			vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1718 			vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1719 			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1720 			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1721 		}
1722 		/* If FA-WWN supported */
1723 		if (IS_FAWWN_CAPABLE(vha->hw)) {
1724 			if (mcp->mb[7] & BIT_14) {
1725 				vha->port_name[0] = MSB(mcp->mb[16]);
1726 				vha->port_name[1] = LSB(mcp->mb[16]);
1727 				vha->port_name[2] = MSB(mcp->mb[17]);
1728 				vha->port_name[3] = LSB(mcp->mb[17]);
1729 				vha->port_name[4] = MSB(mcp->mb[18]);
1730 				vha->port_name[5] = LSB(mcp->mb[18]);
1731 				vha->port_name[6] = MSB(mcp->mb[19]);
1732 				vha->port_name[7] = LSB(mcp->mb[19]);
1733 				fc_host_port_name(vha->host) =
1734 				    wwn_to_u64(vha->port_name);
1735 				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1736 				    "FA-WWN acquired %016llx\n",
1737 				    wwn_to_u64(vha->port_name));
1738 			}
1739 		}
1740 
1741 		if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1742 			vha->bbcr = mcp->mb[15];
1743 			if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1744 				ql_log(ql_log_info, vha, 0x11a4,
1745 				       "SCM: EDC ELS completed, flags 0x%x\n",
1746 				       mcp->mb[21]);
1747 			}
1748 			if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1749 				vha->hw->flags.scm_enabled = 1;
1750 				vha->scm_fabric_connection_flags |=
1751 				    SCM_FLAG_RDF_COMPLETED;
1752 				ql_log(ql_log_info, vha, 0x11a5,
1753 				       "SCM: RDF ELS completed, flags 0x%x\n",
1754 				       mcp->mb[23]);
1755 			}
1756 		}
1757 	}
1758 
1759 	return rval;
1760 }
1761 
1762 /*
1763  * qla2x00_get_retry_cnt
1764  *	Get current firmware login retry count and delay.
1765  *
1766  * Input:
1767  *	ha = adapter block pointer.
1768  *	retry_cnt = pointer to login retry count.
1769  *	tov = pointer to login timeout value.
1770  *
1771  * Returns:
1772  *	qla2x00 local function return status code.
1773  *
1774  * Context:
1775  *	Kernel context.
1776  */
1777 int
1778 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1779     uint16_t *r_a_tov)
1780 {
1781 	int rval;
1782 	uint16_t ratov;
1783 	mbx_cmd_t mc;
1784 	mbx_cmd_t *mcp = &mc;
1785 
1786 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1787 	    "Entered %s.\n", __func__);
1788 
1789 	mcp->mb[0] = MBC_GET_RETRY_COUNT;
1790 	mcp->out_mb = MBX_0;
1791 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1792 	mcp->tov = MBX_TOV_SECONDS;
1793 	mcp->flags = 0;
1794 	rval = qla2x00_mailbox_command(vha, mcp);
1795 
1796 	if (rval != QLA_SUCCESS) {
1797 		/*EMPTY*/
1798 		ql_dbg(ql_dbg_mbx, vha, 0x104a,
1799 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1800 	} else {
1801 		/* Convert returned data and check our values. */
1802 		*r_a_tov = mcp->mb[3] / 2;
1803 		ratov = (mcp->mb[3]/2) / 10;  /* mb[3] value is in 100ms */
1804 		if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1805 			/* Update to the larger values */
1806 			*retry_cnt = (uint8_t)mcp->mb[1];
1807 			*tov = ratov;
1808 		}
1809 
1810 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1811 		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1812 	}
1813 
1814 	return rval;
1815 }
1816 
1817 /*
1818  * qla2x00_init_firmware
1819  *	Initialize adapter firmware.
1820  *
1821  * Input:
1822  *	ha = adapter block pointer.
1823  *	dptr = Initialization control block pointer.
1824  *	size = size of initialization control block.
1825  *	TARGET_QUEUE_LOCK must be released.
1826  *	ADAPTER_STATE_LOCK must be released.
1827  *
1828  * Returns:
1829  *	qla2x00 local function return status code.
1830  *
1831  * Context:
1832  *	Kernel context.
1833  */
1834 int
1835 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1836 {
1837 	int rval;
1838 	mbx_cmd_t mc;
1839 	mbx_cmd_t *mcp = &mc;
1840 	struct qla_hw_data *ha = vha->hw;
1841 
1842 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1843 	    "Entered %s.\n", __func__);
1844 
1845 	if (IS_P3P_TYPE(ha) && ql2xdbwr)
1846 		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1847 			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1848 
1849 	if (ha->flags.npiv_supported)
1850 		mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1851 	else
1852 		mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1853 
1854 	mcp->mb[1] = 0;
1855 	mcp->mb[2] = MSW(ha->init_cb_dma);
1856 	mcp->mb[3] = LSW(ha->init_cb_dma);
1857 	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1858 	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1859 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1860 	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1861 		mcp->mb[1] = BIT_0;
1862 		mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1863 		mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1864 		mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1865 		mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1866 		mcp->mb[14] = sizeof(*ha->ex_init_cb);
1867 		mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1868 	}
1869 
1870 	if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1871 		mcp->mb[1] |= BIT_1;
1872 		mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1873 		mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1874 		mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1875 		mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1876 		mcp->mb[15] = sizeof(*ha->sf_init_cb);
1877 		mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1878 	}
1879 
1880 	/* 1 and 2 should normally be captured. */
1881 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
1882 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1883 		/* mb3 is additional info about the installed SFP. */
1884 		mcp->in_mb  |= MBX_3;
1885 	mcp->buf_size = size;
1886 	mcp->flags = MBX_DMA_OUT;
1887 	mcp->tov = MBX_TOV_SECONDS;
1888 	rval = qla2x00_mailbox_command(vha, mcp);
1889 
1890 	if (rval != QLA_SUCCESS) {
1891 		/*EMPTY*/
1892 		ql_dbg(ql_dbg_mbx, vha, 0x104d,
1893 		    "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1894 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1895 		if (ha->init_cb) {
1896 			ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1897 			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1898 			    0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1899 		}
1900 		if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1901 			ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1902 			ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1903 			    0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1904 		}
1905 	} else {
1906 		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1907 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1908 				ql_dbg(ql_dbg_mbx, vha, 0x119d,
1909 				    "Invalid SFP/Validation Failed\n");
1910 		}
1911 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1912 		    "Done %s.\n", __func__);
1913 	}
1914 
1915 	return rval;
1916 }
1917 
1918 
1919 /*
1920  * qla2x00_get_port_database
1921  *	Issue normal/enhanced get port database mailbox command
1922  *	and copy device name as necessary.
1923  *
1924  * Input:
1925  *	ha = adapter state pointer.
1926  *	dev = structure pointer.
1927  *	opt = enhanced cmd option byte.
1928  *
1929  * Returns:
1930  *	qla2x00 local function return status code.
1931  *
1932  * Context:
1933  *	Kernel context.
1934  */
1935 int
1936 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1937 {
1938 	int rval;
1939 	mbx_cmd_t mc;
1940 	mbx_cmd_t *mcp = &mc;
1941 	port_database_t *pd;
1942 	struct port_database_24xx *pd24;
1943 	dma_addr_t pd_dma;
1944 	struct qla_hw_data *ha = vha->hw;
1945 
1946 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1947 	    "Entered %s.\n", __func__);
1948 
1949 	pd24 = NULL;
1950 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1951 	if (pd  == NULL) {
1952 		ql_log(ql_log_warn, vha, 0x1050,
1953 		    "Failed to allocate port database structure.\n");
1954 		fcport->query = 0;
1955 		return QLA_MEMORY_ALLOC_FAILED;
1956 	}
1957 
1958 	mcp->mb[0] = MBC_GET_PORT_DATABASE;
1959 	if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1960 		mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1961 	mcp->mb[2] = MSW(pd_dma);
1962 	mcp->mb[3] = LSW(pd_dma);
1963 	mcp->mb[6] = MSW(MSD(pd_dma));
1964 	mcp->mb[7] = LSW(MSD(pd_dma));
1965 	mcp->mb[9] = vha->vp_idx;
1966 	mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1967 	mcp->in_mb = MBX_0;
1968 	if (IS_FWI2_CAPABLE(ha)) {
1969 		mcp->mb[1] = fcport->loop_id;
1970 		mcp->mb[10] = opt;
1971 		mcp->out_mb |= MBX_10|MBX_1;
1972 		mcp->in_mb |= MBX_1;
1973 	} else if (HAS_EXTENDED_IDS(ha)) {
1974 		mcp->mb[1] = fcport->loop_id;
1975 		mcp->mb[10] = opt;
1976 		mcp->out_mb |= MBX_10|MBX_1;
1977 	} else {
1978 		mcp->mb[1] = fcport->loop_id << 8 | opt;
1979 		mcp->out_mb |= MBX_1;
1980 	}
1981 	mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1982 	    PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1983 	mcp->flags = MBX_DMA_IN;
1984 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1985 	rval = qla2x00_mailbox_command(vha, mcp);
1986 	if (rval != QLA_SUCCESS)
1987 		goto gpd_error_out;
1988 
1989 	if (IS_FWI2_CAPABLE(ha)) {
1990 		uint64_t zero = 0;
1991 		u8 current_login_state, last_login_state;
1992 
1993 		pd24 = (struct port_database_24xx *) pd;
1994 
1995 		/* Check for logged in state. */
1996 		if (NVME_TARGET(ha, fcport)) {
1997 			current_login_state = pd24->current_login_state >> 4;
1998 			last_login_state = pd24->last_login_state >> 4;
1999 		} else {
2000 			current_login_state = pd24->current_login_state & 0xf;
2001 			last_login_state = pd24->last_login_state & 0xf;
2002 		}
2003 		fcport->current_login_state = pd24->current_login_state;
2004 		fcport->last_login_state = pd24->last_login_state;
2005 
2006 		/* Check for logged in state. */
2007 		if (current_login_state != PDS_PRLI_COMPLETE &&
2008 		    last_login_state != PDS_PRLI_COMPLETE) {
2009 			ql_dbg(ql_dbg_mbx, vha, 0x119a,
2010 			    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
2011 			    current_login_state, last_login_state,
2012 			    fcport->loop_id);
2013 			rval = QLA_FUNCTION_FAILED;
2014 
2015 			if (!fcport->query)
2016 				goto gpd_error_out;
2017 		}
2018 
2019 		if (fcport->loop_id == FC_NO_LOOP_ID ||
2020 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2021 		     memcmp(fcport->port_name, pd24->port_name, 8))) {
2022 			/* We lost the device mid way. */
2023 			rval = QLA_NOT_LOGGED_IN;
2024 			goto gpd_error_out;
2025 		}
2026 
2027 		/* Names are little-endian. */
2028 		memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
2029 		memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2030 
2031 		/* Get port_id of device. */
2032 		fcport->d_id.b.domain = pd24->port_id[0];
2033 		fcport->d_id.b.area = pd24->port_id[1];
2034 		fcport->d_id.b.al_pa = pd24->port_id[2];
2035 		fcport->d_id.b.rsvd_1 = 0;
2036 
2037 		/* If not target must be initiator or unknown type. */
2038 		if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2039 			fcport->port_type = FCT_INITIATOR;
2040 		else
2041 			fcport->port_type = FCT_TARGET;
2042 
2043 		/* Passback COS information. */
2044 		fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2045 				FC_COS_CLASS2 : FC_COS_CLASS3;
2046 
2047 		if (pd24->prli_svc_param_word_3[0] & BIT_7)
2048 			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2049 	} else {
2050 		uint64_t zero = 0;
2051 
2052 		/* Check for logged in state. */
2053 		if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2054 		    pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2055 			ql_dbg(ql_dbg_mbx, vha, 0x100a,
2056 			    "Unable to verify login-state (%x/%x) - "
2057 			    "portid=%02x%02x%02x.\n", pd->master_state,
2058 			    pd->slave_state, fcport->d_id.b.domain,
2059 			    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2060 			rval = QLA_FUNCTION_FAILED;
2061 			goto gpd_error_out;
2062 		}
2063 
2064 		if (fcport->loop_id == FC_NO_LOOP_ID ||
2065 		    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2066 		     memcmp(fcport->port_name, pd->port_name, 8))) {
2067 			/* We lost the device mid way. */
2068 			rval = QLA_NOT_LOGGED_IN;
2069 			goto gpd_error_out;
2070 		}
2071 
2072 		/* Names are little-endian. */
2073 		memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2074 		memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2075 
2076 		/* Get port_id of device. */
2077 		fcport->d_id.b.domain = pd->port_id[0];
2078 		fcport->d_id.b.area = pd->port_id[3];
2079 		fcport->d_id.b.al_pa = pd->port_id[2];
2080 		fcport->d_id.b.rsvd_1 = 0;
2081 
2082 		/* If not target must be initiator or unknown type. */
2083 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2084 			fcport->port_type = FCT_INITIATOR;
2085 		else
2086 			fcport->port_type = FCT_TARGET;
2087 
2088 		/* Passback COS information. */
2089 		fcport->supported_classes = (pd->options & BIT_4) ?
2090 		    FC_COS_CLASS2 : FC_COS_CLASS3;
2091 	}
2092 
2093 gpd_error_out:
2094 	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2095 	fcport->query = 0;
2096 
2097 	if (rval != QLA_SUCCESS) {
2098 		ql_dbg(ql_dbg_mbx, vha, 0x1052,
2099 		    "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2100 		    mcp->mb[0], mcp->mb[1]);
2101 	} else {
2102 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2103 		    "Done %s.\n", __func__);
2104 	}
2105 
2106 	return rval;
2107 }
2108 
2109 int
2110 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2111 	struct port_database_24xx *pdb)
2112 {
2113 	mbx_cmd_t mc;
2114 	mbx_cmd_t *mcp = &mc;
2115 	dma_addr_t pdb_dma;
2116 	int rval;
2117 
2118 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2119 	    "Entered %s.\n", __func__);
2120 
2121 	memset(pdb, 0, sizeof(*pdb));
2122 
2123 	pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2124 	    sizeof(*pdb), DMA_FROM_DEVICE);
2125 	if (!pdb_dma) {
2126 		ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2127 		return QLA_MEMORY_ALLOC_FAILED;
2128 	}
2129 
2130 	mcp->mb[0] = MBC_GET_PORT_DATABASE;
2131 	mcp->mb[1] = nport_handle;
2132 	mcp->mb[2] = MSW(LSD(pdb_dma));
2133 	mcp->mb[3] = LSW(LSD(pdb_dma));
2134 	mcp->mb[6] = MSW(MSD(pdb_dma));
2135 	mcp->mb[7] = LSW(MSD(pdb_dma));
2136 	mcp->mb[9] = 0;
2137 	mcp->mb[10] = 0;
2138 	mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2139 	mcp->in_mb = MBX_1|MBX_0;
2140 	mcp->buf_size = sizeof(*pdb);
2141 	mcp->flags = MBX_DMA_IN;
2142 	mcp->tov = vha->hw->login_timeout * 2;
2143 	rval = qla2x00_mailbox_command(vha, mcp);
2144 
2145 	if (rval != QLA_SUCCESS) {
2146 		ql_dbg(ql_dbg_mbx, vha, 0x111a,
2147 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
2148 		    rval, mcp->mb[0], mcp->mb[1]);
2149 	} else {
2150 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2151 		    "Done %s.\n", __func__);
2152 	}
2153 
2154 	dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2155 	    sizeof(*pdb), DMA_FROM_DEVICE);
2156 
2157 	return rval;
2158 }
2159 
2160 /*
2161  * qla2x00_get_firmware_state
2162  *	Get adapter firmware state.
2163  *
2164  * Input:
2165  *	ha = adapter block pointer.
2166  *	dptr = pointer for firmware state.
2167  *	TARGET_QUEUE_LOCK must be released.
2168  *	ADAPTER_STATE_LOCK must be released.
2169  *
2170  * Returns:
2171  *	qla2x00 local function return status code.
2172  *
2173  * Context:
2174  *	Kernel context.
2175  */
2176 int
2177 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2178 {
2179 	int rval;
2180 	mbx_cmd_t mc;
2181 	mbx_cmd_t *mcp = &mc;
2182 	struct qla_hw_data *ha = vha->hw;
2183 
2184 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2185 	    "Entered %s.\n", __func__);
2186 
2187 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2188 	mcp->out_mb = MBX_0;
2189 	if (IS_FWI2_CAPABLE(vha->hw))
2190 		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2191 	else
2192 		mcp->in_mb = MBX_1|MBX_0;
2193 	mcp->tov = MBX_TOV_SECONDS;
2194 	mcp->flags = 0;
2195 	rval = qla2x00_mailbox_command(vha, mcp);
2196 
2197 	/* Return firmware states. */
2198 	states[0] = mcp->mb[1];
2199 	if (IS_FWI2_CAPABLE(vha->hw)) {
2200 		states[1] = mcp->mb[2];
2201 		states[2] = mcp->mb[3];  /* SFP info */
2202 		states[3] = mcp->mb[4];
2203 		states[4] = mcp->mb[5];
2204 		states[5] = mcp->mb[6];  /* DPORT status */
2205 	}
2206 
2207 	if (rval != QLA_SUCCESS) {
2208 		/*EMPTY*/
2209 		ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2210 	} else {
2211 		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2212 			if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2213 				ql_dbg(ql_dbg_mbx, vha, 0x119e,
2214 				    "Invalid SFP/Validation Failed\n");
2215 		}
2216 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2217 		    "Done %s.\n", __func__);
2218 	}
2219 
2220 	return rval;
2221 }
2222 
2223 /*
2224  * qla2x00_get_port_name
2225  *	Issue get port name mailbox command.
2226  *	Returned name is in big endian format.
2227  *
2228  * Input:
2229  *	ha = adapter block pointer.
2230  *	loop_id = loop ID of device.
2231  *	name = pointer for name.
2232  *	TARGET_QUEUE_LOCK must be released.
2233  *	ADAPTER_STATE_LOCK must be released.
2234  *
2235  * Returns:
2236  *	qla2x00 local function return status code.
2237  *
2238  * Context:
2239  *	Kernel context.
2240  */
2241 int
2242 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2243     uint8_t opt)
2244 {
2245 	int rval;
2246 	mbx_cmd_t mc;
2247 	mbx_cmd_t *mcp = &mc;
2248 
2249 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2250 	    "Entered %s.\n", __func__);
2251 
2252 	mcp->mb[0] = MBC_GET_PORT_NAME;
2253 	mcp->mb[9] = vha->vp_idx;
2254 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
2255 	if (HAS_EXTENDED_IDS(vha->hw)) {
2256 		mcp->mb[1] = loop_id;
2257 		mcp->mb[10] = opt;
2258 		mcp->out_mb |= MBX_10;
2259 	} else {
2260 		mcp->mb[1] = loop_id << 8 | opt;
2261 	}
2262 
2263 	mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2264 	mcp->tov = MBX_TOV_SECONDS;
2265 	mcp->flags = 0;
2266 	rval = qla2x00_mailbox_command(vha, mcp);
2267 
2268 	if (rval != QLA_SUCCESS) {
2269 		/*EMPTY*/
2270 		ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2271 	} else {
2272 		if (name != NULL) {
2273 			/* This function returns name in big endian. */
2274 			name[0] = MSB(mcp->mb[2]);
2275 			name[1] = LSB(mcp->mb[2]);
2276 			name[2] = MSB(mcp->mb[3]);
2277 			name[3] = LSB(mcp->mb[3]);
2278 			name[4] = MSB(mcp->mb[6]);
2279 			name[5] = LSB(mcp->mb[6]);
2280 			name[6] = MSB(mcp->mb[7]);
2281 			name[7] = LSB(mcp->mb[7]);
2282 		}
2283 
2284 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2285 		    "Done %s.\n", __func__);
2286 	}
2287 
2288 	return rval;
2289 }
2290 
2291 /*
2292  * qla24xx_link_initialization
2293  *	Issue link initialization mailbox command.
2294  *
2295  * Input:
2296  *	ha = adapter block pointer.
2297  *	TARGET_QUEUE_LOCK must be released.
2298  *	ADAPTER_STATE_LOCK must be released.
2299  *
2300  * Returns:
2301  *	qla2x00 local function return status code.
2302  *
2303  * Context:
2304  *	Kernel context.
2305  */
2306 int
2307 qla24xx_link_initialize(scsi_qla_host_t *vha)
2308 {
2309 	int rval;
2310 	mbx_cmd_t mc;
2311 	mbx_cmd_t *mcp = &mc;
2312 
2313 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2314 	    "Entered %s.\n", __func__);
2315 
2316 	if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2317 		return QLA_FUNCTION_FAILED;
2318 
2319 	mcp->mb[0] = MBC_LINK_INITIALIZATION;
2320 	mcp->mb[1] = BIT_4;
2321 	if (vha->hw->operating_mode == LOOP)
2322 		mcp->mb[1] |= BIT_6;
2323 	else
2324 		mcp->mb[1] |= BIT_5;
2325 	mcp->mb[2] = 0;
2326 	mcp->mb[3] = 0;
2327 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2328 	mcp->in_mb = MBX_0;
2329 	mcp->tov = MBX_TOV_SECONDS;
2330 	mcp->flags = 0;
2331 	rval = qla2x00_mailbox_command(vha, mcp);
2332 
2333 	if (rval != QLA_SUCCESS) {
2334 		ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2335 	} else {
2336 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2337 		    "Done %s.\n", __func__);
2338 	}
2339 
2340 	return rval;
2341 }
2342 
2343 /*
2344  * qla2x00_lip_reset
2345  *	Issue LIP reset mailbox command.
2346  *
2347  * Input:
2348  *	ha = adapter block pointer.
2349  *	TARGET_QUEUE_LOCK must be released.
2350  *	ADAPTER_STATE_LOCK must be released.
2351  *
2352  * Returns:
2353  *	qla2x00 local function return status code.
2354  *
2355  * Context:
2356  *	Kernel context.
2357  */
2358 int
2359 qla2x00_lip_reset(scsi_qla_host_t *vha)
2360 {
2361 	int rval;
2362 	mbx_cmd_t mc;
2363 	mbx_cmd_t *mcp = &mc;
2364 
2365 	ql_dbg(ql_dbg_disc, vha, 0x105a,
2366 	    "Entered %s.\n", __func__);
2367 
2368 	if (IS_CNA_CAPABLE(vha->hw)) {
2369 		/* Logout across all FCFs. */
2370 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2371 		mcp->mb[1] = BIT_1;
2372 		mcp->mb[2] = 0;
2373 		mcp->out_mb = MBX_2|MBX_1|MBX_0;
2374 	} else if (IS_FWI2_CAPABLE(vha->hw)) {
2375 		mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2376 		mcp->mb[1] = BIT_4;
2377 		mcp->mb[2] = 0;
2378 		mcp->mb[3] = vha->hw->loop_reset_delay;
2379 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2380 	} else {
2381 		mcp->mb[0] = MBC_LIP_RESET;
2382 		mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2383 		if (HAS_EXTENDED_IDS(vha->hw)) {
2384 			mcp->mb[1] = 0x00ff;
2385 			mcp->mb[10] = 0;
2386 			mcp->out_mb |= MBX_10;
2387 		} else {
2388 			mcp->mb[1] = 0xff00;
2389 		}
2390 		mcp->mb[2] = vha->hw->loop_reset_delay;
2391 		mcp->mb[3] = 0;
2392 	}
2393 	mcp->in_mb = MBX_0;
2394 	mcp->tov = MBX_TOV_SECONDS;
2395 	mcp->flags = 0;
2396 	rval = qla2x00_mailbox_command(vha, mcp);
2397 
2398 	if (rval != QLA_SUCCESS) {
2399 		/*EMPTY*/
2400 		ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2401 	} else {
2402 		/*EMPTY*/
2403 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2404 		    "Done %s.\n", __func__);
2405 	}
2406 
2407 	return rval;
2408 }
2409 
2410 /*
2411  * qla2x00_send_sns
2412  *	Send SNS command.
2413  *
2414  * Input:
2415  *	ha = adapter block pointer.
2416  *	sns = pointer for command.
2417  *	cmd_size = command size.
2418  *	buf_size = response/command size.
2419  *	TARGET_QUEUE_LOCK must be released.
2420  *	ADAPTER_STATE_LOCK must be released.
2421  *
2422  * Returns:
2423  *	qla2x00 local function return status code.
2424  *
2425  * Context:
2426  *	Kernel context.
2427  */
2428 int
2429 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2430     uint16_t cmd_size, size_t buf_size)
2431 {
2432 	int rval;
2433 	mbx_cmd_t mc;
2434 	mbx_cmd_t *mcp = &mc;
2435 
2436 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2437 	    "Entered %s.\n", __func__);
2438 
2439 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2440 	    "Retry cnt=%d ratov=%d total tov=%d.\n",
2441 	    vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2442 
2443 	mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2444 	mcp->mb[1] = cmd_size;
2445 	mcp->mb[2] = MSW(sns_phys_address);
2446 	mcp->mb[3] = LSW(sns_phys_address);
2447 	mcp->mb[6] = MSW(MSD(sns_phys_address));
2448 	mcp->mb[7] = LSW(MSD(sns_phys_address));
2449 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2450 	mcp->in_mb = MBX_0|MBX_1;
2451 	mcp->buf_size = buf_size;
2452 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2453 	mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2454 	rval = qla2x00_mailbox_command(vha, mcp);
2455 
2456 	if (rval != QLA_SUCCESS) {
2457 		/*EMPTY*/
2458 		ql_dbg(ql_dbg_mbx, vha, 0x105f,
2459 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
2460 		    rval, mcp->mb[0], mcp->mb[1]);
2461 	} else {
2462 		/*EMPTY*/
2463 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2464 		    "Done %s.\n", __func__);
2465 	}
2466 
2467 	return rval;
2468 }
2469 
2470 int
2471 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2472     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2473 {
2474 	int		rval;
2475 
2476 	struct logio_entry_24xx *lg;
2477 	dma_addr_t	lg_dma;
2478 	uint32_t	iop[2];
2479 	struct qla_hw_data *ha = vha->hw;
2480 	struct req_que *req;
2481 
2482 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2483 	    "Entered %s.\n", __func__);
2484 
2485 	if (vha->vp_idx && vha->qpair)
2486 		req = vha->qpair->req;
2487 	else
2488 		req = ha->req_q_map[0];
2489 
2490 	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2491 	if (lg == NULL) {
2492 		ql_log(ql_log_warn, vha, 0x1062,
2493 		    "Failed to allocate login IOCB.\n");
2494 		return QLA_MEMORY_ALLOC_FAILED;
2495 	}
2496 
2497 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2498 	lg->entry_count = 1;
2499 	lg->handle = make_handle(req->id, lg->handle);
2500 	lg->nport_handle = cpu_to_le16(loop_id);
2501 	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2502 	if (opt & BIT_0)
2503 		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2504 	if (opt & BIT_1)
2505 		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2506 	lg->port_id[0] = al_pa;
2507 	lg->port_id[1] = area;
2508 	lg->port_id[2] = domain;
2509 	lg->vp_index = vha->vp_idx;
2510 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2511 	    (ha->r_a_tov / 10 * 2) + 2);
2512 	if (rval != QLA_SUCCESS) {
2513 		ql_dbg(ql_dbg_mbx, vha, 0x1063,
2514 		    "Failed to issue login IOCB (%x).\n", rval);
2515 	} else if (lg->entry_status != 0) {
2516 		ql_dbg(ql_dbg_mbx, vha, 0x1064,
2517 		    "Failed to complete IOCB -- error status (%x).\n",
2518 		    lg->entry_status);
2519 		rval = QLA_FUNCTION_FAILED;
2520 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2521 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2522 		iop[1] = le32_to_cpu(lg->io_parameter[1]);
2523 
2524 		ql_dbg(ql_dbg_mbx, vha, 0x1065,
2525 		    "Failed to complete IOCB -- completion  status (%x) "
2526 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2527 		    iop[0], iop[1]);
2528 
2529 		switch (iop[0]) {
2530 		case LSC_SCODE_PORTID_USED:
2531 			mb[0] = MBS_PORT_ID_USED;
2532 			mb[1] = LSW(iop[1]);
2533 			break;
2534 		case LSC_SCODE_NPORT_USED:
2535 			mb[0] = MBS_LOOP_ID_USED;
2536 			break;
2537 		case LSC_SCODE_NOLINK:
2538 		case LSC_SCODE_NOIOCB:
2539 		case LSC_SCODE_NOXCB:
2540 		case LSC_SCODE_CMD_FAILED:
2541 		case LSC_SCODE_NOFABRIC:
2542 		case LSC_SCODE_FW_NOT_READY:
2543 		case LSC_SCODE_NOT_LOGGED_IN:
2544 		case LSC_SCODE_NOPCB:
2545 		case LSC_SCODE_ELS_REJECT:
2546 		case LSC_SCODE_CMD_PARAM_ERR:
2547 		case LSC_SCODE_NONPORT:
2548 		case LSC_SCODE_LOGGED_IN:
2549 		case LSC_SCODE_NOFLOGI_ACC:
2550 		default:
2551 			mb[0] = MBS_COMMAND_ERROR;
2552 			break;
2553 		}
2554 	} else {
2555 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2556 		    "Done %s.\n", __func__);
2557 
2558 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
2559 
2560 		mb[0] = MBS_COMMAND_COMPLETE;
2561 		mb[1] = 0;
2562 		if (iop[0] & BIT_4) {
2563 			if (iop[0] & BIT_8)
2564 				mb[1] |= BIT_1;
2565 		} else
2566 			mb[1] = BIT_0;
2567 
2568 		/* Passback COS information. */
2569 		mb[10] = 0;
2570 		if (lg->io_parameter[7] || lg->io_parameter[8])
2571 			mb[10] |= BIT_0;	/* Class 2. */
2572 		if (lg->io_parameter[9] || lg->io_parameter[10])
2573 			mb[10] |= BIT_1;	/* Class 3. */
2574 		if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2575 			mb[10] |= BIT_7;	/* Confirmed Completion
2576 						 * Allowed
2577 						 */
2578 	}
2579 
2580 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2581 
2582 	return rval;
2583 }
2584 
2585 /*
2586  * qla2x00_login_fabric
2587  *	Issue login fabric port mailbox command.
2588  *
2589  * Input:
2590  *	ha = adapter block pointer.
2591  *	loop_id = device loop ID.
2592  *	domain = device domain.
2593  *	area = device area.
2594  *	al_pa = device AL_PA.
2595  *	status = pointer for return status.
2596  *	opt = command options.
2597  *	TARGET_QUEUE_LOCK must be released.
2598  *	ADAPTER_STATE_LOCK must be released.
2599  *
2600  * Returns:
2601  *	qla2x00 local function return status code.
2602  *
2603  * Context:
2604  *	Kernel context.
2605  */
2606 int
2607 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2608     uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2609 {
2610 	int rval;
2611 	mbx_cmd_t mc;
2612 	mbx_cmd_t *mcp = &mc;
2613 	struct qla_hw_data *ha = vha->hw;
2614 
2615 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2616 	    "Entered %s.\n", __func__);
2617 
2618 	mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2619 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2620 	if (HAS_EXTENDED_IDS(ha)) {
2621 		mcp->mb[1] = loop_id;
2622 		mcp->mb[10] = opt;
2623 		mcp->out_mb |= MBX_10;
2624 	} else {
2625 		mcp->mb[1] = (loop_id << 8) | opt;
2626 	}
2627 	mcp->mb[2] = domain;
2628 	mcp->mb[3] = area << 8 | al_pa;
2629 
2630 	mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2631 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2632 	mcp->flags = 0;
2633 	rval = qla2x00_mailbox_command(vha, mcp);
2634 
2635 	/* Return mailbox statuses. */
2636 	if (mb != NULL) {
2637 		mb[0] = mcp->mb[0];
2638 		mb[1] = mcp->mb[1];
2639 		mb[2] = mcp->mb[2];
2640 		mb[6] = mcp->mb[6];
2641 		mb[7] = mcp->mb[7];
2642 		/* COS retrieved from Get-Port-Database mailbox command. */
2643 		mb[10] = 0;
2644 	}
2645 
2646 	if (rval != QLA_SUCCESS) {
2647 		/* RLU tmp code: need to change main mailbox_command function to
2648 		 * return ok even when the mailbox completion value is not
2649 		 * SUCCESS. The caller needs to be responsible to interpret
2650 		 * the return values of this mailbox command if we're not
2651 		 * to change too much of the existing code.
2652 		 */
2653 		if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2654 		    mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2655 		    mcp->mb[0] == 0x4006)
2656 			rval = QLA_SUCCESS;
2657 
2658 		/*EMPTY*/
2659 		ql_dbg(ql_dbg_mbx, vha, 0x1068,
2660 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2661 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2662 	} else {
2663 		/*EMPTY*/
2664 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2665 		    "Done %s.\n", __func__);
2666 	}
2667 
2668 	return rval;
2669 }
2670 
2671 /*
2672  * qla2x00_login_local_device
2673  *           Issue login loop port mailbox command.
2674  *
2675  * Input:
2676  *           ha = adapter block pointer.
2677  *           loop_id = device loop ID.
2678  *           opt = command options.
2679  *
2680  * Returns:
2681  *            Return status code.
2682  *
2683  * Context:
2684  *            Kernel context.
2685  *
2686  */
2687 int
2688 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2689     uint16_t *mb_ret, uint8_t opt)
2690 {
2691 	int rval;
2692 	mbx_cmd_t mc;
2693 	mbx_cmd_t *mcp = &mc;
2694 	struct qla_hw_data *ha = vha->hw;
2695 
2696 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2697 	    "Entered %s.\n", __func__);
2698 
2699 	if (IS_FWI2_CAPABLE(ha))
2700 		return qla24xx_login_fabric(vha, fcport->loop_id,
2701 		    fcport->d_id.b.domain, fcport->d_id.b.area,
2702 		    fcport->d_id.b.al_pa, mb_ret, opt);
2703 
2704 	mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2705 	if (HAS_EXTENDED_IDS(ha))
2706 		mcp->mb[1] = fcport->loop_id;
2707 	else
2708 		mcp->mb[1] = fcport->loop_id << 8;
2709 	mcp->mb[2] = opt;
2710 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
2711  	mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2712 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2713 	mcp->flags = 0;
2714 	rval = qla2x00_mailbox_command(vha, mcp);
2715 
2716  	/* Return mailbox statuses. */
2717  	if (mb_ret != NULL) {
2718  		mb_ret[0] = mcp->mb[0];
2719  		mb_ret[1] = mcp->mb[1];
2720  		mb_ret[6] = mcp->mb[6];
2721  		mb_ret[7] = mcp->mb[7];
2722  	}
2723 
2724 	if (rval != QLA_SUCCESS) {
2725  		/* AV tmp code: need to change main mailbox_command function to
2726  		 * return ok even when the mailbox completion value is not
2727  		 * SUCCESS. The caller needs to be responsible to interpret
2728  		 * the return values of this mailbox command if we're not
2729  		 * to change too much of the existing code.
2730  		 */
2731  		if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2732  			rval = QLA_SUCCESS;
2733 
2734 		ql_dbg(ql_dbg_mbx, vha, 0x106b,
2735 		    "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2736 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2737 	} else {
2738 		/*EMPTY*/
2739 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2740 		    "Done %s.\n", __func__);
2741 	}
2742 
2743 	return (rval);
2744 }
2745 
2746 int
2747 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2748     uint8_t area, uint8_t al_pa)
2749 {
2750 	int		rval;
2751 	struct logio_entry_24xx *lg;
2752 	dma_addr_t	lg_dma;
2753 	struct qla_hw_data *ha = vha->hw;
2754 	struct req_que *req;
2755 
2756 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2757 	    "Entered %s.\n", __func__);
2758 
2759 	lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2760 	if (lg == NULL) {
2761 		ql_log(ql_log_warn, vha, 0x106e,
2762 		    "Failed to allocate logout IOCB.\n");
2763 		return QLA_MEMORY_ALLOC_FAILED;
2764 	}
2765 
2766 	req = vha->req;
2767 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2768 	lg->entry_count = 1;
2769 	lg->handle = make_handle(req->id, lg->handle);
2770 	lg->nport_handle = cpu_to_le16(loop_id);
2771 	lg->control_flags =
2772 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2773 		LCF_FREE_NPORT);
2774 	lg->port_id[0] = al_pa;
2775 	lg->port_id[1] = area;
2776 	lg->port_id[2] = domain;
2777 	lg->vp_index = vha->vp_idx;
2778 	rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2779 	    (ha->r_a_tov / 10 * 2) + 2);
2780 	if (rval != QLA_SUCCESS) {
2781 		ql_dbg(ql_dbg_mbx, vha, 0x106f,
2782 		    "Failed to issue logout IOCB (%x).\n", rval);
2783 	} else if (lg->entry_status != 0) {
2784 		ql_dbg(ql_dbg_mbx, vha, 0x1070,
2785 		    "Failed to complete IOCB -- error status (%x).\n",
2786 		    lg->entry_status);
2787 		rval = QLA_FUNCTION_FAILED;
2788 	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2789 		ql_dbg(ql_dbg_mbx, vha, 0x1071,
2790 		    "Failed to complete IOCB -- completion status (%x) "
2791 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2792 		    le32_to_cpu(lg->io_parameter[0]),
2793 		    le32_to_cpu(lg->io_parameter[1]));
2794 	} else {
2795 		/*EMPTY*/
2796 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2797 		    "Done %s.\n", __func__);
2798 	}
2799 
2800 	dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2801 
2802 	return rval;
2803 }
2804 
2805 /*
2806  * qla2x00_fabric_logout
2807  *	Issue logout fabric port mailbox command.
2808  *
2809  * Input:
2810  *	ha = adapter block pointer.
2811  *	loop_id = device loop ID.
2812  *	TARGET_QUEUE_LOCK must be released.
2813  *	ADAPTER_STATE_LOCK must be released.
2814  *
2815  * Returns:
2816  *	qla2x00 local function return status code.
2817  *
2818  * Context:
2819  *	Kernel context.
2820  */
2821 int
2822 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2823     uint8_t area, uint8_t al_pa)
2824 {
2825 	int rval;
2826 	mbx_cmd_t mc;
2827 	mbx_cmd_t *mcp = &mc;
2828 
2829 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2830 	    "Entered %s.\n", __func__);
2831 
2832 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2833 	mcp->out_mb = MBX_1|MBX_0;
2834 	if (HAS_EXTENDED_IDS(vha->hw)) {
2835 		mcp->mb[1] = loop_id;
2836 		mcp->mb[10] = 0;
2837 		mcp->out_mb |= MBX_10;
2838 	} else {
2839 		mcp->mb[1] = loop_id << 8;
2840 	}
2841 
2842 	mcp->in_mb = MBX_1|MBX_0;
2843 	mcp->tov = MBX_TOV_SECONDS;
2844 	mcp->flags = 0;
2845 	rval = qla2x00_mailbox_command(vha, mcp);
2846 
2847 	if (rval != QLA_SUCCESS) {
2848 		/*EMPTY*/
2849 		ql_dbg(ql_dbg_mbx, vha, 0x1074,
2850 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2851 	} else {
2852 		/*EMPTY*/
2853 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2854 		    "Done %s.\n", __func__);
2855 	}
2856 
2857 	return rval;
2858 }
2859 
2860 /*
2861  * qla2x00_full_login_lip
2862  *	Issue full login LIP mailbox command.
2863  *
2864  * Input:
2865  *	ha = adapter block pointer.
2866  *	TARGET_QUEUE_LOCK must be released.
2867  *	ADAPTER_STATE_LOCK must be released.
2868  *
2869  * Returns:
2870  *	qla2x00 local function return status code.
2871  *
2872  * Context:
2873  *	Kernel context.
2874  */
2875 int
2876 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2877 {
2878 	int rval;
2879 	mbx_cmd_t mc;
2880 	mbx_cmd_t *mcp = &mc;
2881 
2882 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2883 	    "Entered %s.\n", __func__);
2884 
2885 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2886 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2887 	mcp->mb[2] = 0;
2888 	mcp->mb[3] = 0;
2889 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2890 	mcp->in_mb = MBX_0;
2891 	mcp->tov = MBX_TOV_SECONDS;
2892 	mcp->flags = 0;
2893 	rval = qla2x00_mailbox_command(vha, mcp);
2894 
2895 	if (rval != QLA_SUCCESS) {
2896 		/*EMPTY*/
2897 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2898 	} else {
2899 		/*EMPTY*/
2900 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2901 		    "Done %s.\n", __func__);
2902 	}
2903 
2904 	return rval;
2905 }
2906 
2907 /*
2908  * qla2x00_get_id_list
2909  *
2910  * Input:
2911  *	ha = adapter block pointer.
2912  *
2913  * Returns:
2914  *	qla2x00 local function return status code.
2915  *
2916  * Context:
2917  *	Kernel context.
2918  */
2919 int
2920 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2921     uint16_t *entries)
2922 {
2923 	int rval;
2924 	mbx_cmd_t mc;
2925 	mbx_cmd_t *mcp = &mc;
2926 
2927 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2928 	    "Entered %s.\n", __func__);
2929 
2930 	if (id_list == NULL)
2931 		return QLA_FUNCTION_FAILED;
2932 
2933 	mcp->mb[0] = MBC_GET_ID_LIST;
2934 	mcp->out_mb = MBX_0;
2935 	if (IS_FWI2_CAPABLE(vha->hw)) {
2936 		mcp->mb[2] = MSW(id_list_dma);
2937 		mcp->mb[3] = LSW(id_list_dma);
2938 		mcp->mb[6] = MSW(MSD(id_list_dma));
2939 		mcp->mb[7] = LSW(MSD(id_list_dma));
2940 		mcp->mb[8] = 0;
2941 		mcp->mb[9] = vha->vp_idx;
2942 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2943 	} else {
2944 		mcp->mb[1] = MSW(id_list_dma);
2945 		mcp->mb[2] = LSW(id_list_dma);
2946 		mcp->mb[3] = MSW(MSD(id_list_dma));
2947 		mcp->mb[6] = LSW(MSD(id_list_dma));
2948 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2949 	}
2950 	mcp->in_mb = MBX_1|MBX_0;
2951 	mcp->tov = MBX_TOV_SECONDS;
2952 	mcp->flags = 0;
2953 	rval = qla2x00_mailbox_command(vha, mcp);
2954 
2955 	if (rval != QLA_SUCCESS) {
2956 		/*EMPTY*/
2957 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2958 	} else {
2959 		*entries = mcp->mb[1];
2960 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2961 		    "Done %s.\n", __func__);
2962 	}
2963 
2964 	return rval;
2965 }
2966 
2967 /*
2968  * qla2x00_get_resource_cnts
2969  *	Get current firmware resource counts.
2970  *
2971  * Input:
2972  *	ha = adapter block pointer.
2973  *
2974  * Returns:
2975  *	qla2x00 local function return status code.
2976  *
2977  * Context:
2978  *	Kernel context.
2979  */
2980 int
2981 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2982 {
2983 	struct qla_hw_data *ha = vha->hw;
2984 	int rval;
2985 	mbx_cmd_t mc;
2986 	mbx_cmd_t *mcp = &mc;
2987 
2988 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2989 	    "Entered %s.\n", __func__);
2990 
2991 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2992 	mcp->out_mb = MBX_0;
2993 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2994 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
2995 	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
2996 		mcp->in_mb |= MBX_12;
2997 	mcp->tov = MBX_TOV_SECONDS;
2998 	mcp->flags = 0;
2999 	rval = qla2x00_mailbox_command(vha, mcp);
3000 
3001 	if (rval != QLA_SUCCESS) {
3002 		/*EMPTY*/
3003 		ql_dbg(ql_dbg_mbx, vha, 0x107d,
3004 		    "Failed mb[0]=%x.\n", mcp->mb[0]);
3005 	} else {
3006 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
3007 		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
3008 		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
3009 		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
3010 		    mcp->mb[11], mcp->mb[12]);
3011 
3012 		ha->orig_fw_tgt_xcb_count =  mcp->mb[1];
3013 		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3014 		ha->cur_fw_xcb_count = mcp->mb[3];
3015 		ha->orig_fw_xcb_count = mcp->mb[6];
3016 		ha->cur_fw_iocb_count = mcp->mb[7];
3017 		ha->orig_fw_iocb_count = mcp->mb[10];
3018 		if (ha->flags.npiv_supported)
3019 			ha->max_npiv_vports = mcp->mb[11];
3020 		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3021 		    IS_QLA28XX(ha))
3022 			ha->fw_max_fcf_count = mcp->mb[12];
3023 	}
3024 
3025 	return (rval);
3026 }
3027 
3028 /*
3029  * qla2x00_get_fcal_position_map
3030  *	Get FCAL (LILP) position map using mailbox command
3031  *
3032  * Input:
3033  *	ha = adapter state pointer.
3034  *	pos_map = buffer pointer (can be NULL).
3035  *
3036  * Returns:
3037  *	qla2x00 local function return status code.
3038  *
3039  * Context:
3040  *	Kernel context.
3041  */
3042 int
3043 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
3044 {
3045 	int rval;
3046 	mbx_cmd_t mc;
3047 	mbx_cmd_t *mcp = &mc;
3048 	char *pmap;
3049 	dma_addr_t pmap_dma;
3050 	struct qla_hw_data *ha = vha->hw;
3051 
3052 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
3053 	    "Entered %s.\n", __func__);
3054 
3055 	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3056 	if (pmap  == NULL) {
3057 		ql_log(ql_log_warn, vha, 0x1080,
3058 		    "Memory alloc failed.\n");
3059 		return QLA_MEMORY_ALLOC_FAILED;
3060 	}
3061 
3062 	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3063 	mcp->mb[2] = MSW(pmap_dma);
3064 	mcp->mb[3] = LSW(pmap_dma);
3065 	mcp->mb[6] = MSW(MSD(pmap_dma));
3066 	mcp->mb[7] = LSW(MSD(pmap_dma));
3067 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3068 	mcp->in_mb = MBX_1|MBX_0;
3069 	mcp->buf_size = FCAL_MAP_SIZE;
3070 	mcp->flags = MBX_DMA_IN;
3071 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3072 	rval = qla2x00_mailbox_command(vha, mcp);
3073 
3074 	if (rval == QLA_SUCCESS) {
3075 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3076 		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3077 		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3078 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3079 		    pmap, pmap[0] + 1);
3080 
3081 		if (pos_map)
3082 			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3083 	}
3084 	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3085 
3086 	if (rval != QLA_SUCCESS) {
3087 		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3088 	} else {
3089 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3090 		    "Done %s.\n", __func__);
3091 	}
3092 
3093 	return rval;
3094 }
3095 
3096 /*
3097  * qla2x00_get_link_status
3098  *
3099  * Input:
3100  *	ha = adapter block pointer.
3101  *	loop_id = device loop ID.
3102  *	ret_buf = pointer to link status return buffer.
3103  *
3104  * Returns:
3105  *	0 = success.
3106  *	BIT_0 = mem alloc error.
3107  *	BIT_1 = mailbox error.
3108  */
3109 int
3110 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3111     struct link_statistics *stats, dma_addr_t stats_dma)
3112 {
3113 	int rval;
3114 	mbx_cmd_t mc;
3115 	mbx_cmd_t *mcp = &mc;
3116 	uint32_t *iter = (uint32_t *)stats;
3117 	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3118 	struct qla_hw_data *ha = vha->hw;
3119 
3120 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3121 	    "Entered %s.\n", __func__);
3122 
3123 	mcp->mb[0] = MBC_GET_LINK_STATUS;
3124 	mcp->mb[2] = MSW(LSD(stats_dma));
3125 	mcp->mb[3] = LSW(LSD(stats_dma));
3126 	mcp->mb[6] = MSW(MSD(stats_dma));
3127 	mcp->mb[7] = LSW(MSD(stats_dma));
3128 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3129 	mcp->in_mb = MBX_0;
3130 	if (IS_FWI2_CAPABLE(ha)) {
3131 		mcp->mb[1] = loop_id;
3132 		mcp->mb[4] = 0;
3133 		mcp->mb[10] = 0;
3134 		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3135 		mcp->in_mb |= MBX_1;
3136 	} else if (HAS_EXTENDED_IDS(ha)) {
3137 		mcp->mb[1] = loop_id;
3138 		mcp->mb[10] = 0;
3139 		mcp->out_mb |= MBX_10|MBX_1;
3140 	} else {
3141 		mcp->mb[1] = loop_id << 8;
3142 		mcp->out_mb |= MBX_1;
3143 	}
3144 	mcp->tov = MBX_TOV_SECONDS;
3145 	mcp->flags = IOCTL_CMD;
3146 	rval = qla2x00_mailbox_command(vha, mcp);
3147 
3148 	if (rval == QLA_SUCCESS) {
3149 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3150 			ql_dbg(ql_dbg_mbx, vha, 0x1085,
3151 			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3152 			rval = QLA_FUNCTION_FAILED;
3153 		} else {
3154 			/* Re-endianize - firmware data is le32. */
3155 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3156 			    "Done %s.\n", __func__);
3157 			for ( ; dwords--; iter++)
3158 				le32_to_cpus(iter);
3159 		}
3160 	} else {
3161 		/* Failed. */
3162 		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3163 	}
3164 
3165 	return rval;
3166 }
3167 
3168 int
3169 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3170     dma_addr_t stats_dma, uint16_t options)
3171 {
3172 	int rval;
3173 	mbx_cmd_t mc;
3174 	mbx_cmd_t *mcp = &mc;
3175 	uint32_t *iter = (uint32_t *)stats;
3176 	ushort dwords = sizeof(*stats)/sizeof(*iter);
3177 
3178 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3179 	    "Entered %s.\n", __func__);
3180 
3181 	memset(&mc, 0, sizeof(mc));
3182 	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3183 	mc.mb[2] = MSW(LSD(stats_dma));
3184 	mc.mb[3] = LSW(LSD(stats_dma));
3185 	mc.mb[6] = MSW(MSD(stats_dma));
3186 	mc.mb[7] = LSW(MSD(stats_dma));
3187 	mc.mb[8] = dwords;
3188 	mc.mb[9] = vha->vp_idx;
3189 	mc.mb[10] = options;
3190 
3191 	rval = qla24xx_send_mb_cmd(vha, &mc);
3192 
3193 	if (rval == QLA_SUCCESS) {
3194 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3195 			ql_dbg(ql_dbg_mbx, vha, 0x1089,
3196 			    "Failed mb[0]=%x.\n", mcp->mb[0]);
3197 			rval = QLA_FUNCTION_FAILED;
3198 		} else {
3199 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3200 			    "Done %s.\n", __func__);
3201 			/* Re-endianize - firmware data is le32. */
3202 			for ( ; dwords--; iter++)
3203 				le32_to_cpus(iter);
3204 		}
3205 	} else {
3206 		/* Failed. */
3207 		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3208 	}
3209 
3210 	return rval;
3211 }
3212 
3213 int
3214 qla24xx_abort_command(srb_t *sp)
3215 {
3216 	int		rval;
3217 	unsigned long   flags = 0;
3218 
3219 	struct abort_entry_24xx *abt;
3220 	dma_addr_t	abt_dma;
3221 	uint32_t	handle;
3222 	fc_port_t	*fcport = sp->fcport;
3223 	struct scsi_qla_host *vha = fcport->vha;
3224 	struct qla_hw_data *ha = vha->hw;
3225 	struct req_que *req = vha->req;
3226 	struct qla_qpair *qpair = sp->qpair;
3227 
3228 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3229 	    "Entered %s.\n", __func__);
3230 
3231 	if (sp->qpair)
3232 		req = sp->qpair->req;
3233 	else
3234 		return QLA_FUNCTION_FAILED;
3235 
3236 	if (ql2xasynctmfenable)
3237 		return qla24xx_async_abort_command(sp);
3238 
3239 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3240 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3241 		if (req->outstanding_cmds[handle] == sp)
3242 			break;
3243 	}
3244 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3245 	if (handle == req->num_outstanding_cmds) {
3246 		/* Command not found. */
3247 		return QLA_FUNCTION_FAILED;
3248 	}
3249 
3250 	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3251 	if (abt == NULL) {
3252 		ql_log(ql_log_warn, vha, 0x108d,
3253 		    "Failed to allocate abort IOCB.\n");
3254 		return QLA_MEMORY_ALLOC_FAILED;
3255 	}
3256 
3257 	abt->entry_type = ABORT_IOCB_TYPE;
3258 	abt->entry_count = 1;
3259 	abt->handle = make_handle(req->id, abt->handle);
3260 	abt->nport_handle = cpu_to_le16(fcport->loop_id);
3261 	abt->handle_to_abort = make_handle(req->id, handle);
3262 	abt->port_id[0] = fcport->d_id.b.al_pa;
3263 	abt->port_id[1] = fcport->d_id.b.area;
3264 	abt->port_id[2] = fcport->d_id.b.domain;
3265 	abt->vp_index = fcport->vha->vp_idx;
3266 
3267 	abt->req_que_no = cpu_to_le16(req->id);
3268 	/* Need to pass original sp */
3269 	qla_nvme_abort_set_option(abt, sp);
3270 
3271 	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3272 	if (rval != QLA_SUCCESS) {
3273 		ql_dbg(ql_dbg_mbx, vha, 0x108e,
3274 		    "Failed to issue IOCB (%x).\n", rval);
3275 	} else if (abt->entry_status != 0) {
3276 		ql_dbg(ql_dbg_mbx, vha, 0x108f,
3277 		    "Failed to complete IOCB -- error status (%x).\n",
3278 		    abt->entry_status);
3279 		rval = QLA_FUNCTION_FAILED;
3280 	} else if (abt->nport_handle != cpu_to_le16(0)) {
3281 		ql_dbg(ql_dbg_mbx, vha, 0x1090,
3282 		    "Failed to complete IOCB -- completion status (%x).\n",
3283 		    le16_to_cpu(abt->nport_handle));
3284 		if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3285 			rval = QLA_FUNCTION_PARAMETER_ERROR;
3286 		else
3287 			rval = QLA_FUNCTION_FAILED;
3288 	} else {
3289 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3290 		    "Done %s.\n", __func__);
3291 	}
3292 	if (rval == QLA_SUCCESS)
3293 		qla_nvme_abort_process_comp_status(abt, sp);
3294 
3295 	qla_wait_nvme_release_cmd_kref(sp);
3296 
3297 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3298 
3299 	return rval;
3300 }
3301 
3302 struct tsk_mgmt_cmd {
3303 	union {
3304 		struct tsk_mgmt_entry tsk;
3305 		struct sts_entry_24xx sts;
3306 	} p;
3307 };
3308 
3309 static int
3310 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3311     uint64_t l, int tag)
3312 {
3313 	int		rval, rval2;
3314 	struct tsk_mgmt_cmd *tsk;
3315 	struct sts_entry_24xx *sts;
3316 	dma_addr_t	tsk_dma;
3317 	scsi_qla_host_t *vha;
3318 	struct qla_hw_data *ha;
3319 	struct req_que *req;
3320 	struct qla_qpair *qpair;
3321 
3322 	vha = fcport->vha;
3323 	ha = vha->hw;
3324 	req = vha->req;
3325 
3326 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3327 	    "Entered %s.\n", __func__);
3328 
3329 	if (vha->vp_idx && vha->qpair) {
3330 		/* NPIV port */
3331 		qpair = vha->qpair;
3332 		req = qpair->req;
3333 	}
3334 
3335 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3336 	if (tsk == NULL) {
3337 		ql_log(ql_log_warn, vha, 0x1093,
3338 		    "Failed to allocate task management IOCB.\n");
3339 		return QLA_MEMORY_ALLOC_FAILED;
3340 	}
3341 
3342 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3343 	tsk->p.tsk.entry_count = 1;
3344 	tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3345 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3346 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3347 	tsk->p.tsk.control_flags = cpu_to_le32(type);
3348 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3349 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3350 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3351 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3352 	if (type == TCF_LUN_RESET) {
3353 		int_to_scsilun(l, &tsk->p.tsk.lun);
3354 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3355 		    sizeof(tsk->p.tsk.lun));
3356 	}
3357 
3358 	sts = &tsk->p.sts;
3359 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3360 	if (rval != QLA_SUCCESS) {
3361 		ql_dbg(ql_dbg_mbx, vha, 0x1094,
3362 		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
3363 	} else if (sts->entry_status != 0) {
3364 		ql_dbg(ql_dbg_mbx, vha, 0x1095,
3365 		    "Failed to complete IOCB -- error status (%x).\n",
3366 		    sts->entry_status);
3367 		rval = QLA_FUNCTION_FAILED;
3368 	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3369 		ql_dbg(ql_dbg_mbx, vha, 0x1096,
3370 		    "Failed to complete IOCB -- completion status (%x).\n",
3371 		    le16_to_cpu(sts->comp_status));
3372 		rval = QLA_FUNCTION_FAILED;
3373 	} else if (le16_to_cpu(sts->scsi_status) &
3374 	    SS_RESPONSE_INFO_LEN_VALID) {
3375 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
3376 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3377 			    "Ignoring inconsistent data length -- not enough "
3378 			    "response info (%d).\n",
3379 			    le32_to_cpu(sts->rsp_data_len));
3380 		} else if (sts->data[3]) {
3381 			ql_dbg(ql_dbg_mbx, vha, 0x1098,
3382 			    "Failed to complete IOCB -- response (%x).\n",
3383 			    sts->data[3]);
3384 			rval = QLA_FUNCTION_FAILED;
3385 		}
3386 	}
3387 
3388 	/* Issue marker IOCB. */
3389 	rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3390 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3391 	if (rval2 != QLA_SUCCESS) {
3392 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
3393 		    "Failed to issue marker IOCB (%x).\n", rval2);
3394 	} else {
3395 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3396 		    "Done %s.\n", __func__);
3397 	}
3398 
3399 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3400 
3401 	return rval;
3402 }
3403 
3404 int
3405 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3406 {
3407 	struct qla_hw_data *ha = fcport->vha->hw;
3408 
3409 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3410 		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3411 
3412 	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3413 }
3414 
3415 int
3416 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3417 {
3418 	struct qla_hw_data *ha = fcport->vha->hw;
3419 
3420 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3421 		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3422 
3423 	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3424 }
3425 
3426 int
3427 qla2x00_system_error(scsi_qla_host_t *vha)
3428 {
3429 	int rval;
3430 	mbx_cmd_t mc;
3431 	mbx_cmd_t *mcp = &mc;
3432 	struct qla_hw_data *ha = vha->hw;
3433 
3434 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3435 		return QLA_FUNCTION_FAILED;
3436 
3437 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3438 	    "Entered %s.\n", __func__);
3439 
3440 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3441 	mcp->out_mb = MBX_0;
3442 	mcp->in_mb = MBX_0;
3443 	mcp->tov = 5;
3444 	mcp->flags = 0;
3445 	rval = qla2x00_mailbox_command(vha, mcp);
3446 
3447 	if (rval != QLA_SUCCESS) {
3448 		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3449 	} else {
3450 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3451 		    "Done %s.\n", __func__);
3452 	}
3453 
3454 	return rval;
3455 }
3456 
3457 int
3458 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3459 {
3460 	int rval;
3461 	mbx_cmd_t mc;
3462 	mbx_cmd_t *mcp = &mc;
3463 
3464 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3465 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3466 		return QLA_FUNCTION_FAILED;
3467 
3468 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3469 	    "Entered %s.\n", __func__);
3470 
3471 	mcp->mb[0] = MBC_WRITE_SERDES;
3472 	mcp->mb[1] = addr;
3473 	if (IS_QLA2031(vha->hw))
3474 		mcp->mb[2] = data & 0xff;
3475 	else
3476 		mcp->mb[2] = data;
3477 
3478 	mcp->mb[3] = 0;
3479 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3480 	mcp->in_mb = MBX_0;
3481 	mcp->tov = MBX_TOV_SECONDS;
3482 	mcp->flags = 0;
3483 	rval = qla2x00_mailbox_command(vha, mcp);
3484 
3485 	if (rval != QLA_SUCCESS) {
3486 		ql_dbg(ql_dbg_mbx, vha, 0x1183,
3487 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3488 	} else {
3489 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3490 		    "Done %s.\n", __func__);
3491 	}
3492 
3493 	return rval;
3494 }
3495 
3496 int
3497 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3498 {
3499 	int rval;
3500 	mbx_cmd_t mc;
3501 	mbx_cmd_t *mcp = &mc;
3502 
3503 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3504 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3505 		return QLA_FUNCTION_FAILED;
3506 
3507 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3508 	    "Entered %s.\n", __func__);
3509 
3510 	mcp->mb[0] = MBC_READ_SERDES;
3511 	mcp->mb[1] = addr;
3512 	mcp->mb[3] = 0;
3513 	mcp->out_mb = MBX_3|MBX_1|MBX_0;
3514 	mcp->in_mb = MBX_1|MBX_0;
3515 	mcp->tov = MBX_TOV_SECONDS;
3516 	mcp->flags = 0;
3517 	rval = qla2x00_mailbox_command(vha, mcp);
3518 
3519 	if (IS_QLA2031(vha->hw))
3520 		*data = mcp->mb[1] & 0xff;
3521 	else
3522 		*data = mcp->mb[1];
3523 
3524 	if (rval != QLA_SUCCESS) {
3525 		ql_dbg(ql_dbg_mbx, vha, 0x1186,
3526 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3527 	} else {
3528 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3529 		    "Done %s.\n", __func__);
3530 	}
3531 
3532 	return rval;
3533 }
3534 
3535 int
3536 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3537 {
3538 	int rval;
3539 	mbx_cmd_t mc;
3540 	mbx_cmd_t *mcp = &mc;
3541 
3542 	if (!IS_QLA8044(vha->hw))
3543 		return QLA_FUNCTION_FAILED;
3544 
3545 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3546 	    "Entered %s.\n", __func__);
3547 
3548 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3549 	mcp->mb[1] = HCS_WRITE_SERDES;
3550 	mcp->mb[3] = LSW(addr);
3551 	mcp->mb[4] = MSW(addr);
3552 	mcp->mb[5] = LSW(data);
3553 	mcp->mb[6] = MSW(data);
3554 	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3555 	mcp->in_mb = MBX_0;
3556 	mcp->tov = MBX_TOV_SECONDS;
3557 	mcp->flags = 0;
3558 	rval = qla2x00_mailbox_command(vha, mcp);
3559 
3560 	if (rval != QLA_SUCCESS) {
3561 		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3562 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3563 	} else {
3564 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3565 		    "Done %s.\n", __func__);
3566 	}
3567 
3568 	return rval;
3569 }
3570 
3571 int
3572 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3573 {
3574 	int rval;
3575 	mbx_cmd_t mc;
3576 	mbx_cmd_t *mcp = &mc;
3577 
3578 	if (!IS_QLA8044(vha->hw))
3579 		return QLA_FUNCTION_FAILED;
3580 
3581 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3582 	    "Entered %s.\n", __func__);
3583 
3584 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3585 	mcp->mb[1] = HCS_READ_SERDES;
3586 	mcp->mb[3] = LSW(addr);
3587 	mcp->mb[4] = MSW(addr);
3588 	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3589 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
3590 	mcp->tov = MBX_TOV_SECONDS;
3591 	mcp->flags = 0;
3592 	rval = qla2x00_mailbox_command(vha, mcp);
3593 
3594 	*data = mcp->mb[2] << 16 | mcp->mb[1];
3595 
3596 	if (rval != QLA_SUCCESS) {
3597 		ql_dbg(ql_dbg_mbx, vha, 0x118a,
3598 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3599 	} else {
3600 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3601 		    "Done %s.\n", __func__);
3602 	}
3603 
3604 	return rval;
3605 }
3606 
3607 /**
3608  * qla2x00_set_serdes_params() -
3609  * @vha: HA context
3610  * @sw_em_1g: serial link options
3611  * @sw_em_2g: serial link options
3612  * @sw_em_4g: serial link options
3613  *
3614  * Returns
3615  */
3616 int
3617 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3618     uint16_t sw_em_2g, uint16_t sw_em_4g)
3619 {
3620 	int rval;
3621 	mbx_cmd_t mc;
3622 	mbx_cmd_t *mcp = &mc;
3623 
3624 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3625 	    "Entered %s.\n", __func__);
3626 
3627 	mcp->mb[0] = MBC_SERDES_PARAMS;
3628 	mcp->mb[1] = BIT_0;
3629 	mcp->mb[2] = sw_em_1g | BIT_15;
3630 	mcp->mb[3] = sw_em_2g | BIT_15;
3631 	mcp->mb[4] = sw_em_4g | BIT_15;
3632 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3633 	mcp->in_mb = MBX_0;
3634 	mcp->tov = MBX_TOV_SECONDS;
3635 	mcp->flags = 0;
3636 	rval = qla2x00_mailbox_command(vha, mcp);
3637 
3638 	if (rval != QLA_SUCCESS) {
3639 		/*EMPTY*/
3640 		ql_dbg(ql_dbg_mbx, vha, 0x109f,
3641 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3642 	} else {
3643 		/*EMPTY*/
3644 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3645 		    "Done %s.\n", __func__);
3646 	}
3647 
3648 	return rval;
3649 }
3650 
3651 int
3652 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3653 {
3654 	int rval;
3655 	mbx_cmd_t mc;
3656 	mbx_cmd_t *mcp = &mc;
3657 
3658 	if (!IS_FWI2_CAPABLE(vha->hw))
3659 		return QLA_FUNCTION_FAILED;
3660 
3661 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3662 	    "Entered %s.\n", __func__);
3663 
3664 	mcp->mb[0] = MBC_STOP_FIRMWARE;
3665 	mcp->mb[1] = 0;
3666 	mcp->out_mb = MBX_1|MBX_0;
3667 	mcp->in_mb = MBX_0;
3668 	mcp->tov = 5;
3669 	mcp->flags = 0;
3670 	rval = qla2x00_mailbox_command(vha, mcp);
3671 
3672 	if (rval != QLA_SUCCESS) {
3673 		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3674 		if (mcp->mb[0] == MBS_INVALID_COMMAND)
3675 			rval = QLA_INVALID_COMMAND;
3676 	} else {
3677 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3678 		    "Done %s.\n", __func__);
3679 	}
3680 
3681 	return rval;
3682 }
3683 
3684 int
3685 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3686     uint16_t buffers)
3687 {
3688 	int rval;
3689 	mbx_cmd_t mc;
3690 	mbx_cmd_t *mcp = &mc;
3691 
3692 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3693 	    "Entered %s.\n", __func__);
3694 
3695 	if (!IS_FWI2_CAPABLE(vha->hw))
3696 		return QLA_FUNCTION_FAILED;
3697 
3698 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3699 		return QLA_FUNCTION_FAILED;
3700 
3701 	mcp->mb[0] = MBC_TRACE_CONTROL;
3702 	mcp->mb[1] = TC_EFT_ENABLE;
3703 	mcp->mb[2] = LSW(eft_dma);
3704 	mcp->mb[3] = MSW(eft_dma);
3705 	mcp->mb[4] = LSW(MSD(eft_dma));
3706 	mcp->mb[5] = MSW(MSD(eft_dma));
3707 	mcp->mb[6] = buffers;
3708 	mcp->mb[7] = TC_AEN_DISABLE;
3709 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3710 	mcp->in_mb = MBX_1|MBX_0;
3711 	mcp->tov = MBX_TOV_SECONDS;
3712 	mcp->flags = 0;
3713 	rval = qla2x00_mailbox_command(vha, mcp);
3714 	if (rval != QLA_SUCCESS) {
3715 		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3716 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3717 		    rval, mcp->mb[0], mcp->mb[1]);
3718 	} else {
3719 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3720 		    "Done %s.\n", __func__);
3721 	}
3722 
3723 	return rval;
3724 }
3725 
3726 int
3727 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3728 {
3729 	int rval;
3730 	mbx_cmd_t mc;
3731 	mbx_cmd_t *mcp = &mc;
3732 
3733 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3734 	    "Entered %s.\n", __func__);
3735 
3736 	if (!IS_FWI2_CAPABLE(vha->hw))
3737 		return QLA_FUNCTION_FAILED;
3738 
3739 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3740 		return QLA_FUNCTION_FAILED;
3741 
3742 	mcp->mb[0] = MBC_TRACE_CONTROL;
3743 	mcp->mb[1] = TC_EFT_DISABLE;
3744 	mcp->out_mb = MBX_1|MBX_0;
3745 	mcp->in_mb = MBX_1|MBX_0;
3746 	mcp->tov = MBX_TOV_SECONDS;
3747 	mcp->flags = 0;
3748 	rval = qla2x00_mailbox_command(vha, mcp);
3749 	if (rval != QLA_SUCCESS) {
3750 		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3751 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3752 		    rval, mcp->mb[0], mcp->mb[1]);
3753 	} else {
3754 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3755 		    "Done %s.\n", __func__);
3756 	}
3757 
3758 	return rval;
3759 }
3760 
3761 int
3762 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3763     uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3764 {
3765 	int rval;
3766 	mbx_cmd_t mc;
3767 	mbx_cmd_t *mcp = &mc;
3768 
3769 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3770 	    "Entered %s.\n", __func__);
3771 
3772 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3773 	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3774 	    !IS_QLA28XX(vha->hw))
3775 		return QLA_FUNCTION_FAILED;
3776 
3777 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3778 		return QLA_FUNCTION_FAILED;
3779 
3780 	mcp->mb[0] = MBC_TRACE_CONTROL;
3781 	mcp->mb[1] = TC_FCE_ENABLE;
3782 	mcp->mb[2] = LSW(fce_dma);
3783 	mcp->mb[3] = MSW(fce_dma);
3784 	mcp->mb[4] = LSW(MSD(fce_dma));
3785 	mcp->mb[5] = MSW(MSD(fce_dma));
3786 	mcp->mb[6] = buffers;
3787 	mcp->mb[7] = TC_AEN_DISABLE;
3788 	mcp->mb[8] = 0;
3789 	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3790 	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3791 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3792 	    MBX_1|MBX_0;
3793 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3794 	mcp->tov = MBX_TOV_SECONDS;
3795 	mcp->flags = 0;
3796 	rval = qla2x00_mailbox_command(vha, mcp);
3797 	if (rval != QLA_SUCCESS) {
3798 		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3799 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3800 		    rval, mcp->mb[0], mcp->mb[1]);
3801 	} else {
3802 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3803 		    "Done %s.\n", __func__);
3804 
3805 		if (mb)
3806 			memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3807 		if (dwords)
3808 			*dwords = buffers;
3809 	}
3810 
3811 	return rval;
3812 }
3813 
3814 int
3815 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3816 {
3817 	int rval;
3818 	mbx_cmd_t mc;
3819 	mbx_cmd_t *mcp = &mc;
3820 
3821 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3822 	    "Entered %s.\n", __func__);
3823 
3824 	if (!IS_FWI2_CAPABLE(vha->hw))
3825 		return QLA_FUNCTION_FAILED;
3826 
3827 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3828 		return QLA_FUNCTION_FAILED;
3829 
3830 	mcp->mb[0] = MBC_TRACE_CONTROL;
3831 	mcp->mb[1] = TC_FCE_DISABLE;
3832 	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3833 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
3834 	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3835 	    MBX_1|MBX_0;
3836 	mcp->tov = MBX_TOV_SECONDS;
3837 	mcp->flags = 0;
3838 	rval = qla2x00_mailbox_command(vha, mcp);
3839 	if (rval != QLA_SUCCESS) {
3840 		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3841 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3842 		    rval, mcp->mb[0], mcp->mb[1]);
3843 	} else {
3844 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3845 		    "Done %s.\n", __func__);
3846 
3847 		if (wr)
3848 			*wr = (uint64_t) mcp->mb[5] << 48 |
3849 			    (uint64_t) mcp->mb[4] << 32 |
3850 			    (uint64_t) mcp->mb[3] << 16 |
3851 			    (uint64_t) mcp->mb[2];
3852 		if (rd)
3853 			*rd = (uint64_t) mcp->mb[9] << 48 |
3854 			    (uint64_t) mcp->mb[8] << 32 |
3855 			    (uint64_t) mcp->mb[7] << 16 |
3856 			    (uint64_t) mcp->mb[6];
3857 	}
3858 
3859 	return rval;
3860 }
3861 
3862 int
3863 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3864 	uint16_t *port_speed, uint16_t *mb)
3865 {
3866 	int rval;
3867 	mbx_cmd_t mc;
3868 	mbx_cmd_t *mcp = &mc;
3869 
3870 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3871 	    "Entered %s.\n", __func__);
3872 
3873 	if (!IS_IIDMA_CAPABLE(vha->hw))
3874 		return QLA_FUNCTION_FAILED;
3875 
3876 	mcp->mb[0] = MBC_PORT_PARAMS;
3877 	mcp->mb[1] = loop_id;
3878 	mcp->mb[2] = mcp->mb[3] = 0;
3879 	mcp->mb[9] = vha->vp_idx;
3880 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3881 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3882 	mcp->tov = MBX_TOV_SECONDS;
3883 	mcp->flags = 0;
3884 	rval = qla2x00_mailbox_command(vha, mcp);
3885 
3886 	/* Return mailbox statuses. */
3887 	if (mb) {
3888 		mb[0] = mcp->mb[0];
3889 		mb[1] = mcp->mb[1];
3890 		mb[3] = mcp->mb[3];
3891 	}
3892 
3893 	if (rval != QLA_SUCCESS) {
3894 		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3895 	} else {
3896 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3897 		    "Done %s.\n", __func__);
3898 		if (port_speed)
3899 			*port_speed = mcp->mb[3];
3900 	}
3901 
3902 	return rval;
3903 }
3904 
3905 int
3906 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3907     uint16_t port_speed, uint16_t *mb)
3908 {
3909 	int rval;
3910 	mbx_cmd_t mc;
3911 	mbx_cmd_t *mcp = &mc;
3912 
3913 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3914 	    "Entered %s.\n", __func__);
3915 
3916 	if (!IS_IIDMA_CAPABLE(vha->hw))
3917 		return QLA_FUNCTION_FAILED;
3918 
3919 	mcp->mb[0] = MBC_PORT_PARAMS;
3920 	mcp->mb[1] = loop_id;
3921 	mcp->mb[2] = BIT_0;
3922 	mcp->mb[3] = port_speed & 0x3F;
3923 	mcp->mb[9] = vha->vp_idx;
3924 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3925 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3926 	mcp->tov = MBX_TOV_SECONDS;
3927 	mcp->flags = 0;
3928 	rval = qla2x00_mailbox_command(vha, mcp);
3929 
3930 	/* Return mailbox statuses. */
3931 	if (mb) {
3932 		mb[0] = mcp->mb[0];
3933 		mb[1] = mcp->mb[1];
3934 		mb[3] = mcp->mb[3];
3935 	}
3936 
3937 	if (rval != QLA_SUCCESS) {
3938 		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3939 		    "Failed=%x.\n", rval);
3940 	} else {
3941 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3942 		    "Done %s.\n", __func__);
3943 	}
3944 
3945 	return rval;
3946 }
3947 
3948 void
3949 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3950 	struct vp_rpt_id_entry_24xx *rptid_entry)
3951 {
3952 	struct qla_hw_data *ha = vha->hw;
3953 	scsi_qla_host_t *vp = NULL;
3954 	unsigned long   flags;
3955 	int found;
3956 	port_id_t id;
3957 	struct fc_port *fcport;
3958 
3959 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3960 	    "Entered %s.\n", __func__);
3961 
3962 	if (rptid_entry->entry_status != 0)
3963 		return;
3964 
3965 	id.b.domain = rptid_entry->port_id[2];
3966 	id.b.area   = rptid_entry->port_id[1];
3967 	id.b.al_pa  = rptid_entry->port_id[0];
3968 	id.b.rsvd_1 = 0;
3969 	ha->flags.n2n_ae = 0;
3970 
3971 	if (rptid_entry->format == 0) {
3972 		/* loop */
3973 		ql_dbg(ql_dbg_async, vha, 0x10b7,
3974 		    "Format 0 : Number of VPs setup %d, number of "
3975 		    "VPs acquired %d.\n", rptid_entry->vp_setup,
3976 		    rptid_entry->vp_acquired);
3977 		ql_dbg(ql_dbg_async, vha, 0x10b8,
3978 		    "Primary port id %02x%02x%02x.\n",
3979 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3980 		    rptid_entry->port_id[0]);
3981 		ha->current_topology = ISP_CFG_NL;
3982 		qlt_update_host_map(vha, id);
3983 
3984 	} else if (rptid_entry->format == 1) {
3985 		/* fabric */
3986 		ql_dbg(ql_dbg_async, vha, 0x10b9,
3987 		    "Format 1: VP[%d] enabled - status %d - with "
3988 		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3989 			rptid_entry->vp_status,
3990 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3991 		    rptid_entry->port_id[0]);
3992 		ql_dbg(ql_dbg_async, vha, 0x5075,
3993 		   "Format 1: Remote WWPN %8phC.\n",
3994 		   rptid_entry->u.f1.port_name);
3995 
3996 		ql_dbg(ql_dbg_async, vha, 0x5075,
3997 		   "Format 1: WWPN %8phC.\n",
3998 		   vha->port_name);
3999 
4000 		switch (rptid_entry->u.f1.flags & TOPO_MASK) {
4001 		case TOPO_N2N:
4002 			ha->current_topology = ISP_CFG_N;
4003 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4004 			list_for_each_entry(fcport, &vha->vp_fcports, list) {
4005 				fcport->scan_state = QLA_FCPORT_SCAN;
4006 				fcport->n2n_flag = 0;
4007 			}
4008 			id.b24 = 0;
4009 			if (wwn_to_u64(vha->port_name) >
4010 			    wwn_to_u64(rptid_entry->u.f1.port_name)) {
4011 				vha->d_id.b24 = 0;
4012 				vha->d_id.b.al_pa = 1;
4013 				ha->flags.n2n_bigger = 1;
4014 
4015 				id.b.al_pa = 2;
4016 				ql_dbg(ql_dbg_async, vha, 0x5075,
4017 				    "Format 1: assign local id %x remote id %x\n",
4018 				    vha->d_id.b24, id.b24);
4019 			} else {
4020 				ql_dbg(ql_dbg_async, vha, 0x5075,
4021 				    "Format 1: Remote login - Waiting for WWPN %8phC.\n",
4022 				    rptid_entry->u.f1.port_name);
4023 				ha->flags.n2n_bigger = 0;
4024 			}
4025 
4026 			fcport = qla2x00_find_fcport_by_wwpn(vha,
4027 			    rptid_entry->u.f1.port_name, 1);
4028 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4029 
4030 
4031 			if (fcport) {
4032 				fcport->plogi_nack_done_deadline = jiffies + HZ;
4033 				fcport->dm_login_expire = jiffies +
4034 					QLA_N2N_WAIT_TIME * HZ;
4035 				fcport->scan_state = QLA_FCPORT_FOUND;
4036 				fcport->n2n_flag = 1;
4037 				fcport->keep_nport_handle = 1;
4038 
4039 				if (wwn_to_u64(vha->port_name) >
4040 				    wwn_to_u64(fcport->port_name)) {
4041 					fcport->d_id = id;
4042 				}
4043 
4044 				switch (fcport->disc_state) {
4045 				case DSC_DELETED:
4046 					set_bit(RELOGIN_NEEDED,
4047 					    &vha->dpc_flags);
4048 					break;
4049 				case DSC_DELETE_PEND:
4050 					break;
4051 				default:
4052 					qlt_schedule_sess_for_deletion(fcport);
4053 					break;
4054 				}
4055 			} else {
4056 				qla24xx_post_newsess_work(vha, &id,
4057 				    rptid_entry->u.f1.port_name,
4058 				    rptid_entry->u.f1.node_name,
4059 				    NULL,
4060 				    FS_FCP_IS_N2N);
4061 			}
4062 
4063 			/* if our portname is higher then initiate N2N login */
4064 
4065 			set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4066 			return;
4067 		case TOPO_FL:
4068 			ha->current_topology = ISP_CFG_FL;
4069 			break;
4070 		case TOPO_F:
4071 			ha->current_topology = ISP_CFG_F;
4072 			break;
4073 		default:
4074 			break;
4075 		}
4076 
4077 		ha->flags.gpsc_supported = 1;
4078 		ha->current_topology = ISP_CFG_F;
4079 		/* buffer to buffer credit flag */
4080 		vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4081 
4082 		if (rptid_entry->vp_idx == 0) {
4083 			if (rptid_entry->vp_status == VP_STAT_COMPL) {
4084 				/* FA-WWN is only for physical port */
4085 				if (qla_ini_mode_enabled(vha) &&
4086 				    ha->flags.fawwpn_enabled &&
4087 				    (rptid_entry->u.f1.flags &
4088 				     BIT_6)) {
4089 					memcpy(vha->port_name,
4090 					    rptid_entry->u.f1.port_name,
4091 					    WWN_SIZE);
4092 				}
4093 
4094 				qlt_update_host_map(vha, id);
4095 			}
4096 
4097 			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4098 			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4099 		} else {
4100 			if (rptid_entry->vp_status != VP_STAT_COMPL &&
4101 				rptid_entry->vp_status != VP_STAT_ID_CHG) {
4102 				ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4103 				    "Could not acquire ID for VP[%d].\n",
4104 				    rptid_entry->vp_idx);
4105 				return;
4106 			}
4107 
4108 			found = 0;
4109 			spin_lock_irqsave(&ha->vport_slock, flags);
4110 			list_for_each_entry(vp, &ha->vp_list, list) {
4111 				if (rptid_entry->vp_idx == vp->vp_idx) {
4112 					found = 1;
4113 					break;
4114 				}
4115 			}
4116 			spin_unlock_irqrestore(&ha->vport_slock, flags);
4117 
4118 			if (!found)
4119 				return;
4120 
4121 			qlt_update_host_map(vp, id);
4122 
4123 			/*
4124 			 * Cannot configure here as we are still sitting on the
4125 			 * response queue. Handle it in dpc context.
4126 			 */
4127 			set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4128 			set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4129 			set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4130 		}
4131 		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4132 		qla2xxx_wake_dpc(vha);
4133 	} else if (rptid_entry->format == 2) {
4134 		ql_dbg(ql_dbg_async, vha, 0x505f,
4135 		    "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4136 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
4137 		    rptid_entry->port_id[0]);
4138 
4139 		ql_dbg(ql_dbg_async, vha, 0x5075,
4140 		    "N2N: Remote WWPN %8phC.\n",
4141 		    rptid_entry->u.f2.port_name);
4142 
4143 		/* N2N.  direct connect */
4144 		ha->current_topology = ISP_CFG_N;
4145 		ha->flags.rida_fmt2 = 1;
4146 		vha->d_id.b.domain = rptid_entry->port_id[2];
4147 		vha->d_id.b.area = rptid_entry->port_id[1];
4148 		vha->d_id.b.al_pa = rptid_entry->port_id[0];
4149 
4150 		ha->flags.n2n_ae = 1;
4151 		spin_lock_irqsave(&ha->vport_slock, flags);
4152 		qlt_update_vp_map(vha, SET_AL_PA);
4153 		spin_unlock_irqrestore(&ha->vport_slock, flags);
4154 
4155 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
4156 			fcport->scan_state = QLA_FCPORT_SCAN;
4157 			fcport->n2n_flag = 0;
4158 		}
4159 
4160 		fcport = qla2x00_find_fcport_by_wwpn(vha,
4161 		    rptid_entry->u.f2.port_name, 1);
4162 
4163 		if (fcport) {
4164 			fcport->login_retry = vha->hw->login_retry_count;
4165 			fcport->plogi_nack_done_deadline = jiffies + HZ;
4166 			fcport->scan_state = QLA_FCPORT_FOUND;
4167 			fcport->keep_nport_handle = 1;
4168 			fcport->n2n_flag = 1;
4169 			fcport->d_id.b.domain =
4170 				rptid_entry->u.f2.remote_nport_id[2];
4171 			fcport->d_id.b.area =
4172 				rptid_entry->u.f2.remote_nport_id[1];
4173 			fcport->d_id.b.al_pa =
4174 				rptid_entry->u.f2.remote_nport_id[0];
4175 		}
4176 	}
4177 }
4178 
4179 /*
4180  * qla24xx_modify_vp_config
4181  *	Change VP configuration for vha
4182  *
4183  * Input:
4184  *	vha = adapter block pointer.
4185  *
4186  * Returns:
4187  *	qla2xxx local function return status code.
4188  *
4189  * Context:
4190  *	Kernel context.
4191  */
4192 int
4193 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4194 {
4195 	int		rval;
4196 	struct vp_config_entry_24xx *vpmod;
4197 	dma_addr_t	vpmod_dma;
4198 	struct qla_hw_data *ha = vha->hw;
4199 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4200 
4201 	/* This can be called by the parent */
4202 
4203 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4204 	    "Entered %s.\n", __func__);
4205 
4206 	vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4207 	if (!vpmod) {
4208 		ql_log(ql_log_warn, vha, 0x10bc,
4209 		    "Failed to allocate modify VP IOCB.\n");
4210 		return QLA_MEMORY_ALLOC_FAILED;
4211 	}
4212 
4213 	vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4214 	vpmod->entry_count = 1;
4215 	vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4216 	vpmod->vp_count = 1;
4217 	vpmod->vp_index1 = vha->vp_idx;
4218 	vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4219 
4220 	qlt_modify_vp_config(vha, vpmod);
4221 
4222 	memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4223 	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4224 	vpmod->entry_count = 1;
4225 
4226 	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4227 	if (rval != QLA_SUCCESS) {
4228 		ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4229 		    "Failed to issue VP config IOCB (%x).\n", rval);
4230 	} else if (vpmod->comp_status != 0) {
4231 		ql_dbg(ql_dbg_mbx, vha, 0x10be,
4232 		    "Failed to complete IOCB -- error status (%x).\n",
4233 		    vpmod->comp_status);
4234 		rval = QLA_FUNCTION_FAILED;
4235 	} else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4236 		ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4237 		    "Failed to complete IOCB -- completion status (%x).\n",
4238 		    le16_to_cpu(vpmod->comp_status));
4239 		rval = QLA_FUNCTION_FAILED;
4240 	} else {
4241 		/* EMPTY */
4242 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4243 		    "Done %s.\n", __func__);
4244 		fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4245 	}
4246 	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4247 
4248 	return rval;
4249 }
4250 
4251 /*
4252  * qla2x00_send_change_request
4253  *	Receive or disable RSCN request from fabric controller
4254  *
4255  * Input:
4256  *	ha = adapter block pointer
4257  *	format = registration format:
4258  *		0 - Reserved
4259  *		1 - Fabric detected registration
4260  *		2 - N_port detected registration
4261  *		3 - Full registration
4262  *		FF - clear registration
4263  *	vp_idx = Virtual port index
4264  *
4265  * Returns:
4266  *	qla2x00 local function return status code.
4267  *
4268  * Context:
4269  *	Kernel Context
4270  */
4271 
4272 int
4273 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4274 			    uint16_t vp_idx)
4275 {
4276 	int rval;
4277 	mbx_cmd_t mc;
4278 	mbx_cmd_t *mcp = &mc;
4279 
4280 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4281 	    "Entered %s.\n", __func__);
4282 
4283 	mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4284 	mcp->mb[1] = format;
4285 	mcp->mb[9] = vp_idx;
4286 	mcp->out_mb = MBX_9|MBX_1|MBX_0;
4287 	mcp->in_mb = MBX_0|MBX_1;
4288 	mcp->tov = MBX_TOV_SECONDS;
4289 	mcp->flags = 0;
4290 	rval = qla2x00_mailbox_command(vha, mcp);
4291 
4292 	if (rval == QLA_SUCCESS) {
4293 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4294 			rval = BIT_1;
4295 		}
4296 	} else
4297 		rval = BIT_1;
4298 
4299 	return rval;
4300 }
4301 
4302 int
4303 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4304     uint32_t size)
4305 {
4306 	int rval;
4307 	mbx_cmd_t mc;
4308 	mbx_cmd_t *mcp = &mc;
4309 
4310 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4311 	    "Entered %s.\n", __func__);
4312 
4313 	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4314 		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4315 		mcp->mb[8] = MSW(addr);
4316 		mcp->mb[10] = 0;
4317 		mcp->out_mb = MBX_10|MBX_8|MBX_0;
4318 	} else {
4319 		mcp->mb[0] = MBC_DUMP_RISC_RAM;
4320 		mcp->out_mb = MBX_0;
4321 	}
4322 	mcp->mb[1] = LSW(addr);
4323 	mcp->mb[2] = MSW(req_dma);
4324 	mcp->mb[3] = LSW(req_dma);
4325 	mcp->mb[6] = MSW(MSD(req_dma));
4326 	mcp->mb[7] = LSW(MSD(req_dma));
4327 	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4328 	if (IS_FWI2_CAPABLE(vha->hw)) {
4329 		mcp->mb[4] = MSW(size);
4330 		mcp->mb[5] = LSW(size);
4331 		mcp->out_mb |= MBX_5|MBX_4;
4332 	} else {
4333 		mcp->mb[4] = LSW(size);
4334 		mcp->out_mb |= MBX_4;
4335 	}
4336 
4337 	mcp->in_mb = MBX_0;
4338 	mcp->tov = MBX_TOV_SECONDS;
4339 	mcp->flags = 0;
4340 	rval = qla2x00_mailbox_command(vha, mcp);
4341 
4342 	if (rval != QLA_SUCCESS) {
4343 		ql_dbg(ql_dbg_mbx, vha, 0x1008,
4344 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4345 	} else {
4346 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4347 		    "Done %s.\n", __func__);
4348 	}
4349 
4350 	return rval;
4351 }
4352 /* 84XX Support **************************************************************/
4353 
4354 struct cs84xx_mgmt_cmd {
4355 	union {
4356 		struct verify_chip_entry_84xx req;
4357 		struct verify_chip_rsp_84xx rsp;
4358 	} p;
4359 };
4360 
4361 int
4362 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4363 {
4364 	int rval, retry;
4365 	struct cs84xx_mgmt_cmd *mn;
4366 	dma_addr_t mn_dma;
4367 	uint16_t options;
4368 	unsigned long flags;
4369 	struct qla_hw_data *ha = vha->hw;
4370 
4371 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4372 	    "Entered %s.\n", __func__);
4373 
4374 	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4375 	if (mn == NULL) {
4376 		return QLA_MEMORY_ALLOC_FAILED;
4377 	}
4378 
4379 	/* Force Update? */
4380 	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4381 	/* Diagnostic firmware? */
4382 	/* options |= MENLO_DIAG_FW; */
4383 	/* We update the firmware with only one data sequence. */
4384 	options |= VCO_END_OF_DATA;
4385 
4386 	do {
4387 		retry = 0;
4388 		memset(mn, 0, sizeof(*mn));
4389 		mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4390 		mn->p.req.entry_count = 1;
4391 		mn->p.req.options = cpu_to_le16(options);
4392 
4393 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4394 		    "Dump of Verify Request.\n");
4395 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4396 		    mn, sizeof(*mn));
4397 
4398 		rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4399 		if (rval != QLA_SUCCESS) {
4400 			ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4401 			    "Failed to issue verify IOCB (%x).\n", rval);
4402 			goto verify_done;
4403 		}
4404 
4405 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4406 		    "Dump of Verify Response.\n");
4407 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4408 		    mn, sizeof(*mn));
4409 
4410 		status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4411 		status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4412 		    le16_to_cpu(mn->p.rsp.failure_code) : 0;
4413 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4414 		    "cs=%x fc=%x.\n", status[0], status[1]);
4415 
4416 		if (status[0] != CS_COMPLETE) {
4417 			rval = QLA_FUNCTION_FAILED;
4418 			if (!(options & VCO_DONT_UPDATE_FW)) {
4419 				ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4420 				    "Firmware update failed. Retrying "
4421 				    "without update firmware.\n");
4422 				options |= VCO_DONT_UPDATE_FW;
4423 				options &= ~VCO_FORCE_UPDATE;
4424 				retry = 1;
4425 			}
4426 		} else {
4427 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4428 			    "Firmware updated to %x.\n",
4429 			    le32_to_cpu(mn->p.rsp.fw_ver));
4430 
4431 			/* NOTE: we only update OP firmware. */
4432 			spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4433 			ha->cs84xx->op_fw_version =
4434 			    le32_to_cpu(mn->p.rsp.fw_ver);
4435 			spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4436 			    flags);
4437 		}
4438 	} while (retry);
4439 
4440 verify_done:
4441 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4442 
4443 	if (rval != QLA_SUCCESS) {
4444 		ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4445 		    "Failed=%x.\n", rval);
4446 	} else {
4447 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4448 		    "Done %s.\n", __func__);
4449 	}
4450 
4451 	return rval;
4452 }
4453 
4454 int
4455 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4456 {
4457 	int rval;
4458 	unsigned long flags;
4459 	mbx_cmd_t mc;
4460 	mbx_cmd_t *mcp = &mc;
4461 	struct qla_hw_data *ha = vha->hw;
4462 
4463 	if (!ha->flags.fw_started)
4464 		return QLA_SUCCESS;
4465 
4466 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4467 	    "Entered %s.\n", __func__);
4468 
4469 	if (IS_SHADOW_REG_CAPABLE(ha))
4470 		req->options |= BIT_13;
4471 
4472 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4473 	mcp->mb[1] = req->options;
4474 	mcp->mb[2] = MSW(LSD(req->dma));
4475 	mcp->mb[3] = LSW(LSD(req->dma));
4476 	mcp->mb[6] = MSW(MSD(req->dma));
4477 	mcp->mb[7] = LSW(MSD(req->dma));
4478 	mcp->mb[5] = req->length;
4479 	if (req->rsp)
4480 		mcp->mb[10] = req->rsp->id;
4481 	mcp->mb[12] = req->qos;
4482 	mcp->mb[11] = req->vp_idx;
4483 	mcp->mb[13] = req->rid;
4484 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4485 		mcp->mb[15] = 0;
4486 
4487 	mcp->mb[4] = req->id;
4488 	/* que in ptr index */
4489 	mcp->mb[8] = 0;
4490 	/* que out ptr index */
4491 	mcp->mb[9] = *req->out_ptr = 0;
4492 	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4493 			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4494 	mcp->in_mb = MBX_0;
4495 	mcp->flags = MBX_DMA_OUT;
4496 	mcp->tov = MBX_TOV_SECONDS * 2;
4497 
4498 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4499 	    IS_QLA28XX(ha))
4500 		mcp->in_mb |= MBX_1;
4501 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4502 		mcp->out_mb |= MBX_15;
4503 		/* debug q create issue in SR-IOV */
4504 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4505 	}
4506 
4507 	spin_lock_irqsave(&ha->hardware_lock, flags);
4508 	if (!(req->options & BIT_0)) {
4509 		wrt_reg_dword(req->req_q_in, 0);
4510 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4511 			wrt_reg_dword(req->req_q_out, 0);
4512 	}
4513 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4514 
4515 	rval = qla2x00_mailbox_command(vha, mcp);
4516 	if (rval != QLA_SUCCESS) {
4517 		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4518 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4519 	} else {
4520 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4521 		    "Done %s.\n", __func__);
4522 	}
4523 
4524 	return rval;
4525 }
4526 
4527 int
4528 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4529 {
4530 	int rval;
4531 	unsigned long flags;
4532 	mbx_cmd_t mc;
4533 	mbx_cmd_t *mcp = &mc;
4534 	struct qla_hw_data *ha = vha->hw;
4535 
4536 	if (!ha->flags.fw_started)
4537 		return QLA_SUCCESS;
4538 
4539 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4540 	    "Entered %s.\n", __func__);
4541 
4542 	if (IS_SHADOW_REG_CAPABLE(ha))
4543 		rsp->options |= BIT_13;
4544 
4545 	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4546 	mcp->mb[1] = rsp->options;
4547 	mcp->mb[2] = MSW(LSD(rsp->dma));
4548 	mcp->mb[3] = LSW(LSD(rsp->dma));
4549 	mcp->mb[6] = MSW(MSD(rsp->dma));
4550 	mcp->mb[7] = LSW(MSD(rsp->dma));
4551 	mcp->mb[5] = rsp->length;
4552 	mcp->mb[14] = rsp->msix->entry;
4553 	mcp->mb[13] = rsp->rid;
4554 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4555 		mcp->mb[15] = 0;
4556 
4557 	mcp->mb[4] = rsp->id;
4558 	/* que in ptr index */
4559 	mcp->mb[8] = *rsp->in_ptr = 0;
4560 	/* que out ptr index */
4561 	mcp->mb[9] = 0;
4562 	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4563 			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4564 	mcp->in_mb = MBX_0;
4565 	mcp->flags = MBX_DMA_OUT;
4566 	mcp->tov = MBX_TOV_SECONDS * 2;
4567 
4568 	if (IS_QLA81XX(ha)) {
4569 		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4570 		mcp->in_mb |= MBX_1;
4571 	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4572 		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4573 		mcp->in_mb |= MBX_1;
4574 		/* debug q create issue in SR-IOV */
4575 		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4576 	}
4577 
4578 	spin_lock_irqsave(&ha->hardware_lock, flags);
4579 	if (!(rsp->options & BIT_0)) {
4580 		wrt_reg_dword(rsp->rsp_q_out, 0);
4581 		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4582 			wrt_reg_dword(rsp->rsp_q_in, 0);
4583 	}
4584 
4585 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4586 
4587 	rval = qla2x00_mailbox_command(vha, mcp);
4588 	if (rval != QLA_SUCCESS) {
4589 		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4590 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4591 	} else {
4592 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4593 		    "Done %s.\n", __func__);
4594 	}
4595 
4596 	return rval;
4597 }
4598 
4599 int
4600 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4601 {
4602 	int rval;
4603 	mbx_cmd_t mc;
4604 	mbx_cmd_t *mcp = &mc;
4605 
4606 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4607 	    "Entered %s.\n", __func__);
4608 
4609 	mcp->mb[0] = MBC_IDC_ACK;
4610 	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4611 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4612 	mcp->in_mb = MBX_0;
4613 	mcp->tov = MBX_TOV_SECONDS;
4614 	mcp->flags = 0;
4615 	rval = qla2x00_mailbox_command(vha, mcp);
4616 
4617 	if (rval != QLA_SUCCESS) {
4618 		ql_dbg(ql_dbg_mbx, vha, 0x10da,
4619 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4620 	} else {
4621 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4622 		    "Done %s.\n", __func__);
4623 	}
4624 
4625 	return rval;
4626 }
4627 
4628 int
4629 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4630 {
4631 	int rval;
4632 	mbx_cmd_t mc;
4633 	mbx_cmd_t *mcp = &mc;
4634 
4635 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4636 	    "Entered %s.\n", __func__);
4637 
4638 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4639 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4640 		return QLA_FUNCTION_FAILED;
4641 
4642 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4643 	mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4644 	mcp->out_mb = MBX_1|MBX_0;
4645 	mcp->in_mb = MBX_1|MBX_0;
4646 	mcp->tov = MBX_TOV_SECONDS;
4647 	mcp->flags = 0;
4648 	rval = qla2x00_mailbox_command(vha, mcp);
4649 
4650 	if (rval != QLA_SUCCESS) {
4651 		ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4652 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4653 		    rval, mcp->mb[0], mcp->mb[1]);
4654 	} else {
4655 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4656 		    "Done %s.\n", __func__);
4657 		*sector_size = mcp->mb[1];
4658 	}
4659 
4660 	return rval;
4661 }
4662 
4663 int
4664 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4665 {
4666 	int rval;
4667 	mbx_cmd_t mc;
4668 	mbx_cmd_t *mcp = &mc;
4669 
4670 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4671 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4672 		return QLA_FUNCTION_FAILED;
4673 
4674 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4675 	    "Entered %s.\n", __func__);
4676 
4677 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4678 	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4679 	    FAC_OPT_CMD_WRITE_PROTECT;
4680 	mcp->out_mb = MBX_1|MBX_0;
4681 	mcp->in_mb = MBX_1|MBX_0;
4682 	mcp->tov = MBX_TOV_SECONDS;
4683 	mcp->flags = 0;
4684 	rval = qla2x00_mailbox_command(vha, mcp);
4685 
4686 	if (rval != QLA_SUCCESS) {
4687 		ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4688 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4689 		    rval, mcp->mb[0], mcp->mb[1]);
4690 	} else {
4691 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4692 		    "Done %s.\n", __func__);
4693 	}
4694 
4695 	return rval;
4696 }
4697 
4698 int
4699 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4700 {
4701 	int rval;
4702 	mbx_cmd_t mc;
4703 	mbx_cmd_t *mcp = &mc;
4704 
4705 	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4706 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4707 		return QLA_FUNCTION_FAILED;
4708 
4709 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4710 	    "Entered %s.\n", __func__);
4711 
4712 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4713 	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4714 	mcp->mb[2] = LSW(start);
4715 	mcp->mb[3] = MSW(start);
4716 	mcp->mb[4] = LSW(finish);
4717 	mcp->mb[5] = MSW(finish);
4718 	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4719 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
4720 	mcp->tov = MBX_TOV_SECONDS;
4721 	mcp->flags = 0;
4722 	rval = qla2x00_mailbox_command(vha, mcp);
4723 
4724 	if (rval != QLA_SUCCESS) {
4725 		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4726 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4727 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4728 	} else {
4729 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4730 		    "Done %s.\n", __func__);
4731 	}
4732 
4733 	return rval;
4734 }
4735 
4736 int
4737 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4738 {
4739 	int rval = QLA_SUCCESS;
4740 	mbx_cmd_t mc;
4741 	mbx_cmd_t *mcp = &mc;
4742 	struct qla_hw_data *ha = vha->hw;
4743 
4744 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4745 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4746 		return rval;
4747 
4748 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4749 	    "Entered %s.\n", __func__);
4750 
4751 	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4752 	mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4753 	    FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4754 	mcp->out_mb = MBX_1|MBX_0;
4755 	mcp->in_mb = MBX_1|MBX_0;
4756 	mcp->tov = MBX_TOV_SECONDS;
4757 	mcp->flags = 0;
4758 	rval = qla2x00_mailbox_command(vha, mcp);
4759 
4760 	if (rval != QLA_SUCCESS) {
4761 		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4762 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4763 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4764 	} else {
4765 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4766 		    "Done %s.\n", __func__);
4767 	}
4768 
4769 	return rval;
4770 }
4771 
4772 int
4773 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4774 {
4775 	int rval = 0;
4776 	mbx_cmd_t mc;
4777 	mbx_cmd_t *mcp = &mc;
4778 
4779 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4780 	    "Entered %s.\n", __func__);
4781 
4782 	mcp->mb[0] = MBC_RESTART_MPI_FW;
4783 	mcp->out_mb = MBX_0;
4784 	mcp->in_mb = MBX_0|MBX_1;
4785 	mcp->tov = MBX_TOV_SECONDS;
4786 	mcp->flags = 0;
4787 	rval = qla2x00_mailbox_command(vha, mcp);
4788 
4789 	if (rval != QLA_SUCCESS) {
4790 		ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4791 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
4792 		    rval, mcp->mb[0], mcp->mb[1]);
4793 	} else {
4794 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4795 		    "Done %s.\n", __func__);
4796 	}
4797 
4798 	return rval;
4799 }
4800 
4801 int
4802 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4803 {
4804 	int rval;
4805 	mbx_cmd_t mc;
4806 	mbx_cmd_t *mcp = &mc;
4807 	int i;
4808 	int len;
4809 	__le16 *str;
4810 	struct qla_hw_data *ha = vha->hw;
4811 
4812 	if (!IS_P3P_TYPE(ha))
4813 		return QLA_FUNCTION_FAILED;
4814 
4815 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4816 	    "Entered %s.\n", __func__);
4817 
4818 	str = (__force __le16 *)version;
4819 	len = strlen(version);
4820 
4821 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4822 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4823 	mcp->out_mb = MBX_1|MBX_0;
4824 	for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4825 		mcp->mb[i] = le16_to_cpup(str);
4826 		mcp->out_mb |= 1<<i;
4827 	}
4828 	for (; i < 16; i++) {
4829 		mcp->mb[i] = 0;
4830 		mcp->out_mb |= 1<<i;
4831 	}
4832 	mcp->in_mb = MBX_1|MBX_0;
4833 	mcp->tov = MBX_TOV_SECONDS;
4834 	mcp->flags = 0;
4835 	rval = qla2x00_mailbox_command(vha, mcp);
4836 
4837 	if (rval != QLA_SUCCESS) {
4838 		ql_dbg(ql_dbg_mbx, vha, 0x117c,
4839 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4840 	} else {
4841 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4842 		    "Done %s.\n", __func__);
4843 	}
4844 
4845 	return rval;
4846 }
4847 
4848 int
4849 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4850 {
4851 	int rval;
4852 	mbx_cmd_t mc;
4853 	mbx_cmd_t *mcp = &mc;
4854 	int len;
4855 	uint16_t dwlen;
4856 	uint8_t *str;
4857 	dma_addr_t str_dma;
4858 	struct qla_hw_data *ha = vha->hw;
4859 
4860 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4861 	    IS_P3P_TYPE(ha))
4862 		return QLA_FUNCTION_FAILED;
4863 
4864 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4865 	    "Entered %s.\n", __func__);
4866 
4867 	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4868 	if (!str) {
4869 		ql_log(ql_log_warn, vha, 0x117f,
4870 		    "Failed to allocate driver version param.\n");
4871 		return QLA_MEMORY_ALLOC_FAILED;
4872 	}
4873 
4874 	memcpy(str, "\x7\x3\x11\x0", 4);
4875 	dwlen = str[0];
4876 	len = dwlen * 4 - 4;
4877 	memset(str + 4, 0, len);
4878 	if (len > strlen(version))
4879 		len = strlen(version);
4880 	memcpy(str + 4, version, len);
4881 
4882 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4883 	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4884 	mcp->mb[2] = MSW(LSD(str_dma));
4885 	mcp->mb[3] = LSW(LSD(str_dma));
4886 	mcp->mb[6] = MSW(MSD(str_dma));
4887 	mcp->mb[7] = LSW(MSD(str_dma));
4888 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4889 	mcp->in_mb = MBX_1|MBX_0;
4890 	mcp->tov = MBX_TOV_SECONDS;
4891 	mcp->flags = 0;
4892 	rval = qla2x00_mailbox_command(vha, mcp);
4893 
4894 	if (rval != QLA_SUCCESS) {
4895 		ql_dbg(ql_dbg_mbx, vha, 0x1180,
4896 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4897 	} else {
4898 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4899 		    "Done %s.\n", __func__);
4900 	}
4901 
4902 	dma_pool_free(ha->s_dma_pool, str, str_dma);
4903 
4904 	return rval;
4905 }
4906 
4907 int
4908 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4909 			     void *buf, uint16_t bufsiz)
4910 {
4911 	int rval, i;
4912 	mbx_cmd_t mc;
4913 	mbx_cmd_t *mcp = &mc;
4914 	uint32_t	*bp;
4915 
4916 	if (!IS_FWI2_CAPABLE(vha->hw))
4917 		return QLA_FUNCTION_FAILED;
4918 
4919 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4920 	    "Entered %s.\n", __func__);
4921 
4922 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
4923 	mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4924 	mcp->mb[2] = MSW(buf_dma);
4925 	mcp->mb[3] = LSW(buf_dma);
4926 	mcp->mb[6] = MSW(MSD(buf_dma));
4927 	mcp->mb[7] = LSW(MSD(buf_dma));
4928 	mcp->mb[8] = bufsiz/4;
4929 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4930 	mcp->in_mb = MBX_1|MBX_0;
4931 	mcp->tov = MBX_TOV_SECONDS;
4932 	mcp->flags = 0;
4933 	rval = qla2x00_mailbox_command(vha, mcp);
4934 
4935 	if (rval != QLA_SUCCESS) {
4936 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
4937 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4938 	} else {
4939 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4940 		    "Done %s.\n", __func__);
4941 		bp = (uint32_t *) buf;
4942 		for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4943 			*bp = le32_to_cpu((__force __le32)*bp);
4944 	}
4945 
4946 	return rval;
4947 }
4948 
4949 #define PUREX_CMD_COUNT	2
4950 int
4951 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
4952 {
4953 	int rval;
4954 	mbx_cmd_t mc;
4955 	mbx_cmd_t *mcp = &mc;
4956 	uint8_t *els_cmd_map;
4957 	dma_addr_t els_cmd_map_dma;
4958 	uint8_t cmd_opcode[PUREX_CMD_COUNT];
4959 	uint8_t i, index, purex_bit;
4960 	struct qla_hw_data *ha = vha->hw;
4961 
4962 	if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
4963 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4964 		return QLA_SUCCESS;
4965 
4966 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
4967 	    "Entered %s.\n", __func__);
4968 
4969 	els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
4970 	    &els_cmd_map_dma, GFP_KERNEL);
4971 	if (!els_cmd_map) {
4972 		ql_log(ql_log_warn, vha, 0x7101,
4973 		    "Failed to allocate RDP els command param.\n");
4974 		return QLA_MEMORY_ALLOC_FAILED;
4975 	}
4976 
4977 	/* List of Purex ELS */
4978 	cmd_opcode[0] = ELS_FPIN;
4979 	cmd_opcode[1] = ELS_RDP;
4980 
4981 	for (i = 0; i < PUREX_CMD_COUNT; i++) {
4982 		index = cmd_opcode[i] / 8;
4983 		purex_bit = cmd_opcode[i] % 8;
4984 		els_cmd_map[index] |= 1 << purex_bit;
4985 	}
4986 
4987 	mcp->mb[0] = MBC_SET_RNID_PARAMS;
4988 	mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
4989 	mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
4990 	mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
4991 	mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
4992 	mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
4993 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4994 	mcp->in_mb = MBX_1|MBX_0;
4995 	mcp->tov = MBX_TOV_SECONDS;
4996 	mcp->flags = MBX_DMA_OUT;
4997 	mcp->buf_size = ELS_CMD_MAP_SIZE;
4998 	rval = qla2x00_mailbox_command(vha, mcp);
4999 
5000 	if (rval != QLA_SUCCESS) {
5001 		ql_dbg(ql_dbg_mbx, vha, 0x118d,
5002 		    "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
5003 	} else {
5004 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
5005 		    "Done %s.\n", __func__);
5006 	}
5007 
5008 	dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5009 	   els_cmd_map, els_cmd_map_dma);
5010 
5011 	return rval;
5012 }
5013 
5014 static int
5015 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5016 {
5017 	int rval;
5018 	mbx_cmd_t mc;
5019 	mbx_cmd_t *mcp = &mc;
5020 
5021 	if (!IS_FWI2_CAPABLE(vha->hw))
5022 		return QLA_FUNCTION_FAILED;
5023 
5024 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5025 	    "Entered %s.\n", __func__);
5026 
5027 	mcp->mb[0] = MBC_GET_RNID_PARAMS;
5028 	mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5029 	mcp->out_mb = MBX_1|MBX_0;
5030 	mcp->in_mb = MBX_1|MBX_0;
5031 	mcp->tov = MBX_TOV_SECONDS;
5032 	mcp->flags = 0;
5033 	rval = qla2x00_mailbox_command(vha, mcp);
5034 	*temp = mcp->mb[1];
5035 
5036 	if (rval != QLA_SUCCESS) {
5037 		ql_dbg(ql_dbg_mbx, vha, 0x115a,
5038 		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5039 	} else {
5040 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5041 		    "Done %s.\n", __func__);
5042 	}
5043 
5044 	return rval;
5045 }
5046 
5047 int
5048 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5049 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5050 {
5051 	int rval;
5052 	mbx_cmd_t mc;
5053 	mbx_cmd_t *mcp = &mc;
5054 	struct qla_hw_data *ha = vha->hw;
5055 
5056 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5057 	    "Entered %s.\n", __func__);
5058 
5059 	if (!IS_FWI2_CAPABLE(ha))
5060 		return QLA_FUNCTION_FAILED;
5061 
5062 	if (len == 1)
5063 		opt |= BIT_0;
5064 
5065 	mcp->mb[0] = MBC_READ_SFP;
5066 	mcp->mb[1] = dev;
5067 	mcp->mb[2] = MSW(LSD(sfp_dma));
5068 	mcp->mb[3] = LSW(LSD(sfp_dma));
5069 	mcp->mb[6] = MSW(MSD(sfp_dma));
5070 	mcp->mb[7] = LSW(MSD(sfp_dma));
5071 	mcp->mb[8] = len;
5072 	mcp->mb[9] = off;
5073 	mcp->mb[10] = opt;
5074 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5075 	mcp->in_mb = MBX_1|MBX_0;
5076 	mcp->tov = MBX_TOV_SECONDS;
5077 	mcp->flags = 0;
5078 	rval = qla2x00_mailbox_command(vha, mcp);
5079 
5080 	if (opt & BIT_0)
5081 		*sfp = mcp->mb[1];
5082 
5083 	if (rval != QLA_SUCCESS) {
5084 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5085 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5086 		if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5087 			/* sfp is not there */
5088 			rval = QLA_INTERFACE_ERROR;
5089 		}
5090 	} else {
5091 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5092 		    "Done %s.\n", __func__);
5093 	}
5094 
5095 	return rval;
5096 }
5097 
5098 int
5099 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5100 	uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5101 {
5102 	int rval;
5103 	mbx_cmd_t mc;
5104 	mbx_cmd_t *mcp = &mc;
5105 	struct qla_hw_data *ha = vha->hw;
5106 
5107 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5108 	    "Entered %s.\n", __func__);
5109 
5110 	if (!IS_FWI2_CAPABLE(ha))
5111 		return QLA_FUNCTION_FAILED;
5112 
5113 	if (len == 1)
5114 		opt |= BIT_0;
5115 
5116 	if (opt & BIT_0)
5117 		len = *sfp;
5118 
5119 	mcp->mb[0] = MBC_WRITE_SFP;
5120 	mcp->mb[1] = dev;
5121 	mcp->mb[2] = MSW(LSD(sfp_dma));
5122 	mcp->mb[3] = LSW(LSD(sfp_dma));
5123 	mcp->mb[6] = MSW(MSD(sfp_dma));
5124 	mcp->mb[7] = LSW(MSD(sfp_dma));
5125 	mcp->mb[8] = len;
5126 	mcp->mb[9] = off;
5127 	mcp->mb[10] = opt;
5128 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5129 	mcp->in_mb = MBX_1|MBX_0;
5130 	mcp->tov = MBX_TOV_SECONDS;
5131 	mcp->flags = 0;
5132 	rval = qla2x00_mailbox_command(vha, mcp);
5133 
5134 	if (rval != QLA_SUCCESS) {
5135 		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5136 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5137 	} else {
5138 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5139 		    "Done %s.\n", __func__);
5140 	}
5141 
5142 	return rval;
5143 }
5144 
5145 int
5146 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5147     uint16_t size_in_bytes, uint16_t *actual_size)
5148 {
5149 	int rval;
5150 	mbx_cmd_t mc;
5151 	mbx_cmd_t *mcp = &mc;
5152 
5153 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5154 	    "Entered %s.\n", __func__);
5155 
5156 	if (!IS_CNA_CAPABLE(vha->hw))
5157 		return QLA_FUNCTION_FAILED;
5158 
5159 	mcp->mb[0] = MBC_GET_XGMAC_STATS;
5160 	mcp->mb[2] = MSW(stats_dma);
5161 	mcp->mb[3] = LSW(stats_dma);
5162 	mcp->mb[6] = MSW(MSD(stats_dma));
5163 	mcp->mb[7] = LSW(MSD(stats_dma));
5164 	mcp->mb[8] = size_in_bytes >> 2;
5165 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5166 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5167 	mcp->tov = MBX_TOV_SECONDS;
5168 	mcp->flags = 0;
5169 	rval = qla2x00_mailbox_command(vha, mcp);
5170 
5171 	if (rval != QLA_SUCCESS) {
5172 		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5173 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5174 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5175 	} else {
5176 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5177 		    "Done %s.\n", __func__);
5178 
5179 
5180 		*actual_size = mcp->mb[2] << 2;
5181 	}
5182 
5183 	return rval;
5184 }
5185 
5186 int
5187 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5188     uint16_t size)
5189 {
5190 	int rval;
5191 	mbx_cmd_t mc;
5192 	mbx_cmd_t *mcp = &mc;
5193 
5194 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5195 	    "Entered %s.\n", __func__);
5196 
5197 	if (!IS_CNA_CAPABLE(vha->hw))
5198 		return QLA_FUNCTION_FAILED;
5199 
5200 	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5201 	mcp->mb[1] = 0;
5202 	mcp->mb[2] = MSW(tlv_dma);
5203 	mcp->mb[3] = LSW(tlv_dma);
5204 	mcp->mb[6] = MSW(MSD(tlv_dma));
5205 	mcp->mb[7] = LSW(MSD(tlv_dma));
5206 	mcp->mb[8] = size;
5207 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5208 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5209 	mcp->tov = MBX_TOV_SECONDS;
5210 	mcp->flags = 0;
5211 	rval = qla2x00_mailbox_command(vha, mcp);
5212 
5213 	if (rval != QLA_SUCCESS) {
5214 		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5215 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5216 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5217 	} else {
5218 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5219 		    "Done %s.\n", __func__);
5220 	}
5221 
5222 	return rval;
5223 }
5224 
5225 int
5226 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5227 {
5228 	int rval;
5229 	mbx_cmd_t mc;
5230 	mbx_cmd_t *mcp = &mc;
5231 
5232 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5233 	    "Entered %s.\n", __func__);
5234 
5235 	if (!IS_FWI2_CAPABLE(vha->hw))
5236 		return QLA_FUNCTION_FAILED;
5237 
5238 	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5239 	mcp->mb[1] = LSW(risc_addr);
5240 	mcp->mb[8] = MSW(risc_addr);
5241 	mcp->out_mb = MBX_8|MBX_1|MBX_0;
5242 	mcp->in_mb = MBX_3|MBX_2|MBX_0;
5243 	mcp->tov = MBX_TOV_SECONDS;
5244 	mcp->flags = 0;
5245 	rval = qla2x00_mailbox_command(vha, mcp);
5246 	if (rval != QLA_SUCCESS) {
5247 		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5248 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5249 	} else {
5250 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5251 		    "Done %s.\n", __func__);
5252 		*data = mcp->mb[3] << 16 | mcp->mb[2];
5253 	}
5254 
5255 	return rval;
5256 }
5257 
5258 int
5259 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5260 	uint16_t *mresp)
5261 {
5262 	int rval;
5263 	mbx_cmd_t mc;
5264 	mbx_cmd_t *mcp = &mc;
5265 
5266 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5267 	    "Entered %s.\n", __func__);
5268 
5269 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5270 	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5271 	mcp->mb[1] = mreq->options | BIT_6;	// BIT_6 specifies 64 bit addressing
5272 
5273 	/* transfer count */
5274 	mcp->mb[10] = LSW(mreq->transfer_size);
5275 	mcp->mb[11] = MSW(mreq->transfer_size);
5276 
5277 	/* send data address */
5278 	mcp->mb[14] = LSW(mreq->send_dma);
5279 	mcp->mb[15] = MSW(mreq->send_dma);
5280 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
5281 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
5282 
5283 	/* receive data address */
5284 	mcp->mb[16] = LSW(mreq->rcv_dma);
5285 	mcp->mb[17] = MSW(mreq->rcv_dma);
5286 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5287 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5288 
5289 	/* Iteration count */
5290 	mcp->mb[18] = LSW(mreq->iteration_count);
5291 	mcp->mb[19] = MSW(mreq->iteration_count);
5292 
5293 	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5294 	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5295 	if (IS_CNA_CAPABLE(vha->hw))
5296 		mcp->out_mb |= MBX_2;
5297 	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5298 
5299 	mcp->buf_size = mreq->transfer_size;
5300 	mcp->tov = MBX_TOV_SECONDS;
5301 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5302 
5303 	rval = qla2x00_mailbox_command(vha, mcp);
5304 
5305 	if (rval != QLA_SUCCESS) {
5306 		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5307 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5308 		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5309 		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5310 	} else {
5311 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5312 		    "Done %s.\n", __func__);
5313 	}
5314 
5315 	/* Copy mailbox information */
5316 	memcpy( mresp, mcp->mb, 64);
5317 	return rval;
5318 }
5319 
5320 int
5321 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5322 	uint16_t *mresp)
5323 {
5324 	int rval;
5325 	mbx_cmd_t mc;
5326 	mbx_cmd_t *mcp = &mc;
5327 	struct qla_hw_data *ha = vha->hw;
5328 
5329 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5330 	    "Entered %s.\n", __func__);
5331 
5332 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5333 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5334 	/* BIT_6 specifies 64bit address */
5335 	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5336 	if (IS_CNA_CAPABLE(ha)) {
5337 		mcp->mb[2] = vha->fcoe_fcf_idx;
5338 	}
5339 	mcp->mb[16] = LSW(mreq->rcv_dma);
5340 	mcp->mb[17] = MSW(mreq->rcv_dma);
5341 	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5342 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5343 
5344 	mcp->mb[10] = LSW(mreq->transfer_size);
5345 
5346 	mcp->mb[14] = LSW(mreq->send_dma);
5347 	mcp->mb[15] = MSW(mreq->send_dma);
5348 	mcp->mb[20] = LSW(MSD(mreq->send_dma));
5349 	mcp->mb[21] = MSW(MSD(mreq->send_dma));
5350 
5351 	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5352 	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5353 	if (IS_CNA_CAPABLE(ha))
5354 		mcp->out_mb |= MBX_2;
5355 
5356 	mcp->in_mb = MBX_0;
5357 	if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5358 	    IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5359 		mcp->in_mb |= MBX_1;
5360 	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5361 	    IS_QLA28XX(ha))
5362 		mcp->in_mb |= MBX_3;
5363 
5364 	mcp->tov = MBX_TOV_SECONDS;
5365 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5366 	mcp->buf_size = mreq->transfer_size;
5367 
5368 	rval = qla2x00_mailbox_command(vha, mcp);
5369 
5370 	if (rval != QLA_SUCCESS) {
5371 		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5372 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5373 		    rval, mcp->mb[0], mcp->mb[1]);
5374 	} else {
5375 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5376 		    "Done %s.\n", __func__);
5377 	}
5378 
5379 	/* Copy mailbox information */
5380 	memcpy(mresp, mcp->mb, 64);
5381 	return rval;
5382 }
5383 
5384 int
5385 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5386 {
5387 	int rval;
5388 	mbx_cmd_t mc;
5389 	mbx_cmd_t *mcp = &mc;
5390 
5391 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5392 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5393 
5394 	mcp->mb[0] = MBC_ISP84XX_RESET;
5395 	mcp->mb[1] = enable_diagnostic;
5396 	mcp->out_mb = MBX_1|MBX_0;
5397 	mcp->in_mb = MBX_1|MBX_0;
5398 	mcp->tov = MBX_TOV_SECONDS;
5399 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5400 	rval = qla2x00_mailbox_command(vha, mcp);
5401 
5402 	if (rval != QLA_SUCCESS)
5403 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5404 	else
5405 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5406 		    "Done %s.\n", __func__);
5407 
5408 	return rval;
5409 }
5410 
5411 int
5412 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5413 {
5414 	int rval;
5415 	mbx_cmd_t mc;
5416 	mbx_cmd_t *mcp = &mc;
5417 
5418 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5419 	    "Entered %s.\n", __func__);
5420 
5421 	if (!IS_FWI2_CAPABLE(vha->hw))
5422 		return QLA_FUNCTION_FAILED;
5423 
5424 	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5425 	mcp->mb[1] = LSW(risc_addr);
5426 	mcp->mb[2] = LSW(data);
5427 	mcp->mb[3] = MSW(data);
5428 	mcp->mb[8] = MSW(risc_addr);
5429 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5430 	mcp->in_mb = MBX_1|MBX_0;
5431 	mcp->tov = MBX_TOV_SECONDS;
5432 	mcp->flags = 0;
5433 	rval = qla2x00_mailbox_command(vha, mcp);
5434 	if (rval != QLA_SUCCESS) {
5435 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
5436 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5437 		    rval, mcp->mb[0], mcp->mb[1]);
5438 	} else {
5439 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5440 		    "Done %s.\n", __func__);
5441 	}
5442 
5443 	return rval;
5444 }
5445 
5446 int
5447 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5448 {
5449 	int rval;
5450 	uint32_t stat, timer;
5451 	uint16_t mb0 = 0;
5452 	struct qla_hw_data *ha = vha->hw;
5453 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5454 
5455 	rval = QLA_SUCCESS;
5456 
5457 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5458 	    "Entered %s.\n", __func__);
5459 
5460 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5461 
5462 	/* Write the MBC data to the registers */
5463 	wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5464 	wrt_reg_word(&reg->mailbox1, mb[0]);
5465 	wrt_reg_word(&reg->mailbox2, mb[1]);
5466 	wrt_reg_word(&reg->mailbox3, mb[2]);
5467 	wrt_reg_word(&reg->mailbox4, mb[3]);
5468 
5469 	wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
5470 
5471 	/* Poll for MBC interrupt */
5472 	for (timer = 6000000; timer; timer--) {
5473 		/* Check for pending interrupts. */
5474 		stat = rd_reg_dword(&reg->host_status);
5475 		if (stat & HSRX_RISC_INT) {
5476 			stat &= 0xff;
5477 
5478 			if (stat == 0x1 || stat == 0x2 ||
5479 			    stat == 0x10 || stat == 0x11) {
5480 				set_bit(MBX_INTERRUPT,
5481 				    &ha->mbx_cmd_flags);
5482 				mb0 = rd_reg_word(&reg->mailbox0);
5483 				wrt_reg_dword(&reg->hccr,
5484 				    HCCRX_CLR_RISC_INT);
5485 				rd_reg_dword(&reg->hccr);
5486 				break;
5487 			}
5488 		}
5489 		udelay(5);
5490 	}
5491 
5492 	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5493 		rval = mb0 & MBS_MASK;
5494 	else
5495 		rval = QLA_FUNCTION_FAILED;
5496 
5497 	if (rval != QLA_SUCCESS) {
5498 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
5499 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5500 	} else {
5501 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5502 		    "Done %s.\n", __func__);
5503 	}
5504 
5505 	return rval;
5506 }
5507 
5508 /* Set the specified data rate */
5509 int
5510 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5511 {
5512 	int rval;
5513 	mbx_cmd_t mc;
5514 	mbx_cmd_t *mcp = &mc;
5515 	struct qla_hw_data *ha = vha->hw;
5516 	uint16_t val;
5517 
5518 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5519 	    "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5520 	    mode);
5521 
5522 	if (!IS_FWI2_CAPABLE(ha))
5523 		return QLA_FUNCTION_FAILED;
5524 
5525 	memset(mcp, 0, sizeof(*mcp));
5526 	switch (ha->set_data_rate) {
5527 	case PORT_SPEED_AUTO:
5528 	case PORT_SPEED_4GB:
5529 	case PORT_SPEED_8GB:
5530 	case PORT_SPEED_16GB:
5531 	case PORT_SPEED_32GB:
5532 		val = ha->set_data_rate;
5533 		break;
5534 	default:
5535 		ql_log(ql_log_warn, vha, 0x1199,
5536 		    "Unrecognized speed setting:%d. Setting Autoneg\n",
5537 		    ha->set_data_rate);
5538 		val = ha->set_data_rate = PORT_SPEED_AUTO;
5539 		break;
5540 	}
5541 
5542 	mcp->mb[0] = MBC_DATA_RATE;
5543 	mcp->mb[1] = mode;
5544 	mcp->mb[2] = val;
5545 
5546 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5547 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5548 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5549 		mcp->in_mb |= MBX_4|MBX_3;
5550 	mcp->tov = MBX_TOV_SECONDS;
5551 	mcp->flags = 0;
5552 	rval = qla2x00_mailbox_command(vha, mcp);
5553 	if (rval != QLA_SUCCESS) {
5554 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5555 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5556 	} else {
5557 		if (mcp->mb[1] != 0x7)
5558 			ql_dbg(ql_dbg_mbx, vha, 0x1179,
5559 				"Speed set:0x%x\n", mcp->mb[1]);
5560 
5561 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5562 		    "Done %s.\n", __func__);
5563 	}
5564 
5565 	return rval;
5566 }
5567 
5568 int
5569 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5570 {
5571 	int rval;
5572 	mbx_cmd_t mc;
5573 	mbx_cmd_t *mcp = &mc;
5574 	struct qla_hw_data *ha = vha->hw;
5575 
5576 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5577 	    "Entered %s.\n", __func__);
5578 
5579 	if (!IS_FWI2_CAPABLE(ha))
5580 		return QLA_FUNCTION_FAILED;
5581 
5582 	mcp->mb[0] = MBC_DATA_RATE;
5583 	mcp->mb[1] = QLA_GET_DATA_RATE;
5584 	mcp->out_mb = MBX_1|MBX_0;
5585 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5586 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5587 		mcp->in_mb |= MBX_3;
5588 	mcp->tov = MBX_TOV_SECONDS;
5589 	mcp->flags = 0;
5590 	rval = qla2x00_mailbox_command(vha, mcp);
5591 	if (rval != QLA_SUCCESS) {
5592 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5593 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5594 	} else {
5595 		if (mcp->mb[1] != 0x7)
5596 			ha->link_data_rate = mcp->mb[1];
5597 
5598 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5599 			if (mcp->mb[4] & BIT_0)
5600 				ql_log(ql_log_info, vha, 0x11a2,
5601 				    "FEC=enabled (data rate).\n");
5602 		}
5603 
5604 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5605 		    "Done %s.\n", __func__);
5606 		if (mcp->mb[1] != 0x7)
5607 			ha->link_data_rate = mcp->mb[1];
5608 	}
5609 
5610 	return rval;
5611 }
5612 
5613 int
5614 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5615 {
5616 	int rval;
5617 	mbx_cmd_t mc;
5618 	mbx_cmd_t *mcp = &mc;
5619 	struct qla_hw_data *ha = vha->hw;
5620 
5621 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5622 	    "Entered %s.\n", __func__);
5623 
5624 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5625 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5626 		return QLA_FUNCTION_FAILED;
5627 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
5628 	mcp->out_mb = MBX_0;
5629 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5630 	mcp->tov = MBX_TOV_SECONDS;
5631 	mcp->flags = 0;
5632 
5633 	rval = qla2x00_mailbox_command(vha, mcp);
5634 
5635 	if (rval != QLA_SUCCESS) {
5636 		ql_dbg(ql_dbg_mbx, vha, 0x110a,
5637 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5638 	} else {
5639 		/* Copy all bits to preserve original value */
5640 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5641 
5642 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5643 		    "Done %s.\n", __func__);
5644 	}
5645 	return rval;
5646 }
5647 
5648 int
5649 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5650 {
5651 	int rval;
5652 	mbx_cmd_t mc;
5653 	mbx_cmd_t *mcp = &mc;
5654 
5655 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5656 	    "Entered %s.\n", __func__);
5657 
5658 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
5659 	/* Copy all bits to preserve original setting */
5660 	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5661 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5662 	mcp->in_mb = MBX_0;
5663 	mcp->tov = MBX_TOV_SECONDS;
5664 	mcp->flags = 0;
5665 	rval = qla2x00_mailbox_command(vha, mcp);
5666 
5667 	if (rval != QLA_SUCCESS) {
5668 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
5669 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5670 	} else
5671 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5672 		    "Done %s.\n", __func__);
5673 
5674 	return rval;
5675 }
5676 
5677 
5678 int
5679 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5680 		uint16_t *mb)
5681 {
5682 	int rval;
5683 	mbx_cmd_t mc;
5684 	mbx_cmd_t *mcp = &mc;
5685 	struct qla_hw_data *ha = vha->hw;
5686 
5687 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5688 	    "Entered %s.\n", __func__);
5689 
5690 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5691 		return QLA_FUNCTION_FAILED;
5692 
5693 	mcp->mb[0] = MBC_PORT_PARAMS;
5694 	mcp->mb[1] = loop_id;
5695 	if (ha->flags.fcp_prio_enabled)
5696 		mcp->mb[2] = BIT_1;
5697 	else
5698 		mcp->mb[2] = BIT_2;
5699 	mcp->mb[4] = priority & 0xf;
5700 	mcp->mb[9] = vha->vp_idx;
5701 	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5702 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5703 	mcp->tov = MBX_TOV_SECONDS;
5704 	mcp->flags = 0;
5705 	rval = qla2x00_mailbox_command(vha, mcp);
5706 	if (mb != NULL) {
5707 		mb[0] = mcp->mb[0];
5708 		mb[1] = mcp->mb[1];
5709 		mb[3] = mcp->mb[3];
5710 		mb[4] = mcp->mb[4];
5711 	}
5712 
5713 	if (rval != QLA_SUCCESS) {
5714 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5715 	} else {
5716 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5717 		    "Done %s.\n", __func__);
5718 	}
5719 
5720 	return rval;
5721 }
5722 
5723 int
5724 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5725 {
5726 	int rval = QLA_FUNCTION_FAILED;
5727 	struct qla_hw_data *ha = vha->hw;
5728 	uint8_t byte;
5729 
5730 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5731 		ql_dbg(ql_dbg_mbx, vha, 0x1150,
5732 		    "Thermal not supported by this card.\n");
5733 		return rval;
5734 	}
5735 
5736 	if (IS_QLA25XX(ha)) {
5737 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5738 		    ha->pdev->subsystem_device == 0x0175) {
5739 			rval = qla2x00_read_sfp(vha, 0, &byte,
5740 			    0x98, 0x1, 1, BIT_13|BIT_0);
5741 			*temp = byte;
5742 			return rval;
5743 		}
5744 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5745 		    ha->pdev->subsystem_device == 0x338e) {
5746 			rval = qla2x00_read_sfp(vha, 0, &byte,
5747 			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5748 			*temp = byte;
5749 			return rval;
5750 		}
5751 		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5752 		    "Thermal not supported by this card.\n");
5753 		return rval;
5754 	}
5755 
5756 	if (IS_QLA82XX(ha)) {
5757 		*temp = qla82xx_read_temperature(vha);
5758 		rval = QLA_SUCCESS;
5759 		return rval;
5760 	} else if (IS_QLA8044(ha)) {
5761 		*temp = qla8044_read_temperature(vha);
5762 		rval = QLA_SUCCESS;
5763 		return rval;
5764 	}
5765 
5766 	rval = qla2x00_read_asic_temperature(vha, temp);
5767 	return rval;
5768 }
5769 
5770 int
5771 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5772 {
5773 	int rval;
5774 	struct qla_hw_data *ha = vha->hw;
5775 	mbx_cmd_t mc;
5776 	mbx_cmd_t *mcp = &mc;
5777 
5778 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5779 	    "Entered %s.\n", __func__);
5780 
5781 	if (!IS_FWI2_CAPABLE(ha))
5782 		return QLA_FUNCTION_FAILED;
5783 
5784 	memset(mcp, 0, sizeof(mbx_cmd_t));
5785 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5786 	mcp->mb[1] = 1;
5787 
5788 	mcp->out_mb = MBX_1|MBX_0;
5789 	mcp->in_mb = MBX_0;
5790 	mcp->tov = MBX_TOV_SECONDS;
5791 	mcp->flags = 0;
5792 
5793 	rval = qla2x00_mailbox_command(vha, mcp);
5794 	if (rval != QLA_SUCCESS) {
5795 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
5796 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5797 	} else {
5798 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5799 		    "Done %s.\n", __func__);
5800 	}
5801 
5802 	return rval;
5803 }
5804 
5805 int
5806 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5807 {
5808 	int rval;
5809 	struct qla_hw_data *ha = vha->hw;
5810 	mbx_cmd_t mc;
5811 	mbx_cmd_t *mcp = &mc;
5812 
5813 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5814 	    "Entered %s.\n", __func__);
5815 
5816 	if (!IS_P3P_TYPE(ha))
5817 		return QLA_FUNCTION_FAILED;
5818 
5819 	memset(mcp, 0, sizeof(mbx_cmd_t));
5820 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5821 	mcp->mb[1] = 0;
5822 
5823 	mcp->out_mb = MBX_1|MBX_0;
5824 	mcp->in_mb = MBX_0;
5825 	mcp->tov = MBX_TOV_SECONDS;
5826 	mcp->flags = 0;
5827 
5828 	rval = qla2x00_mailbox_command(vha, mcp);
5829 	if (rval != QLA_SUCCESS) {
5830 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
5831 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5832 	} else {
5833 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5834 		    "Done %s.\n", __func__);
5835 	}
5836 
5837 	return rval;
5838 }
5839 
5840 int
5841 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5842 {
5843 	struct qla_hw_data *ha = vha->hw;
5844 	mbx_cmd_t mc;
5845 	mbx_cmd_t *mcp = &mc;
5846 	int rval = QLA_FUNCTION_FAILED;
5847 
5848 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5849 	    "Entered %s.\n", __func__);
5850 
5851 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5852 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5853 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5854 	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5855 	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5856 
5857 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5858 	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5859 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5860 
5861 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5862 	mcp->tov = MBX_TOV_SECONDS;
5863 	rval = qla2x00_mailbox_command(vha, mcp);
5864 
5865 	/* Always copy back return mailbox values. */
5866 	if (rval != QLA_SUCCESS) {
5867 		ql_dbg(ql_dbg_mbx, vha, 0x1120,
5868 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5869 		    (mcp->mb[1] << 16) | mcp->mb[0],
5870 		    (mcp->mb[3] << 16) | mcp->mb[2]);
5871 	} else {
5872 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5873 		    "Done %s.\n", __func__);
5874 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5875 		if (!ha->md_template_size) {
5876 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
5877 			    "Null template size obtained.\n");
5878 			rval = QLA_FUNCTION_FAILED;
5879 		}
5880 	}
5881 	return rval;
5882 }
5883 
5884 int
5885 qla82xx_md_get_template(scsi_qla_host_t *vha)
5886 {
5887 	struct qla_hw_data *ha = vha->hw;
5888 	mbx_cmd_t mc;
5889 	mbx_cmd_t *mcp = &mc;
5890 	int rval = QLA_FUNCTION_FAILED;
5891 
5892 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5893 	    "Entered %s.\n", __func__);
5894 
5895 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5896 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5897 	if (!ha->md_tmplt_hdr) {
5898 		ql_log(ql_log_warn, vha, 0x1124,
5899 		    "Unable to allocate memory for Minidump template.\n");
5900 		return rval;
5901 	}
5902 
5903 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5904 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5905 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5906 	mcp->mb[2] = LSW(RQST_TMPLT);
5907 	mcp->mb[3] = MSW(RQST_TMPLT);
5908 	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5909 	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5910 	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5911 	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5912 	mcp->mb[8] = LSW(ha->md_template_size);
5913 	mcp->mb[9] = MSW(ha->md_template_size);
5914 
5915 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5916 	mcp->tov = MBX_TOV_SECONDS;
5917 	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5918 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5919 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5920 	rval = qla2x00_mailbox_command(vha, mcp);
5921 
5922 	if (rval != QLA_SUCCESS) {
5923 		ql_dbg(ql_dbg_mbx, vha, 0x1125,
5924 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5925 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
5926 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
5927 	} else
5928 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5929 		    "Done %s.\n", __func__);
5930 	return rval;
5931 }
5932 
5933 int
5934 qla8044_md_get_template(scsi_qla_host_t *vha)
5935 {
5936 	struct qla_hw_data *ha = vha->hw;
5937 	mbx_cmd_t mc;
5938 	mbx_cmd_t *mcp = &mc;
5939 	int rval = QLA_FUNCTION_FAILED;
5940 	int offset = 0, size = MINIDUMP_SIZE_36K;
5941 
5942 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5943 	    "Entered %s.\n", __func__);
5944 
5945 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5946 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5947 	if (!ha->md_tmplt_hdr) {
5948 		ql_log(ql_log_warn, vha, 0xb11b,
5949 		    "Unable to allocate memory for Minidump template.\n");
5950 		return rval;
5951 	}
5952 
5953 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5954 	while (offset < ha->md_template_size) {
5955 		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5956 		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5957 		mcp->mb[2] = LSW(RQST_TMPLT);
5958 		mcp->mb[3] = MSW(RQST_TMPLT);
5959 		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5960 		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5961 		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5962 		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5963 		mcp->mb[8] = LSW(size);
5964 		mcp->mb[9] = MSW(size);
5965 		mcp->mb[10] = offset & 0x0000FFFF;
5966 		mcp->mb[11] = offset & 0xFFFF0000;
5967 		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5968 		mcp->tov = MBX_TOV_SECONDS;
5969 		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5970 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5971 		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5972 		rval = qla2x00_mailbox_command(vha, mcp);
5973 
5974 		if (rval != QLA_SUCCESS) {
5975 			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5976 				"mailbox command FAILED=0x%x, subcode=%x.\n",
5977 				((mcp->mb[1] << 16) | mcp->mb[0]),
5978 				((mcp->mb[3] << 16) | mcp->mb[2]));
5979 			return rval;
5980 		} else
5981 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5982 				"Done %s.\n", __func__);
5983 		offset = offset + size;
5984 	}
5985 	return rval;
5986 }
5987 
5988 int
5989 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5990 {
5991 	int rval;
5992 	struct qla_hw_data *ha = vha->hw;
5993 	mbx_cmd_t mc;
5994 	mbx_cmd_t *mcp = &mc;
5995 
5996 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5997 		return QLA_FUNCTION_FAILED;
5998 
5999 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6000 	    "Entered %s.\n", __func__);
6001 
6002 	memset(mcp, 0, sizeof(mbx_cmd_t));
6003 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6004 	mcp->mb[1] = led_cfg[0];
6005 	mcp->mb[2] = led_cfg[1];
6006 	if (IS_QLA8031(ha)) {
6007 		mcp->mb[3] = led_cfg[2];
6008 		mcp->mb[4] = led_cfg[3];
6009 		mcp->mb[5] = led_cfg[4];
6010 		mcp->mb[6] = led_cfg[5];
6011 	}
6012 
6013 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6014 	if (IS_QLA8031(ha))
6015 		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6016 	mcp->in_mb = MBX_0;
6017 	mcp->tov = MBX_TOV_SECONDS;
6018 	mcp->flags = 0;
6019 
6020 	rval = qla2x00_mailbox_command(vha, mcp);
6021 	if (rval != QLA_SUCCESS) {
6022 		ql_dbg(ql_dbg_mbx, vha, 0x1134,
6023 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6024 	} else {
6025 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6026 		    "Done %s.\n", __func__);
6027 	}
6028 
6029 	return rval;
6030 }
6031 
6032 int
6033 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6034 {
6035 	int rval;
6036 	struct qla_hw_data *ha = vha->hw;
6037 	mbx_cmd_t mc;
6038 	mbx_cmd_t *mcp = &mc;
6039 
6040 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6041 		return QLA_FUNCTION_FAILED;
6042 
6043 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6044 	    "Entered %s.\n", __func__);
6045 
6046 	memset(mcp, 0, sizeof(mbx_cmd_t));
6047 	mcp->mb[0] = MBC_GET_LED_CONFIG;
6048 
6049 	mcp->out_mb = MBX_0;
6050 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6051 	if (IS_QLA8031(ha))
6052 		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6053 	mcp->tov = MBX_TOV_SECONDS;
6054 	mcp->flags = 0;
6055 
6056 	rval = qla2x00_mailbox_command(vha, mcp);
6057 	if (rval != QLA_SUCCESS) {
6058 		ql_dbg(ql_dbg_mbx, vha, 0x1137,
6059 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6060 	} else {
6061 		led_cfg[0] = mcp->mb[1];
6062 		led_cfg[1] = mcp->mb[2];
6063 		if (IS_QLA8031(ha)) {
6064 			led_cfg[2] = mcp->mb[3];
6065 			led_cfg[3] = mcp->mb[4];
6066 			led_cfg[4] = mcp->mb[5];
6067 			led_cfg[5] = mcp->mb[6];
6068 		}
6069 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6070 		    "Done %s.\n", __func__);
6071 	}
6072 
6073 	return rval;
6074 }
6075 
6076 int
6077 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6078 {
6079 	int rval;
6080 	struct qla_hw_data *ha = vha->hw;
6081 	mbx_cmd_t mc;
6082 	mbx_cmd_t *mcp = &mc;
6083 
6084 	if (!IS_P3P_TYPE(ha))
6085 		return QLA_FUNCTION_FAILED;
6086 
6087 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6088 		"Entered %s.\n", __func__);
6089 
6090 	memset(mcp, 0, sizeof(mbx_cmd_t));
6091 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6092 	if (enable)
6093 		mcp->mb[7] = 0xE;
6094 	else
6095 		mcp->mb[7] = 0xD;
6096 
6097 	mcp->out_mb = MBX_7|MBX_0;
6098 	mcp->in_mb = MBX_0;
6099 	mcp->tov = MBX_TOV_SECONDS;
6100 	mcp->flags = 0;
6101 
6102 	rval = qla2x00_mailbox_command(vha, mcp);
6103 	if (rval != QLA_SUCCESS) {
6104 		ql_dbg(ql_dbg_mbx, vha, 0x1128,
6105 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6106 	} else {
6107 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6108 		    "Done %s.\n", __func__);
6109 	}
6110 
6111 	return rval;
6112 }
6113 
6114 int
6115 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6116 {
6117 	int rval;
6118 	struct qla_hw_data *ha = vha->hw;
6119 	mbx_cmd_t mc;
6120 	mbx_cmd_t *mcp = &mc;
6121 
6122 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6123 		return QLA_FUNCTION_FAILED;
6124 
6125 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6126 	    "Entered %s.\n", __func__);
6127 
6128 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6129 	mcp->mb[1] = LSW(reg);
6130 	mcp->mb[2] = MSW(reg);
6131 	mcp->mb[3] = LSW(data);
6132 	mcp->mb[4] = MSW(data);
6133 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6134 
6135 	mcp->in_mb = MBX_1|MBX_0;
6136 	mcp->tov = MBX_TOV_SECONDS;
6137 	mcp->flags = 0;
6138 	rval = qla2x00_mailbox_command(vha, mcp);
6139 
6140 	if (rval != QLA_SUCCESS) {
6141 		ql_dbg(ql_dbg_mbx, vha, 0x1131,
6142 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6143 	} else {
6144 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6145 		    "Done %s.\n", __func__);
6146 	}
6147 
6148 	return rval;
6149 }
6150 
6151 int
6152 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6153 {
6154 	int rval;
6155 	struct qla_hw_data *ha = vha->hw;
6156 	mbx_cmd_t mc;
6157 	mbx_cmd_t *mcp = &mc;
6158 
6159 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6160 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6161 		    "Implicit LOGO Unsupported.\n");
6162 		return QLA_FUNCTION_FAILED;
6163 	}
6164 
6165 
6166 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6167 	    "Entering %s.\n",  __func__);
6168 
6169 	/* Perform Implicit LOGO. */
6170 	mcp->mb[0] = MBC_PORT_LOGOUT;
6171 	mcp->mb[1] = fcport->loop_id;
6172 	mcp->mb[10] = BIT_15;
6173 	mcp->out_mb = MBX_10|MBX_1|MBX_0;
6174 	mcp->in_mb = MBX_0;
6175 	mcp->tov = MBX_TOV_SECONDS;
6176 	mcp->flags = 0;
6177 	rval = qla2x00_mailbox_command(vha, mcp);
6178 	if (rval != QLA_SUCCESS)
6179 		ql_dbg(ql_dbg_mbx, vha, 0x113d,
6180 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6181 	else
6182 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6183 		    "Done %s.\n", __func__);
6184 
6185 	return rval;
6186 }
6187 
6188 int
6189 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6190 {
6191 	int rval;
6192 	mbx_cmd_t mc;
6193 	mbx_cmd_t *mcp = &mc;
6194 	struct qla_hw_data *ha = vha->hw;
6195 	unsigned long retry_max_time = jiffies + (2 * HZ);
6196 
6197 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6198 		return QLA_FUNCTION_FAILED;
6199 
6200 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6201 
6202 retry_rd_reg:
6203 	mcp->mb[0] = MBC_READ_REMOTE_REG;
6204 	mcp->mb[1] = LSW(reg);
6205 	mcp->mb[2] = MSW(reg);
6206 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6207 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6208 	mcp->tov = MBX_TOV_SECONDS;
6209 	mcp->flags = 0;
6210 	rval = qla2x00_mailbox_command(vha, mcp);
6211 
6212 	if (rval != QLA_SUCCESS) {
6213 		ql_dbg(ql_dbg_mbx, vha, 0x114c,
6214 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6215 		    rval, mcp->mb[0], mcp->mb[1]);
6216 	} else {
6217 		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
6218 		if (*data == QLA8XXX_BAD_VALUE) {
6219 			/*
6220 			 * During soft-reset CAMRAM register reads might
6221 			 * return 0xbad0bad0. So retry for MAX of 2 sec
6222 			 * while reading camram registers.
6223 			 */
6224 			if (time_after(jiffies, retry_max_time)) {
6225 				ql_dbg(ql_dbg_mbx, vha, 0x1141,
6226 				    "Failure to read CAMRAM register. "
6227 				    "data=0x%x.\n", *data);
6228 				return QLA_FUNCTION_FAILED;
6229 			}
6230 			msleep(100);
6231 			goto retry_rd_reg;
6232 		}
6233 		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
6234 	}
6235 
6236 	return rval;
6237 }
6238 
6239 int
6240 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6241 {
6242 	int rval;
6243 	mbx_cmd_t mc;
6244 	mbx_cmd_t *mcp = &mc;
6245 	struct qla_hw_data *ha = vha->hw;
6246 
6247 	if (!IS_QLA83XX(ha))
6248 		return QLA_FUNCTION_FAILED;
6249 
6250 	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6251 
6252 	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6253 	mcp->out_mb = MBX_0;
6254 	mcp->in_mb = MBX_1|MBX_0;
6255 	mcp->tov = MBX_TOV_SECONDS;
6256 	mcp->flags = 0;
6257 	rval = qla2x00_mailbox_command(vha, mcp);
6258 
6259 	if (rval != QLA_SUCCESS) {
6260 		ql_dbg(ql_dbg_mbx, vha, 0x1144,
6261 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6262 		    rval, mcp->mb[0], mcp->mb[1]);
6263 		qla2xxx_dump_fw(vha);
6264 	} else {
6265 		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
6266 	}
6267 
6268 	return rval;
6269 }
6270 
6271 int
6272 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6273 	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6274 {
6275 	int rval;
6276 	mbx_cmd_t mc;
6277 	mbx_cmd_t *mcp = &mc;
6278 	uint8_t subcode = (uint8_t)options;
6279 	struct qla_hw_data *ha = vha->hw;
6280 
6281 	if (!IS_QLA8031(ha))
6282 		return QLA_FUNCTION_FAILED;
6283 
6284 	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6285 
6286 	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6287 	mcp->mb[1] = options;
6288 	mcp->out_mb = MBX_1|MBX_0;
6289 	if (subcode & BIT_2) {
6290 		mcp->mb[2] = LSW(start_addr);
6291 		mcp->mb[3] = MSW(start_addr);
6292 		mcp->mb[4] = LSW(end_addr);
6293 		mcp->mb[5] = MSW(end_addr);
6294 		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6295 	}
6296 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6297 	if (!(subcode & (BIT_2 | BIT_5)))
6298 		mcp->in_mb |= MBX_4|MBX_3;
6299 	mcp->tov = MBX_TOV_SECONDS;
6300 	mcp->flags = 0;
6301 	rval = qla2x00_mailbox_command(vha, mcp);
6302 
6303 	if (rval != QLA_SUCCESS) {
6304 		ql_dbg(ql_dbg_mbx, vha, 0x1147,
6305 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6306 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6307 		    mcp->mb[4]);
6308 		qla2xxx_dump_fw(vha);
6309 	} else {
6310 		if (subcode & BIT_5)
6311 			*sector_size = mcp->mb[1];
6312 		else if (subcode & (BIT_6 | BIT_7)) {
6313 			ql_dbg(ql_dbg_mbx, vha, 0x1148,
6314 			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6315 		} else if (subcode & (BIT_3 | BIT_4)) {
6316 			ql_dbg(ql_dbg_mbx, vha, 0x1149,
6317 			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6318 		}
6319 		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
6320 	}
6321 
6322 	return rval;
6323 }
6324 
6325 int
6326 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6327 	uint32_t size)
6328 {
6329 	int rval;
6330 	mbx_cmd_t mc;
6331 	mbx_cmd_t *mcp = &mc;
6332 
6333 	if (!IS_MCTP_CAPABLE(vha->hw))
6334 		return QLA_FUNCTION_FAILED;
6335 
6336 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6337 	    "Entered %s.\n", __func__);
6338 
6339 	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6340 	mcp->mb[1] = LSW(addr);
6341 	mcp->mb[2] = MSW(req_dma);
6342 	mcp->mb[3] = LSW(req_dma);
6343 	mcp->mb[4] = MSW(size);
6344 	mcp->mb[5] = LSW(size);
6345 	mcp->mb[6] = MSW(MSD(req_dma));
6346 	mcp->mb[7] = LSW(MSD(req_dma));
6347 	mcp->mb[8] = MSW(addr);
6348 	/* Setting RAM ID to valid */
6349 	/* For MCTP RAM ID is 0x40 */
6350 	mcp->mb[10] = BIT_7 | 0x40;
6351 
6352 	mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6353 	    MBX_0;
6354 
6355 	mcp->in_mb = MBX_0;
6356 	mcp->tov = MBX_TOV_SECONDS;
6357 	mcp->flags = 0;
6358 	rval = qla2x00_mailbox_command(vha, mcp);
6359 
6360 	if (rval != QLA_SUCCESS) {
6361 		ql_dbg(ql_dbg_mbx, vha, 0x114e,
6362 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6363 	} else {
6364 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6365 		    "Done %s.\n", __func__);
6366 	}
6367 
6368 	return rval;
6369 }
6370 
6371 int
6372 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6373 	void *dd_buf, uint size, uint options)
6374 {
6375 	int rval;
6376 	mbx_cmd_t mc;
6377 	mbx_cmd_t *mcp = &mc;
6378 	dma_addr_t dd_dma;
6379 
6380 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6381 	    !IS_QLA28XX(vha->hw))
6382 		return QLA_FUNCTION_FAILED;
6383 
6384 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6385 	    "Entered %s.\n", __func__);
6386 
6387 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
6388 	    dd_buf, size, DMA_FROM_DEVICE);
6389 	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6390 		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6391 		return QLA_MEMORY_ALLOC_FAILED;
6392 	}
6393 
6394 	memset(dd_buf, 0, size);
6395 
6396 	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6397 	mcp->mb[1] = options;
6398 	mcp->mb[2] = MSW(LSD(dd_dma));
6399 	mcp->mb[3] = LSW(LSD(dd_dma));
6400 	mcp->mb[6] = MSW(MSD(dd_dma));
6401 	mcp->mb[7] = LSW(MSD(dd_dma));
6402 	mcp->mb[8] = size;
6403 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6404 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6405 	mcp->buf_size = size;
6406 	mcp->flags = MBX_DMA_IN;
6407 	mcp->tov = MBX_TOV_SECONDS * 4;
6408 	rval = qla2x00_mailbox_command(vha, mcp);
6409 
6410 	if (rval != QLA_SUCCESS) {
6411 		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6412 	} else {
6413 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6414 		    "Done %s.\n", __func__);
6415 	}
6416 
6417 	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6418 	    size, DMA_FROM_DEVICE);
6419 
6420 	return rval;
6421 }
6422 
6423 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6424 {
6425 	sp->u.iocb_cmd.u.mbx.rc = res;
6426 
6427 	complete(&sp->u.iocb_cmd.u.mbx.comp);
6428 	/* don't free sp here. Let the caller do the free */
6429 }
6430 
6431 /*
6432  * This mailbox uses the iocb interface to send MB command.
6433  * This allows non-critial (non chip setup) command to go
6434  * out in parrallel.
6435  */
6436 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6437 {
6438 	int rval = QLA_FUNCTION_FAILED;
6439 	srb_t *sp;
6440 	struct srb_iocb *c;
6441 
6442 	if (!vha->hw->flags.fw_started)
6443 		goto done;
6444 
6445 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6446 	if (!sp)
6447 		goto done;
6448 
6449 	sp->type = SRB_MB_IOCB;
6450 	sp->name = mb_to_str(mcp->mb[0]);
6451 
6452 	c = &sp->u.iocb_cmd;
6453 	c->timeout = qla2x00_async_iocb_timeout;
6454 	init_completion(&c->u.mbx.comp);
6455 
6456 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6457 
6458 	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6459 
6460 	sp->done = qla2x00_async_mb_sp_done;
6461 
6462 	rval = qla2x00_start_sp(sp);
6463 	if (rval != QLA_SUCCESS) {
6464 		ql_dbg(ql_dbg_mbx, vha, 0x1018,
6465 		    "%s: %s Failed submission. %x.\n",
6466 		    __func__, sp->name, rval);
6467 		goto done_free_sp;
6468 	}
6469 
6470 	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6471 	    sp->name, sp->handle);
6472 
6473 	wait_for_completion(&c->u.mbx.comp);
6474 	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6475 
6476 	rval = c->u.mbx.rc;
6477 	switch (rval) {
6478 	case QLA_FUNCTION_TIMEOUT:
6479 		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6480 		    __func__, sp->name, rval);
6481 		break;
6482 	case  QLA_SUCCESS:
6483 		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6484 		    __func__, sp->name);
6485 		break;
6486 	default:
6487 		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6488 		    __func__, sp->name, rval);
6489 		break;
6490 	}
6491 
6492 done_free_sp:
6493 	sp->free(sp);
6494 done:
6495 	return rval;
6496 }
6497 
6498 /*
6499  * qla24xx_gpdb_wait
6500  * NOTE: Do not call this routine from DPC thread
6501  */
6502 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6503 {
6504 	int rval = QLA_FUNCTION_FAILED;
6505 	dma_addr_t pd_dma;
6506 	struct port_database_24xx *pd;
6507 	struct qla_hw_data *ha = vha->hw;
6508 	mbx_cmd_t mc;
6509 
6510 	if (!vha->hw->flags.fw_started)
6511 		goto done;
6512 
6513 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6514 	if (pd  == NULL) {
6515 		ql_log(ql_log_warn, vha, 0xd047,
6516 		    "Failed to allocate port database structure.\n");
6517 		goto done_free_sp;
6518 	}
6519 
6520 	memset(&mc, 0, sizeof(mc));
6521 	mc.mb[0] = MBC_GET_PORT_DATABASE;
6522 	mc.mb[1] = fcport->loop_id;
6523 	mc.mb[2] = MSW(pd_dma);
6524 	mc.mb[3] = LSW(pd_dma);
6525 	mc.mb[6] = MSW(MSD(pd_dma));
6526 	mc.mb[7] = LSW(MSD(pd_dma));
6527 	mc.mb[9] = vha->vp_idx;
6528 	mc.mb[10] = opt;
6529 
6530 	rval = qla24xx_send_mb_cmd(vha, &mc);
6531 	if (rval != QLA_SUCCESS) {
6532 		ql_dbg(ql_dbg_mbx, vha, 0x1193,
6533 		    "%s: %8phC fail\n", __func__, fcport->port_name);
6534 		goto done_free_sp;
6535 	}
6536 
6537 	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6538 
6539 	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6540 	    __func__, fcport->port_name);
6541 
6542 done_free_sp:
6543 	if (pd)
6544 		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6545 done:
6546 	return rval;
6547 }
6548 
6549 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6550     struct port_database_24xx *pd)
6551 {
6552 	int rval = QLA_SUCCESS;
6553 	uint64_t zero = 0;
6554 	u8 current_login_state, last_login_state;
6555 
6556 	if (NVME_TARGET(vha->hw, fcport)) {
6557 		current_login_state = pd->current_login_state >> 4;
6558 		last_login_state = pd->last_login_state >> 4;
6559 	} else {
6560 		current_login_state = pd->current_login_state & 0xf;
6561 		last_login_state = pd->last_login_state & 0xf;
6562 	}
6563 
6564 	/* Check for logged in state. */
6565 	if (current_login_state != PDS_PRLI_COMPLETE) {
6566 		ql_dbg(ql_dbg_mbx, vha, 0x119a,
6567 		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6568 		    current_login_state, last_login_state, fcport->loop_id);
6569 		rval = QLA_FUNCTION_FAILED;
6570 		goto gpd_error_out;
6571 	}
6572 
6573 	if (fcport->loop_id == FC_NO_LOOP_ID ||
6574 	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6575 	     memcmp(fcport->port_name, pd->port_name, 8))) {
6576 		/* We lost the device mid way. */
6577 		rval = QLA_NOT_LOGGED_IN;
6578 		goto gpd_error_out;
6579 	}
6580 
6581 	/* Names are little-endian. */
6582 	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6583 	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6584 
6585 	/* Get port_id of device. */
6586 	fcport->d_id.b.domain = pd->port_id[0];
6587 	fcport->d_id.b.area = pd->port_id[1];
6588 	fcport->d_id.b.al_pa = pd->port_id[2];
6589 	fcport->d_id.b.rsvd_1 = 0;
6590 
6591 	if (NVME_TARGET(vha->hw, fcport)) {
6592 		fcport->port_type = FCT_NVME;
6593 		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6594 			fcport->port_type |= FCT_NVME_INITIATOR;
6595 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6596 			fcport->port_type |= FCT_NVME_TARGET;
6597 		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6598 			fcport->port_type |= FCT_NVME_DISCOVERY;
6599 	} else {
6600 		/* If not target must be initiator or unknown type. */
6601 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6602 			fcport->port_type = FCT_INITIATOR;
6603 		else
6604 			fcport->port_type = FCT_TARGET;
6605 	}
6606 	/* Passback COS information. */
6607 	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6608 		FC_COS_CLASS2 : FC_COS_CLASS3;
6609 
6610 	if (pd->prli_svc_param_word_3[0] & BIT_7) {
6611 		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6612 		fcport->conf_compl_supported = 1;
6613 	}
6614 
6615 gpd_error_out:
6616 	return rval;
6617 }
6618 
6619 /*
6620  * qla24xx_gidlist__wait
6621  * NOTE: don't call this routine from DPC thread.
6622  */
6623 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6624 	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6625 {
6626 	int rval = QLA_FUNCTION_FAILED;
6627 	mbx_cmd_t mc;
6628 
6629 	if (!vha->hw->flags.fw_started)
6630 		goto done;
6631 
6632 	memset(&mc, 0, sizeof(mc));
6633 	mc.mb[0] = MBC_GET_ID_LIST;
6634 	mc.mb[2] = MSW(id_list_dma);
6635 	mc.mb[3] = LSW(id_list_dma);
6636 	mc.mb[6] = MSW(MSD(id_list_dma));
6637 	mc.mb[7] = LSW(MSD(id_list_dma));
6638 	mc.mb[8] = 0;
6639 	mc.mb[9] = vha->vp_idx;
6640 
6641 	rval = qla24xx_send_mb_cmd(vha, &mc);
6642 	if (rval != QLA_SUCCESS) {
6643 		ql_dbg(ql_dbg_mbx, vha, 0x119b,
6644 		    "%s:  fail\n", __func__);
6645 	} else {
6646 		*entries = mc.mb[1];
6647 		ql_dbg(ql_dbg_mbx, vha, 0x119c,
6648 		    "%s:  done\n", __func__);
6649 	}
6650 done:
6651 	return rval;
6652 }
6653 
6654 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6655 {
6656 	int rval;
6657 	mbx_cmd_t	mc;
6658 	mbx_cmd_t	*mcp = &mc;
6659 
6660 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6661 	    "Entered %s\n", __func__);
6662 
6663 	memset(mcp->mb, 0 , sizeof(mcp->mb));
6664 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6665 	mcp->mb[1] = 1;
6666 	mcp->mb[2] = value;
6667 	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6668 	mcp->in_mb = MBX_2 | MBX_0;
6669 	mcp->tov = MBX_TOV_SECONDS;
6670 	mcp->flags = 0;
6671 
6672 	rval = qla2x00_mailbox_command(vha, mcp);
6673 
6674 	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6675 	    (rval != QLA_SUCCESS) ? "Failed"  : "Done", rval);
6676 
6677 	return rval;
6678 }
6679 
6680 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6681 {
6682 	int rval;
6683 	mbx_cmd_t	mc;
6684 	mbx_cmd_t	*mcp = &mc;
6685 
6686 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6687 	    "Entered %s\n", __func__);
6688 
6689 	memset(mcp->mb, 0, sizeof(mcp->mb));
6690 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6691 	mcp->mb[1] = 0;
6692 	mcp->out_mb = MBX_1 | MBX_0;
6693 	mcp->in_mb = MBX_2 | MBX_0;
6694 	mcp->tov = MBX_TOV_SECONDS;
6695 	mcp->flags = 0;
6696 
6697 	rval = qla2x00_mailbox_command(vha, mcp);
6698 	if (rval == QLA_SUCCESS)
6699 		*value = mc.mb[2];
6700 
6701 	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6702 	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6703 
6704 	return rval;
6705 }
6706 
6707 int
6708 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6709 {
6710 	struct qla_hw_data *ha = vha->hw;
6711 	uint16_t iter, addr, offset;
6712 	dma_addr_t phys_addr;
6713 	int rval, c;
6714 	u8 *sfp_data;
6715 
6716 	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6717 	addr = 0xa0;
6718 	phys_addr = ha->sfp_data_dma;
6719 	sfp_data = ha->sfp_data;
6720 	offset = c = 0;
6721 
6722 	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6723 		if (iter == 4) {
6724 			/* Skip to next device address. */
6725 			addr = 0xa2;
6726 			offset = 0;
6727 		}
6728 
6729 		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6730 		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
6731 		if (rval != QLA_SUCCESS) {
6732 			ql_log(ql_log_warn, vha, 0x706d,
6733 			    "Unable to read SFP data (%x/%x/%x).\n", rval,
6734 			    addr, offset);
6735 
6736 			return rval;
6737 		}
6738 
6739 		if (buf && (c < count)) {
6740 			u16 sz;
6741 
6742 			if ((count - c) >= SFP_BLOCK_SIZE)
6743 				sz = SFP_BLOCK_SIZE;
6744 			else
6745 				sz = count - c;
6746 
6747 			memcpy(buf, sfp_data, sz);
6748 			buf += SFP_BLOCK_SIZE;
6749 			c += sz;
6750 		}
6751 		phys_addr += SFP_BLOCK_SIZE;
6752 		sfp_data  += SFP_BLOCK_SIZE;
6753 		offset += SFP_BLOCK_SIZE;
6754 	}
6755 
6756 	return rval;
6757 }
6758 
6759 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6760     uint16_t *out_mb, int out_mb_sz)
6761 {
6762 	int rval = QLA_FUNCTION_FAILED;
6763 	mbx_cmd_t mc;
6764 
6765 	if (!vha->hw->flags.fw_started)
6766 		goto done;
6767 
6768 	memset(&mc, 0, sizeof(mc));
6769 	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6770 
6771 	rval = qla24xx_send_mb_cmd(vha, &mc);
6772 	if (rval != QLA_SUCCESS) {
6773 		ql_dbg(ql_dbg_mbx, vha, 0xffff,
6774 			"%s:  fail\n", __func__);
6775 	} else {
6776 		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6777 			memcpy(out_mb, mc.mb, out_mb_sz);
6778 		else
6779 			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6780 
6781 		ql_dbg(ql_dbg_mbx, vha, 0xffff,
6782 			"%s:  done\n", __func__);
6783 	}
6784 done:
6785 	return rval;
6786 }
6787 
6788 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6789     uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6790     uint32_t sfub_len)
6791 {
6792 	int		rval;
6793 	mbx_cmd_t mc;
6794 	mbx_cmd_t *mcp = &mc;
6795 
6796 	mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6797 	mcp->mb[1] = opts;
6798 	mcp->mb[2] = region;
6799 	mcp->mb[3] = MSW(len);
6800 	mcp->mb[4] = LSW(len);
6801 	mcp->mb[5] = MSW(sfub_dma_addr);
6802 	mcp->mb[6] = LSW(sfub_dma_addr);
6803 	mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6804 	mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6805 	mcp->mb[9] = sfub_len;
6806 	mcp->out_mb =
6807 	    MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6808 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6809 	mcp->tov = MBX_TOV_SECONDS;
6810 	mcp->flags = 0;
6811 	rval = qla2x00_mailbox_command(vha, mcp);
6812 
6813 	if (rval != QLA_SUCCESS) {
6814 		ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6815 			__func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6816 			mcp->mb[2]);
6817 	}
6818 
6819 	return rval;
6820 }
6821 
6822 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6823     uint32_t data)
6824 {
6825 	int rval;
6826 	mbx_cmd_t mc;
6827 	mbx_cmd_t *mcp = &mc;
6828 
6829 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6830 	    "Entered %s.\n", __func__);
6831 
6832 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6833 	mcp->mb[1] = LSW(addr);
6834 	mcp->mb[2] = MSW(addr);
6835 	mcp->mb[3] = LSW(data);
6836 	mcp->mb[4] = MSW(data);
6837 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6838 	mcp->in_mb = MBX_1|MBX_0;
6839 	mcp->tov = MBX_TOV_SECONDS;
6840 	mcp->flags = 0;
6841 	rval = qla2x00_mailbox_command(vha, mcp);
6842 
6843 	if (rval != QLA_SUCCESS) {
6844 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6845 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6846 	} else {
6847 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6848 		    "Done %s.\n", __func__);
6849 	}
6850 
6851 	return rval;
6852 }
6853 
6854 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6855     uint32_t *data)
6856 {
6857 	int rval;
6858 	mbx_cmd_t mc;
6859 	mbx_cmd_t *mcp = &mc;
6860 
6861 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6862 	    "Entered %s.\n", __func__);
6863 
6864 	mcp->mb[0] = MBC_READ_REMOTE_REG;
6865 	mcp->mb[1] = LSW(addr);
6866 	mcp->mb[2] = MSW(addr);
6867 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6868 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6869 	mcp->tov = MBX_TOV_SECONDS;
6870 	mcp->flags = 0;
6871 	rval = qla2x00_mailbox_command(vha, mcp);
6872 
6873 	*data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6874 
6875 	if (rval != QLA_SUCCESS) {
6876 		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6877 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6878 	} else {
6879 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6880 		    "Done %s.\n", __func__);
6881 	}
6882 
6883 	return rval;
6884 }
6885 
6886 int
6887 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
6888 {
6889 	struct qla_hw_data *ha = vha->hw;
6890 	mbx_cmd_t mc;
6891 	mbx_cmd_t *mcp = &mc;
6892 	int rval;
6893 
6894 	if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6895 		return QLA_FUNCTION_FAILED;
6896 
6897 	ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
6898 	    __func__, options);
6899 
6900 	mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
6901 	mcp->mb[1] = options;
6902 	mcp->out_mb = MBX_1|MBX_0;
6903 	mcp->in_mb = MBX_1|MBX_0;
6904 	if (options & BIT_0) {
6905 		if (options & BIT_1) {
6906 			mcp->mb[2] = led[2];
6907 			mcp->out_mb |= MBX_2;
6908 		}
6909 		if (options & BIT_2) {
6910 			mcp->mb[3] = led[0];
6911 			mcp->out_mb |= MBX_3;
6912 		}
6913 		if (options & BIT_3) {
6914 			mcp->mb[4] = led[1];
6915 			mcp->out_mb |= MBX_4;
6916 		}
6917 	} else {
6918 		mcp->in_mb |= MBX_4|MBX_3|MBX_2;
6919 	}
6920 	mcp->tov = MBX_TOV_SECONDS;
6921 	mcp->flags = 0;
6922 	rval = qla2x00_mailbox_command(vha, mcp);
6923 	if (rval) {
6924 		ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
6925 		    __func__, rval, mcp->mb[0], mcp->mb[1]);
6926 		return rval;
6927 	}
6928 
6929 	if (options & BIT_0) {
6930 		ha->beacon_blink_led = 0;
6931 		ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
6932 	} else {
6933 		led[2] = mcp->mb[2];
6934 		led[0] = mcp->mb[3];
6935 		led[1] = mcp->mb[4];
6936 		ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
6937 		    __func__, led[0], led[1], led[2]);
6938 	}
6939 
6940 	return rval;
6941 }
6942 
6943 /**
6944  * qla_no_op_mb(): This MB is used to check if FW is still alive and
6945  * able to generate an interrupt. Otherwise, a timeout will trigger
6946  * FW dump + reset
6947  * @vha: host adapter pointer
6948  * Return: None
6949  */
6950 void qla_no_op_mb(struct scsi_qla_host *vha)
6951 {
6952 	mbx_cmd_t mc;
6953 	mbx_cmd_t *mcp = &mc;
6954 	int rval;
6955 
6956 	memset(&mc, 0, sizeof(mc));
6957 	mcp->mb[0] = 0; // noop cmd= 0
6958 	mcp->out_mb = MBX_0;
6959 	mcp->in_mb = MBX_0;
6960 	mcp->tov = 5;
6961 	mcp->flags = 0;
6962 	rval = qla2x00_mailbox_command(vha, mcp);
6963 
6964 	if (rval) {
6965 		ql_dbg(ql_dbg_async, vha, 0x7071,
6966 			"Failed %s %x\n", __func__, rval);
6967 	}
6968 }
6969