1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 
7 /*
8  * Table showing the current message id in use for a particular level.
9  * Update this table when adding log/debug messages.
10  * ----------------------------------------------------------------------
11  * |             Level            |   Last Value Used  |     Holes	|
12  * ----------------------------------------------------------------------
13  * | Module Init and Probe        |       0x0199       |                |
14  * | Mailbox commands             |       0x1206       | 0x11a5-0x11ff	|
15  * | Device Discovery             |       0x2134       | 0x2112-0x2115  |
16  * |                              |                    | 0x2127-0x2128  |
17  * | Queue Command and IO tracing |       0x3074       | 0x300b         |
18  * |                              |                    | 0x3027-0x3028  |
19  * |                              |                    | 0x303d-0x3041  |
20  * |                              |                    | 0x302e,0x3033  |
21  * |                              |                    | 0x3036,0x3038  |
22  * |                              |                    | 0x303a		|
23  * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
24  * | Async Events                 |       0x509c       |                |
25  * | Timer Routines               |       0x6012       |                |
26  * | User Space Interactions      |       0x70e3       | 0x7018,0x702e  |
27  * |				  |		       | 0x7020,0x7024  |
28  * |                              |                    | 0x7039,0x7045  |
29  * |                              |                    | 0x7073-0x7075  |
30  * |                              |                    | 0x70a5-0x70a6  |
31  * |                              |                    | 0x70a8,0x70ab  |
32  * |                              |                    | 0x70ad-0x70ae  |
33  * |                              |                    | 0x70d0-0x70d6	|
34  * |                              |                    | 0x70d7-0x70db  |
35  * | Task Management              |       0x8042       | 0x8000         |
36  * |                              |                    | 0x8019         |
37  * |                              |                    | 0x8025,0x8026  |
38  * |                              |                    | 0x8031,0x8032  |
39  * |                              |                    | 0x8039,0x803c  |
40  * | AER/EEH                      |       0x9011       |		|
41  * | Virtual Port                 |       0xa007       |		|
42  * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |
43  * |                              |                    | 0xb09e,0xb0ae  |
44  * |				  |		       | 0xb0c3,0xb0c6  |
45  * |                              |                    | 0xb0e0-0xb0ef  |
46  * |                              |                    | 0xb085,0xb0dc  |
47  * |                              |                    | 0xb107,0xb108  |
48  * |                              |                    | 0xb111,0xb11e  |
49  * |                              |                    | 0xb12c,0xb12d  |
50  * |                              |                    | 0xb13a,0xb142  |
51  * |                              |                    | 0xb13c-0xb140  |
52  * |                              |                    | 0xb149		|
53  * | MultiQ                       |       0xc010       |		|
54  * | Misc                         |       0xd303       | 0xd031-0xd0ff	|
55  * |                              |                    | 0xd101-0xd1fe	|
56  * |                              |                    | 0xd214-0xd2fe	|
57  * | Target Mode		  |	  0xe081       |		|
58  * | Target Mode Management	  |	  0xf09b       | 0xf002		|
59  * |                              |                    | 0xf046-0xf049  |
60  * | Target Mode Task Management  |	  0x1000d      |		|
61  * ----------------------------------------------------------------------
62  */
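/*
 * Illustrative example (ids and message text are made up for this note):
 * a new message in the "Module Init and Probe" range would take the next
 * free id after 0x0199 and be emitted through the standard helpers, e.g.
 *
 *	ql_dbg(ql_dbg_init, vha, 0x019a, "Init step %d complete.\n", step);
 *	ql_log(ql_log_warn, vha, 0x019b, "Resource allocation failed.\n");
 *
 * and the "Last Value Used" column above would then be bumped to match.
 */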
63 
64 #include "qla_def.h"
65 
66 #include <linux/delay.h>
67 #define CREATE_TRACE_POINTS
68 #include <trace/events/qla.h>
69 
70 static uint32_t ql_dbg_offset = 0x800;
71 
72 static inline void
73 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
74 {
75 	fw_dump->fw_major_version = htonl(ha->fw_major_version);
76 	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
77 	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
78 	fw_dump->fw_attributes = htonl(ha->fw_attributes);
79 
80 	fw_dump->vendor = htonl(ha->pdev->vendor);
81 	fw_dump->device = htonl(ha->pdev->device);
82 	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
83 	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
84 }
85 
86 static inline void *
87 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
88 {
89 	struct req_que *req = ha->req_q_map[0];
90 	struct rsp_que *rsp = ha->rsp_q_map[0];
91 	/* Request queue. */
92 	memcpy(ptr, req->ring, req->length *
93 	    sizeof(request_t));
94 
95 	/* Response queue. */
96 	ptr += req->length * sizeof(request_t);
97 	memcpy(ptr, rsp->ring, rsp->length  *
98 	    sizeof(response_t));
99 
100 	return ptr + (rsp->length * sizeof(response_t));
101 }
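/*
 * Sketch of how the two helpers above are used by the per-ISP dump
 * routines later in this file (simplified, error handling omitted):
 *
 *	fw = &ha->fw_dump->isp.isp23;
 *	qla2xxx_prep_dump(ha, ha->fw_dump);	// fill the common header
 *	...capture chip registers and RAM into *fw...
 *	qla2xxx_copy_queues(ha, nxt);		// append req/rsp rings
 */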
102 
103 int
104 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
105 	uint32_t ram_dwords, void **nxt)
106 {
107 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
108 	dma_addr_t dump_dma = ha->gid_list_dma;
109 	uint32_t *chunk = (uint32_t *)ha->gid_list;
110 	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
111 	uint32_t stat;
112 	ulong i, j, timer = 6000000;
113 	int rval = QLA_FUNCTION_FAILED;
114 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
115 
116 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
117 
118 	if (qla_pci_disconnected(vha, reg))
119 		return rval;
120 
121 	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
122 		if (i + dwords > ram_dwords)
123 			dwords = ram_dwords - i;
124 
125 		wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
126 		wrt_reg_word(&reg->mailbox1, LSW(addr));
127 		wrt_reg_word(&reg->mailbox8, MSW(addr));
128 
129 		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
130 		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
131 		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
132 		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
133 
134 		wrt_reg_word(&reg->mailbox4, MSW(dwords));
135 		wrt_reg_word(&reg->mailbox5, LSW(dwords));
136 
137 		wrt_reg_word(&reg->mailbox9, 0);
138 		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
139 
140 		ha->flags.mbox_int = 0;
141 		while (timer--) {
142 			udelay(5);
143 
144 			if (qla_pci_disconnected(vha, reg))
145 				return rval;
146 
147 			stat = rd_reg_dword(&reg->host_status);
148 			/* Check for pending interrupts. */
149 			if (!(stat & HSRX_RISC_INT))
150 				continue;
151 
152 			stat &= 0xff;
153 			if (stat != 0x1 && stat != 0x2 &&
154 			    stat != 0x10 && stat != 0x11) {
155 
156 				/* Clear this intr; it wasn't a mailbox intr */
157 				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
158 				rd_reg_dword(&reg->hccr);
159 				continue;
160 			}
161 
162 			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
163 			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
164 			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
165 			rd_reg_dword(&reg->hccr);
166 			break;
167 		}
168 		ha->flags.mbox_int = 1;
169 		*nxt = ram + i;
170 
171 		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
172 			/* no interrupt, timed out */
173 			return rval;
174 		}
175 		if (rval) {
176 			/* error completion status */
177 			return rval;
178 		}
179 		for (j = 0; j < dwords; j++) {
180 			ram[i + j] =
181 			    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
182 			    chunk[j] : swab32(chunk[j]);
183 		}
184 	}
185 
186 	*nxt = ram + i;
187 	return QLA_SUCCESS;
188 }
189 
190 int
191 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
192 		 uint32_t ram_dwords, void **nxt)
193 {
194 	int rval = QLA_FUNCTION_FAILED;
195 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
196 	dma_addr_t dump_dma = ha->gid_list_dma;
197 	uint32_t *chunk = (uint32_t *)ha->gid_list;
198 	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
199 	uint32_t stat;
200 	ulong i, j, timer = 6000000;
201 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
202 
203 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
204 
205 	if (qla_pci_disconnected(vha, reg))
206 		return rval;
207 
208 	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
209 		if (i + dwords > ram_dwords)
210 			dwords = ram_dwords - i;
211 
212 		wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
213 		wrt_reg_word(&reg->mailbox1, LSW(addr));
214 		wrt_reg_word(&reg->mailbox8, MSW(addr));
215 		wrt_reg_word(&reg->mailbox10, 0);
216 
217 		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
218 		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
219 		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
220 		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
221 
222 		wrt_reg_word(&reg->mailbox4, MSW(dwords));
223 		wrt_reg_word(&reg->mailbox5, LSW(dwords));
224 		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
225 
226 		ha->flags.mbox_int = 0;
227 		while (timer--) {
228 			udelay(5);
229 			if (qla_pci_disconnected(vha, reg))
230 				return rval;
231 
232 			stat = rd_reg_dword(&reg->host_status);
233 			/* Check for pending interrupts. */
234 			if (!(stat & HSRX_RISC_INT))
235 				continue;
236 
237 			stat &= 0xff;
238 			if (stat != 0x1 && stat != 0x2 &&
239 			    stat != 0x10 && stat != 0x11) {
240 				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
241 				rd_reg_dword(&reg->hccr);
242 				continue;
243 			}
244 
245 			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
246 			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
247 			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
248 			rd_reg_dword(&reg->hccr);
249 			break;
250 		}
251 		ha->flags.mbox_int = 1;
252 		*nxt = ram + i;
253 
254 		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
255 			/* no interrupt, timed out */
256 			return rval;
257 		}
258 		if (rval) {
259 			/* error completion status */
260 			return rval;
261 		}
262 		for (j = 0; j < dwords; j++) {
263 			ram[i + j] = (__force __be32)
264 				((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
265 				 chunk[j] : swab32(chunk[j]));
266 		}
267 	}
268 
269 	*nxt = ram + i;
270 	return QLA_SUCCESS;
271 }
272 
273 static int
274 qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
275 		    uint32_t cram_size, void **nxt)
276 {
277 	int rval;
278 
279 	/* Code RAM. */
280 	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
281 	if (rval != QLA_SUCCESS)
282 		return rval;
283 
284 	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
285 
286 	/* External Memory. */
287 	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
288 	    ha->fw_memory_size - 0x100000 + 1, nxt);
289 	if (rval == QLA_SUCCESS)
290 		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
291 
292 	return rval;
293 }
294 
295 static __be32 *
296 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
297 		    uint32_t count, __be32 *buf)
298 {
299 	__le32 __iomem *dmp_reg;
300 
301 	wrt_reg_dword(&reg->iobase_addr, iobase);
302 	dmp_reg = &reg->iobase_window;
303 	for ( ; count--; dmp_reg++)
304 		*buf++ = htonl(rd_reg_dword(dmp_reg));
305 
306 	return buf;
307 }
308 
309 void
310 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
311 {
312 	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);
313 
314 	/* 100 usec delay is sufficient for hardware to pause RISC */
315 	udelay(100);
316 	if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
317 		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
318 }
319 
320 int
321 qla24xx_soft_reset(struct qla_hw_data *ha)
322 {
323 	int rval = QLA_SUCCESS;
324 	uint32_t cnt;
325 	uint16_t wd;
326 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
327 
328 	/*
329 	 * Reset RISC. The delay is dependent on system architecture.
330 	 * Driver can proceed with the reset sequence after waiting
331 	 * for a timeout period.
332 	 */
333 	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
334 	for (cnt = 0; cnt < 30000; cnt++) {
335 		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
336 			break;
337 
338 		udelay(10);
339 	}
340 	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
341 		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
342 
343 	wrt_reg_dword(&reg->ctrl_status,
344 	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
345 	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
346 
347 	udelay(100);
348 
349 	/* Wait for soft-reset to complete. */
350 	for (cnt = 0; cnt < 30000; cnt++) {
351 		if ((rd_reg_dword(&reg->ctrl_status) &
352 		    CSRX_ISP_SOFT_RESET) == 0)
353 			break;
354 
355 		udelay(10);
356 	}
357 	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
358 		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
359 
360 	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
361 	rd_reg_dword(&reg->hccr);             /* PCI Posting. */
362 
363 	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
364 	    rval == QLA_SUCCESS; cnt--) {
365 		if (cnt)
366 			udelay(10);
367 		else
368 			rval = QLA_FUNCTION_TIMEOUT;
369 	}
370 	if (rval == QLA_SUCCESS)
371 		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
372 
373 	return rval;
374 }
375 
376 static int
377 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
378     uint32_t ram_words, void **nxt)
379 {
380 	int rval;
381 	uint32_t cnt, stat, timer, words, idx;
382 	uint16_t mb0;
383 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
384 	dma_addr_t dump_dma = ha->gid_list_dma;
385 	__le16 *dump = (__force __le16 *)ha->gid_list;
386 
387 	rval = QLA_SUCCESS;
388 	mb0 = 0;
389 
390 	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
391 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
392 
393 	words = qla2x00_gid_list_size(ha) / 2;
394 	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
395 	    cnt += words, addr += words) {
396 		if (cnt + words > ram_words)
397 			words = ram_words - cnt;
398 
399 		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
400 		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
401 
402 		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
403 		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
404 		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
405 		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
406 
407 		WRT_MAILBOX_REG(ha, reg, 4, words);
408 		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
409 
410 		for (timer = 6000000; timer; timer--) {
411 			/* Check for pending interrupts. */
412 			stat = rd_reg_dword(&reg->u.isp2300.host_status);
413 			if (stat & HSR_RISC_INT) {
414 				stat &= 0xff;
415 
416 				if (stat == 0x1 || stat == 0x2) {
417 					set_bit(MBX_INTERRUPT,
418 					    &ha->mbx_cmd_flags);
419 
420 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
421 
422 					/* Release mailbox registers. */
423 					wrt_reg_word(&reg->semaphore, 0);
424 					wrt_reg_word(&reg->hccr,
425 					    HCCR_CLR_RISC_INT);
426 					rd_reg_word(&reg->hccr);
427 					break;
428 				} else if (stat == 0x10 || stat == 0x11) {
429 					set_bit(MBX_INTERRUPT,
430 					    &ha->mbx_cmd_flags);
431 
432 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
433 
434 					wrt_reg_word(&reg->hccr,
435 					    HCCR_CLR_RISC_INT);
436 					rd_reg_word(&reg->hccr);
437 					break;
438 				}
439 
440 				/* clear this intr; it wasn't a mailbox intr */
441 				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
442 				rd_reg_word(&reg->hccr);
443 			}
444 			udelay(5);
445 		}
446 
447 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
448 			rval = mb0 & MBS_MASK;
449 			for (idx = 0; idx < words; idx++)
450 				ram[cnt + idx] =
451 					cpu_to_be16(le16_to_cpu(dump[idx]));
452 		} else {
453 			rval = QLA_FUNCTION_FAILED;
454 		}
455 	}
456 
457 	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
458 	return rval;
459 }
460 
461 static inline void
462 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
463 		    __be16 *buf)
464 {
465 	__le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
466 
467 	for ( ; count--; dmp_reg++)
468 		*buf++ = htons(rd_reg_word(dmp_reg));
469 }
470 
471 static inline void *
472 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
473 {
474 	if (!ha->eft)
475 		return ptr;
476 
477 	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
478 	return ptr + ntohl(ha->fw_dump->eft_size);
479 }
480 
481 static inline void *
482 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
483 {
484 	uint32_t cnt;
485 	__be32 *iter_reg;
486 	struct qla2xxx_fce_chain *fcec = ptr;
487 
488 	if (!ha->fce)
489 		return ptr;
490 
491 	*last_chain = &fcec->type;
492 	fcec->type = htonl(DUMP_CHAIN_FCE);
493 	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
494 	    fce_calc_size(ha->fce_bufs));
495 	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
496 	fcec->addr_l = htonl(LSD(ha->fce_dma));
497 	fcec->addr_h = htonl(MSD(ha->fce_dma));
498 
499 	iter_reg = fcec->eregs;
500 	for (cnt = 0; cnt < 8; cnt++)
501 		*iter_reg++ = htonl(ha->fce_mb[cnt]);
502 
503 	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
504 
505 	return (char *)iter_reg + ntohl(fcec->size);
506 }
507 
508 static inline void *
509 qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
510 {
511 	struct qla2xxx_offld_chain *c = ptr;
512 
513 	if (!ha->exlogin_buf)
514 		return ptr;
515 
516 	*last_chain = &c->type;
517 
518 	c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
519 	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
520 	    ha->exlogin_size);
521 	c->size = cpu_to_be32(ha->exlogin_size);
522 	c->addr = cpu_to_be64(ha->exlogin_buf_dma);
523 
524 	ptr += sizeof(struct qla2xxx_offld_chain);
525 	memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
526 
527 	return (char *)ptr + be32_to_cpu(c->size);
528 }
529 
530 static inline void *
531 qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
532 {
533 	struct qla2xxx_offld_chain *c = ptr;
534 
535 	if (!ha->exchoffld_buf)
536 		return ptr;
537 
538 	*last_chain = &c->type;
539 
540 	c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
541 	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
542 	    ha->exchoffld_size);
543 	c->size = cpu_to_be32(ha->exchoffld_size);
544 	c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
545 
546 	ptr += sizeof(struct qla2xxx_offld_chain);
547 	memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
548 
549 	return (char *)ptr + be32_to_cpu(c->size);
550 }
551 
552 static inline void *
553 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
554 			__be32 **last_chain)
555 {
556 	struct qla2xxx_mqueue_chain *q;
557 	struct qla2xxx_mqueue_header *qh;
558 	uint32_t num_queues;
559 	int que;
560 	struct {
561 		int length;
562 		void *ring;
563 	} aq, *aqp;
564 
565 	if (!ha->tgt.atio_ring)
566 		return ptr;
567 
568 	num_queues = 1;
569 	aqp = &aq;
570 	aqp->length = ha->tgt.atio_q_length;
571 	aqp->ring = ha->tgt.atio_ring;
572 
573 	for (que = 0; que < num_queues; que++) {
574 		/* aqp = ha->atio_q_map[que]; */
575 		q = ptr;
576 		*last_chain = &q->type;
577 		q->type = htonl(DUMP_CHAIN_QUEUE);
578 		q->chain_size = htonl(
579 		    sizeof(struct qla2xxx_mqueue_chain) +
580 		    sizeof(struct qla2xxx_mqueue_header) +
581 		    (aqp->length * sizeof(request_t)));
582 		ptr += sizeof(struct qla2xxx_mqueue_chain);
583 
584 		/* Add header. */
585 		qh = ptr;
586 		qh->queue = htonl(TYPE_ATIO_QUEUE);
587 		qh->number = htonl(que);
588 		qh->size = htonl(aqp->length * sizeof(request_t));
589 		ptr += sizeof(struct qla2xxx_mqueue_header);
590 
591 		/* Add data. */
592 		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
593 
594 		ptr += aqp->length * sizeof(request_t);
595 	}
596 
597 	return ptr;
598 }
599 
600 static inline void *
601 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
602 {
603 	struct qla2xxx_mqueue_chain *q;
604 	struct qla2xxx_mqueue_header *qh;
605 	struct req_que *req;
606 	struct rsp_que *rsp;
607 	int que;
608 
609 	if (!ha->mqenable)
610 		return ptr;
611 
612 	/* Request queues */
613 	for (que = 1; que < ha->max_req_queues; que++) {
614 		req = ha->req_q_map[que];
615 		if (!req)
616 			break;
617 
618 		/* Add chain. */
619 		q = ptr;
620 		*last_chain = &q->type;
621 		q->type = htonl(DUMP_CHAIN_QUEUE);
622 		q->chain_size = htonl(
623 		    sizeof(struct qla2xxx_mqueue_chain) +
624 		    sizeof(struct qla2xxx_mqueue_header) +
625 		    (req->length * sizeof(request_t)));
626 		ptr += sizeof(struct qla2xxx_mqueue_chain);
627 
628 		/* Add header. */
629 		qh = ptr;
630 		qh->queue = htonl(TYPE_REQUEST_QUEUE);
631 		qh->number = htonl(que);
632 		qh->size = htonl(req->length * sizeof(request_t));
633 		ptr += sizeof(struct qla2xxx_mqueue_header);
634 
635 		/* Add data. */
636 		memcpy(ptr, req->ring, req->length * sizeof(request_t));
637 		ptr += req->length * sizeof(request_t);
638 	}
639 
640 	/* Response queues */
641 	for (que = 1; que < ha->max_rsp_queues; que++) {
642 		rsp = ha->rsp_q_map[que];
643 		if (!rsp)
644 			break;
645 
646 		/* Add chain. */
647 		q = ptr;
648 		*last_chain = &q->type;
649 		q->type = htonl(DUMP_CHAIN_QUEUE);
650 		q->chain_size = htonl(
651 		    sizeof(struct qla2xxx_mqueue_chain) +
652 		    sizeof(struct qla2xxx_mqueue_header) +
653 		    (rsp->length * sizeof(response_t)));
654 		ptr += sizeof(struct qla2xxx_mqueue_chain);
655 
656 		/* Add header. */
657 		qh = ptr;
658 		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
659 		qh->number = htonl(que);
660 		qh->size = htonl(rsp->length * sizeof(response_t));
661 		ptr += sizeof(struct qla2xxx_mqueue_header);
662 
663 		/* Add data. */
664 		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
665 		ptr += rsp->length * sizeof(response_t);
666 	}
667 
668 	return ptr;
669 }
670 
671 static inline void *
672 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
673 {
674 	uint32_t cnt, que_idx;
675 	uint8_t que_cnt;
676 	struct qla2xxx_mq_chain *mq = ptr;
677 	device_reg_t *reg;
678 
679 	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
680 	    IS_QLA28XX(ha))
681 		return ptr;
682 
683 	mq = ptr;
684 	*last_chain = &mq->type;
685 	mq->type = htonl(DUMP_CHAIN_MQ);
686 	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
687 
688 	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
689 		ha->max_req_queues : ha->max_rsp_queues;
690 	mq->count = htonl(que_cnt);
691 	for (cnt = 0; cnt < que_cnt; cnt++) {
692 		reg = ISP_QUE_REG(ha, cnt);
693 		que_idx = cnt * 4;
694 		mq->qregs[que_idx] =
695 		    htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
696 		mq->qregs[que_idx+1] =
697 		    htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
698 		mq->qregs[que_idx+2] =
699 		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
700 		mq->qregs[que_idx+3] =
701 		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
702 	}
703 
704 	return ptr + sizeof(struct qla2xxx_mq_chain);
705 }
706 
707 void
708 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
709 {
710 	struct qla_hw_data *ha = vha->hw;
711 
712 	if (rval != QLA_SUCCESS) {
713 		ql_log(ql_log_warn, vha, 0xd000,
714 		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
715 		    rval, ha->fw_dump_cap_flags);
716 		ha->fw_dumped = false;
717 	} else {
718 		ql_log(ql_log_info, vha, 0xd001,
719 		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
720 		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
721 		ha->fw_dumped = true;
722 		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
723 	}
724 }
725 
726 void qla2xxx_dump_fw(scsi_qla_host_t *vha)
727 {
728 	unsigned long flags;
729 
730 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
731 	vha->hw->isp_ops->fw_dump(vha);
732 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
733 }
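/*
 * Note: callers request a dump through the wrapper above rather than
 * invoking ha->isp_ops->fw_dump() directly, so that hardware_lock is
 * always held around the dump. A typical (illustrative) call site:
 *
 *	qla2xxx_dump_fw(vha);	// takes and releases hw->hardware_lock
 */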
734 
735 /**
736  * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
737  * @vha: HA context
738  */
739 void
740 qla2300_fw_dump(scsi_qla_host_t *vha)
741 {
742 	int		rval;
743 	uint32_t	cnt;
744 	struct qla_hw_data *ha = vha->hw;
745 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
746 	__le16 __iomem *dmp_reg;
747 	struct qla2300_fw_dump	*fw;
748 	void		*nxt;
749 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
750 
751 	lockdep_assert_held(&ha->hardware_lock);
752 
753 	if (!ha->fw_dump) {
754 		ql_log(ql_log_warn, vha, 0xd002,
755 		    "No buffer available for dump.\n");
756 		return;
757 	}
758 
759 	if (ha->fw_dumped) {
760 		ql_log(ql_log_warn, vha, 0xd003,
761 		    "Firmware has been previously dumped (%p) "
762 		    "-- ignoring request.\n",
763 		    ha->fw_dump);
764 		return;
765 	}
766 	fw = &ha->fw_dump->isp.isp23;
767 	qla2xxx_prep_dump(ha, ha->fw_dump);
768 
769 	rval = QLA_SUCCESS;
770 	fw->hccr = htons(rd_reg_word(&reg->hccr));
771 
772 	/* Pause RISC. */
773 	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
774 	if (IS_QLA2300(ha)) {
775 		for (cnt = 30000;
776 		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
777 			rval == QLA_SUCCESS; cnt--) {
778 			if (cnt)
779 				udelay(100);
780 			else
781 				rval = QLA_FUNCTION_TIMEOUT;
782 		}
783 	} else {
784 		rd_reg_word(&reg->hccr);		/* PCI Posting. */
785 		udelay(10);
786 	}
787 
788 	if (rval == QLA_SUCCESS) {
789 		dmp_reg = &reg->flash_address;
790 		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
791 			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
792 
793 		dmp_reg = &reg->u.isp2300.req_q_in;
794 		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
795 		    cnt++, dmp_reg++)
796 			fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));
797 
798 		dmp_reg = &reg->u.isp2300.mailbox0;
799 		for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
800 		    cnt++, dmp_reg++)
801 			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
802 
803 		wrt_reg_word(&reg->ctrl_status, 0x40);
804 		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
805 
806 		wrt_reg_word(&reg->ctrl_status, 0x50);
807 		qla2xxx_read_window(reg, 48, fw->dma_reg);
808 
809 		wrt_reg_word(&reg->ctrl_status, 0x00);
810 		dmp_reg = &reg->risc_hw;
811 		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
812 		    cnt++, dmp_reg++)
813 			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
814 
815 		wrt_reg_word(&reg->pcr, 0x2000);
816 		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
817 
818 		wrt_reg_word(&reg->pcr, 0x2200);
819 		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
820 
821 		wrt_reg_word(&reg->pcr, 0x2400);
822 		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
823 
824 		wrt_reg_word(&reg->pcr, 0x2600);
825 		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
826 
827 		wrt_reg_word(&reg->pcr, 0x2800);
828 		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
829 
830 		wrt_reg_word(&reg->pcr, 0x2A00);
831 		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
832 
833 		wrt_reg_word(&reg->pcr, 0x2C00);
834 		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
835 
836 		wrt_reg_word(&reg->pcr, 0x2E00);
837 		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
838 
839 		wrt_reg_word(&reg->ctrl_status, 0x10);
840 		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
841 
842 		wrt_reg_word(&reg->ctrl_status, 0x20);
843 		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
844 
845 		wrt_reg_word(&reg->ctrl_status, 0x30);
846 		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
847 
848 		/* Reset RISC. */
849 		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
850 		for (cnt = 0; cnt < 30000; cnt++) {
851 			if ((rd_reg_word(&reg->ctrl_status) &
852 			    CSR_ISP_SOFT_RESET) == 0)
853 				break;
854 
855 			udelay(10);
856 		}
857 	}
858 
859 	if (!IS_QLA2300(ha)) {
860 		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
861 		    rval == QLA_SUCCESS; cnt--) {
862 			if (cnt)
863 				udelay(100);
864 			else
865 				rval = QLA_FUNCTION_TIMEOUT;
866 		}
867 	}
868 
869 	/* Get RISC SRAM. */
870 	if (rval == QLA_SUCCESS)
871 		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
872 					ARRAY_SIZE(fw->risc_ram), &nxt);
873 
874 	/* Get stack SRAM. */
875 	if (rval == QLA_SUCCESS)
876 		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
877 					ARRAY_SIZE(fw->stack_ram), &nxt);
878 
879 	/* Get data SRAM. */
880 	if (rval == QLA_SUCCESS)
881 		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
882 		    ha->fw_memory_size - 0x11000 + 1, &nxt);
883 
884 	if (rval == QLA_SUCCESS)
885 		qla2xxx_copy_queues(ha, nxt);
886 
887 	qla2xxx_dump_post_process(base_vha, rval);
888 }
889 
890 /**
891  * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
892  * @vha: HA context
893  */
894 void
895 qla2100_fw_dump(scsi_qla_host_t *vha)
896 {
897 	int		rval;
898 	uint32_t	cnt, timer;
899 	uint16_t	risc_address = 0;
900 	uint16_t	mb0 = 0, mb2 = 0;
901 	struct qla_hw_data *ha = vha->hw;
902 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
903 	__le16 __iomem *dmp_reg;
904 	struct qla2100_fw_dump	*fw;
905 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
906 
907 	lockdep_assert_held(&ha->hardware_lock);
908 
909 	if (!ha->fw_dump) {
910 		ql_log(ql_log_warn, vha, 0xd004,
911 		    "No buffer available for dump.\n");
912 		return;
913 	}
914 
915 	if (ha->fw_dumped) {
916 		ql_log(ql_log_warn, vha, 0xd005,
917 		    "Firmware has been previously dumped (%p) "
918 		    "-- ignoring request.\n",
919 		    ha->fw_dump);
920 		return;
921 	}
922 	fw = &ha->fw_dump->isp.isp21;
923 	qla2xxx_prep_dump(ha, ha->fw_dump);
924 
925 	rval = QLA_SUCCESS;
926 	fw->hccr = htons(rd_reg_word(&reg->hccr));
927 
928 	/* Pause RISC. */
929 	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
930 	for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
931 	    rval == QLA_SUCCESS; cnt--) {
932 		if (cnt)
933 			udelay(100);
934 		else
935 			rval = QLA_FUNCTION_TIMEOUT;
936 	}
937 	if (rval == QLA_SUCCESS) {
938 		dmp_reg = &reg->flash_address;
939 		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
940 			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
941 
942 		dmp_reg = &reg->u.isp2100.mailbox0;
943 		for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
944 			if (cnt == 8)
945 				dmp_reg = &reg->u_end.isp2200.mailbox8;
946 
947 			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
948 		}
949 
950 		dmp_reg = &reg->u.isp2100.unused_2[0];
951 		for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
952 			fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));
953 
954 		wrt_reg_word(&reg->ctrl_status, 0x00);
955 		dmp_reg = &reg->risc_hw;
956 		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
957 			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
958 
959 		wrt_reg_word(&reg->pcr, 0x2000);
960 		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
961 
962 		wrt_reg_word(&reg->pcr, 0x2100);
963 		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
964 
965 		wrt_reg_word(&reg->pcr, 0x2200);
966 		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
967 
968 		wrt_reg_word(&reg->pcr, 0x2300);
969 		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
970 
971 		wrt_reg_word(&reg->pcr, 0x2400);
972 		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
973 
974 		wrt_reg_word(&reg->pcr, 0x2500);
975 		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
976 
977 		wrt_reg_word(&reg->pcr, 0x2600);
978 		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
979 
980 		wrt_reg_word(&reg->pcr, 0x2700);
981 		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
982 
983 		wrt_reg_word(&reg->ctrl_status, 0x10);
984 		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
985 
986 		wrt_reg_word(&reg->ctrl_status, 0x20);
987 		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
988 
989 		wrt_reg_word(&reg->ctrl_status, 0x30);
990 		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
991 
992 		/* Reset the ISP. */
993 		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
994 	}
995 
996 	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
997 	    rval == QLA_SUCCESS; cnt--) {
998 		if (cnt)
999 			udelay(100);
1000 		else
1001 			rval = QLA_FUNCTION_TIMEOUT;
1002 	}
1003 
1004 	/* Pause RISC. */
1005 	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
1006 	    (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
1007 
1008 		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
1009 		for (cnt = 30000;
1010 		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
1011 		    rval == QLA_SUCCESS; cnt--) {
1012 			if (cnt)
1013 				udelay(100);
1014 			else
1015 				rval = QLA_FUNCTION_TIMEOUT;
1016 		}
1017 		if (rval == QLA_SUCCESS) {
1018 			/* Set memory configuration and timing. */
1019 			if (IS_QLA2100(ha))
1020 				wrt_reg_word(&reg->mctr, 0xf1);
1021 			else
1022 				wrt_reg_word(&reg->mctr, 0xf2);
1023 			rd_reg_word(&reg->mctr);	/* PCI Posting. */
1024 
1025 			/* Release RISC. */
1026 			wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
1027 		}
1028 	}
1029 
1030 	if (rval == QLA_SUCCESS) {
1031 		/* Get RISC SRAM. */
1032 		risc_address = 0x1000;
1033  		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
1034 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1035 	}
1036 	for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
1037 	    cnt++, risc_address++) {
1038  		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1039 		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
1040 
1041 		for (timer = 6000000; timer != 0; timer--) {
1042 			/* Check for pending interrupts. */
1043 			if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
1044 				if (rd_reg_word(&reg->semaphore) & BIT_0) {
1045 					set_bit(MBX_INTERRUPT,
1046 					    &ha->mbx_cmd_flags);
1047 
1048 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
1049 					mb2 = RD_MAILBOX_REG(ha, reg, 2);
1050 
1051 					wrt_reg_word(&reg->semaphore, 0);
1052 					wrt_reg_word(&reg->hccr,
1053 					    HCCR_CLR_RISC_INT);
1054 					rd_reg_word(&reg->hccr);
1055 					break;
1056 				}
1057 				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
1058 				rd_reg_word(&reg->hccr);
1059 			}
1060 			udelay(5);
1061 		}
1062 
1063 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1064 			rval = mb0 & MBS_MASK;
1065 			fw->risc_ram[cnt] = htons(mb2);
1066 		} else {
1067 			rval = QLA_FUNCTION_FAILED;
1068 		}
1069 	}
1070 
1071 	if (rval == QLA_SUCCESS)
1072 		qla2xxx_copy_queues(ha, &fw->queue_dump[0]);
1073 
1074 	qla2xxx_dump_post_process(base_vha, rval);
1075 }
1076 
1077 void
1078 qla24xx_fw_dump(scsi_qla_host_t *vha)
1079 {
1080 	int		rval;
1081 	uint32_t	cnt;
1082 	struct qla_hw_data *ha = vha->hw;
1083 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1084 	__le32 __iomem *dmp_reg;
1085 	__be32		*iter_reg;
1086 	__le16 __iomem *mbx_reg;
1087 	struct qla24xx_fw_dump *fw;
1088 	void		*nxt;
1089 	void		*nxt_chain;
1090 	__be32		*last_chain = NULL;
1091 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1092 
1093 	lockdep_assert_held(&ha->hardware_lock);
1094 
1095 	if (IS_P3P_TYPE(ha))
1096 		return;
1097 
1098 	ha->fw_dump_cap_flags = 0;
1099 
1100 	if (!ha->fw_dump) {
1101 		ql_log(ql_log_warn, vha, 0xd006,
1102 		    "No buffer available for dump.\n");
1103 		return;
1104 	}
1105 
1106 	if (ha->fw_dumped) {
1107 		ql_log(ql_log_warn, vha, 0xd007,
1108 		    "Firmware has been previously dumped (%p) "
1109 		    "-- ignoring request.\n",
1110 		    ha->fw_dump);
1111 		return;
1112 	}
1113 	QLA_FW_STOPPED(ha);
1114 	fw = &ha->fw_dump->isp.isp24;
1115 	qla2xxx_prep_dump(ha, ha->fw_dump);
1116 
1117 	fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1118 
1119 	/*
1120 	 * Pause RISC. No need to track timeout, as resetting the chip
1121 	 * is the right approach in case of a pause timeout
1122 	 */
1123 	qla24xx_pause_risc(reg, ha);
1124 
1125 	/* Host interface registers. */
1126 	dmp_reg = &reg->flash_addr;
1127 	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
1128 		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
1129 
1130 	/* Disable interrupts. */
1131 	wrt_reg_dword(&reg->ictrl, 0);
1132 	rd_reg_dword(&reg->ictrl);
1133 
1134 	/* Shadow registers. */
1135 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1136 	rd_reg_dword(&reg->iobase_addr);
1137 	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
1138 	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
1139 
1140 	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
1141 	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
1142 
1143 	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
1144 	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
1145 
1146 	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
1147 	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
1148 
1149 	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
1150 	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
1151 
1152 	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
1153 	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
1154 
1155 	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
1156 	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
1157 
1158 	/* Mailbox registers. */
1159 	mbx_reg = &reg->mailbox0;
1160 	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1161 		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1162 
1163 	/* Transfer sequence registers. */
1164 	iter_reg = fw->xseq_gp_reg;
1165 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1166 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1167 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1168 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1169 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1170 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1171 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1172 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1173 
1174 	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1175 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1176 
1177 	/* Receive sequence registers. */
1178 	iter_reg = fw->rseq_gp_reg;
1179 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1180 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1181 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1182 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1183 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1184 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1185 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1186 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1187 
1188 	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1189 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1190 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1191 
1192 	/* Command DMA registers. */
1193 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1194 
1195 	/* Queues. */
1196 	iter_reg = fw->req0_dma_reg;
1197 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1198 	dmp_reg = &reg->iobase_q;
1199 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1200 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1201 
1202 	iter_reg = fw->resp0_dma_reg;
1203 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1204 	dmp_reg = &reg->iobase_q;
1205 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1206 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1207 
1208 	iter_reg = fw->req1_dma_reg;
1209 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1210 	dmp_reg = &reg->iobase_q;
1211 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1212 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1213 
1214 	/* Transmit DMA registers. */
1215 	iter_reg = fw->xmt0_dma_reg;
1216 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1217 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1218 
1219 	iter_reg = fw->xmt1_dma_reg;
1220 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1221 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1222 
1223 	iter_reg = fw->xmt2_dma_reg;
1224 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1225 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1226 
1227 	iter_reg = fw->xmt3_dma_reg;
1228 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1229 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1230 
1231 	iter_reg = fw->xmt4_dma_reg;
1232 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1233 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1234 
1235 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1236 
1237 	/* Receive DMA registers. */
1238 	iter_reg = fw->rcvt0_data_dma_reg;
1239 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1240 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1241 
1242 	iter_reg = fw->rcvt1_data_dma_reg;
1243 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1244 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1245 
1246 	/* RISC registers. */
1247 	iter_reg = fw->risc_gp_reg;
1248 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1249 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1250 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1251 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1252 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1253 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1254 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1255 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1256 
1257 	/* Local memory controller registers. */
1258 	iter_reg = fw->lmc_reg;
1259 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1260 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1261 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1262 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1263 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1264 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1265 	qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1266 
1267 	/* Fibre Protocol Module registers. */
1268 	iter_reg = fw->fpm_hdw_reg;
1269 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1270 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1271 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1272 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1273 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1274 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1275 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1276 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1277 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1278 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1279 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1280 	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1281 
1282 	/* Frame Buffer registers. */
1283 	iter_reg = fw->fb_hdw_reg;
1284 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1285 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1286 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1287 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1288 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1289 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1290 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1291 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1292 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1293 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1294 	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1295 
1296 	rval = qla24xx_soft_reset(ha);
1297 	if (rval != QLA_SUCCESS)
1298 		goto qla24xx_fw_dump_failed_0;
1299 
1300 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1301 	    &nxt);
1302 	if (rval != QLA_SUCCESS)
1303 		goto qla24xx_fw_dump_failed_0;
1304 
1305 	nxt = qla2xxx_copy_queues(ha, nxt);
1306 
1307 	qla24xx_copy_eft(ha, nxt);
1308 
1309 	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1310 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1311 	if (last_chain) {
1312 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1313 		*last_chain |= htonl(DUMP_CHAIN_LAST);
1314 	}
1315 
1316 	/* Adjust valid length. */
1317 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1318 
1319 qla24xx_fw_dump_failed_0:
1320 	qla2xxx_dump_post_process(base_vha, rval);
1321 }
1322 
1323 void
1324 qla25xx_fw_dump(scsi_qla_host_t *vha)
1325 {
1326 	int		rval;
1327 	uint32_t	cnt;
1328 	struct qla_hw_data *ha = vha->hw;
1329 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1330 	__le32 __iomem *dmp_reg;
1331 	__be32		*iter_reg;
1332 	__le16 __iomem *mbx_reg;
1333 	struct qla25xx_fw_dump *fw;
1334 	void		*nxt, *nxt_chain;
1335 	__be32		*last_chain = NULL;
1336 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1337 
1338 	lockdep_assert_held(&ha->hardware_lock);
1339 
1340 	ha->fw_dump_cap_flags = 0;
1341 
1342 	if (!ha->fw_dump) {
1343 		ql_log(ql_log_warn, vha, 0xd008,
1344 		    "No buffer available for dump.\n");
1345 		return;
1346 	}
1347 
1348 	if (ha->fw_dumped) {
1349 		ql_log(ql_log_warn, vha, 0xd009,
1350 		    "Firmware has been previously dumped (%p) "
1351 		    "-- ignoring request.\n",
1352 		    ha->fw_dump);
1353 		return;
1354 	}
1355 	QLA_FW_STOPPED(ha);
1356 	fw = &ha->fw_dump->isp.isp25;
1357 	qla2xxx_prep_dump(ha, ha->fw_dump);
1358 	ha->fw_dump->version = htonl(2);
1359 
1360 	fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1361 
1362 	/*
1363 	 * Pause RISC. No need to track timeout, as resetting the chip
1364 	 * is the right approach in case of a pause timeout
1365 	 */
1366 	qla24xx_pause_risc(reg, ha);
1367 
1368 	/* Host/Risc registers. */
1369 	iter_reg = fw->host_risc_reg;
1370 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1371 	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1372 
1373 	/* PCIe registers. */
1374 	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
1375 	rd_reg_dword(&reg->iobase_addr);
1376 	wrt_reg_dword(&reg->iobase_window, 0x01);
1377 	dmp_reg = &reg->iobase_c4;
1378 	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
1379 	dmp_reg++;
1380 	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
1381 	dmp_reg++;
1382 	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
1383 	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
1384 
1385 	wrt_reg_dword(&reg->iobase_window, 0x00);
1386 	rd_reg_dword(&reg->iobase_window);
1387 
1388 	/* Host interface registers. */
1389 	dmp_reg = &reg->flash_addr;
1390 	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
1391 		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
1392 
1393 	/* Disable interrupts. */
1394 	wrt_reg_dword(&reg->ictrl, 0);
1395 	rd_reg_dword(&reg->ictrl);
1396 
1397 	/* Shadow registers. */
1398 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1399 	rd_reg_dword(&reg->iobase_addr);
1400 	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
1401 	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
1402 
1403 	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
1404 	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
1405 
1406 	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
1407 	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
1408 
1409 	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
1410 	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
1411 
1412 	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
1413 	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
1414 
1415 	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
1416 	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
1417 
1418 	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
1419 	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
1420 
1421 	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
1422 	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
1423 
1424 	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
1425 	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
1426 
1427 	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
1428 	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
1429 
1430 	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
1431 	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
1432 
1433 	/* RISC I/O register. */
1434 	wrt_reg_dword(&reg->iobase_addr, 0x0010);
1435 	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
1436 
1437 	/* Mailbox registers. */
1438 	mbx_reg = &reg->mailbox0;
1439 	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1440 		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1441 
1442 	/* Transfer sequence registers. */
1443 	iter_reg = fw->xseq_gp_reg;
1444 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1445 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1446 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1447 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1448 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1449 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1450 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1451 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1452 
1453 	iter_reg = fw->xseq_0_reg;
1454 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1455 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1456 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1457 
1458 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1459 
1460 	/* Receive sequence registers. */
1461 	iter_reg = fw->rseq_gp_reg;
1462 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1463 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1464 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1465 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1466 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1467 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1468 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1469 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1470 
1471 	iter_reg = fw->rseq_0_reg;
1472 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1473 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1474 
1475 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1476 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1477 
1478 	/* Auxiliary sequence registers. */
1479 	iter_reg = fw->aseq_gp_reg;
1480 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1481 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1482 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1483 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1484 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1485 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1486 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1487 	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1488 
1489 	iter_reg = fw->aseq_0_reg;
1490 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1491 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1492 
1493 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1494 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1495 
1496 	/* Command DMA registers. */
1497 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1498 
1499 	/* Queues. */
1500 	iter_reg = fw->req0_dma_reg;
1501 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1502 	dmp_reg = &reg->iobase_q;
1503 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1504 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1505 
1506 	iter_reg = fw->resp0_dma_reg;
1507 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1508 	dmp_reg = &reg->iobase_q;
1509 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1510 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1511 
1512 	iter_reg = fw->req1_dma_reg;
1513 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1514 	dmp_reg = &reg->iobase_q;
1515 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1516 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1517 
1518 	/* Transmit DMA registers. */
1519 	iter_reg = fw->xmt0_dma_reg;
1520 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1521 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1522 
1523 	iter_reg = fw->xmt1_dma_reg;
1524 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1525 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1526 
1527 	iter_reg = fw->xmt2_dma_reg;
1528 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1529 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1530 
1531 	iter_reg = fw->xmt3_dma_reg;
1532 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1533 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1534 
1535 	iter_reg = fw->xmt4_dma_reg;
1536 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1537 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1538 
1539 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1540 
1541 	/* Receive DMA registers. */
1542 	iter_reg = fw->rcvt0_data_dma_reg;
1543 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1544 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1545 
1546 	iter_reg = fw->rcvt1_data_dma_reg;
1547 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1548 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1549 
1550 	/* RISC registers. */
1551 	iter_reg = fw->risc_gp_reg;
1552 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1553 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1554 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1555 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1556 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1557 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1558 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1559 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1560 
1561 	/* Local memory controller registers. */
1562 	iter_reg = fw->lmc_reg;
1563 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1564 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1565 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1566 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1567 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1568 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1569 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1570 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1571 
1572 	/* Fibre Protocol Module registers. */
1573 	iter_reg = fw->fpm_hdw_reg;
1574 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1575 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1576 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1577 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1578 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1579 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1580 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1581 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1582 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1583 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1584 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1585 	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1586 
1587 	/* Frame Buffer registers. */
1588 	iter_reg = fw->fb_hdw_reg;
1589 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1590 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1591 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1592 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1593 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1594 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1595 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1596 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1597 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1598 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1599 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1600 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1601 
1602 	/* Multi queue registers */
1603 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1604 	    &last_chain);
1605 
1606 	rval = qla24xx_soft_reset(ha);
1607 	if (rval != QLA_SUCCESS)
1608 		goto qla25xx_fw_dump_failed_0;
1609 
1610 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1611 	    &nxt);
1612 	if (rval != QLA_SUCCESS)
1613 		goto qla25xx_fw_dump_failed_0;
1614 
1615 	nxt = qla2xxx_copy_queues(ha, nxt);
1616 
1617 	qla24xx_copy_eft(ha, nxt);
1618 
1619 	/* Chain entries -- started with MQ. */
1620 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1621 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1622 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1623 	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1624 	if (last_chain) {
1625 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1626 		*last_chain |= htonl(DUMP_CHAIN_LAST);
1627 	}
1628 
1629 	/* Adjust valid length. */
1630 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1631 
1632 qla25xx_fw_dump_failed_0:
1633 	qla2xxx_dump_post_process(base_vha, rval);
1634 }
1635 
1636 void
1637 qla81xx_fw_dump(scsi_qla_host_t *vha)
1638 {
1639 	int		rval;
1640 	uint32_t	cnt;
1641 	struct qla_hw_data *ha = vha->hw;
1642 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1643 	__le32 __iomem *dmp_reg;
1644 	__be32		*iter_reg;
1645 	__le16 __iomem *mbx_reg;
1646 	struct qla81xx_fw_dump *fw;
1647 	void		*nxt, *nxt_chain;
1648 	__be32		*last_chain = NULL;
1649 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1650 
1651 	lockdep_assert_held(&ha->hardware_lock);
1652 
1653 	ha->fw_dump_cap_flags = 0;
1654 
1655 	if (!ha->fw_dump) {
1656 		ql_log(ql_log_warn, vha, 0xd00a,
1657 		    "No buffer available for dump.\n");
1658 		return;
1659 	}
1660 
1661 	if (ha->fw_dumped) {
1662 		ql_log(ql_log_warn, vha, 0xd00b,
1663 		    "Firmware has been previously dumped (%p) "
1664 		    "-- ignoring request.\n",
1665 		    ha->fw_dump);
1666 		return;
1667 	}
1668 	fw = &ha->fw_dump->isp.isp81;
1669 	qla2xxx_prep_dump(ha, ha->fw_dump);
1670 
1671 	fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1672 
1673 	/*
1674 	 * Pause RISC. No need to track timeout, as resetting the chip
1675 	 * is the right approach in case of a pause timeout.
1676 	 */
1677 	qla24xx_pause_risc(reg, ha);
1678 
1679 	/* Host/Risc registers. */
1680 	iter_reg = fw->host_risc_reg;
1681 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1682 	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1683 
1684 	/* PCIe registers. */
1685 	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
1686 	rd_reg_dword(&reg->iobase_addr);
1687 	wrt_reg_dword(&reg->iobase_window, 0x01);
1688 	dmp_reg = &reg->iobase_c4;
1689 	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
1690 	dmp_reg++;
1691 	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
1692 	dmp_reg++;
1693 	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
1694 	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
1695 
1696 	wrt_reg_dword(&reg->iobase_window, 0x00);
1697 	rd_reg_dword(&reg->iobase_window);
1698 
1699 	/* Host interface registers. */
1700 	dmp_reg = &reg->flash_addr;
1701 	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
1702 		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
1703 
1704 	/* Disable interrupts. */
1705 	wrt_reg_dword(&reg->ictrl, 0);
1706 	rd_reg_dword(&reg->ictrl);
1707 
1708 	/* Shadow registers. */
1709 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1710 	rd_reg_dword(&reg->iobase_addr);
1711 	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
1712 	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
1713 
1714 	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
1715 	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
1716 
1717 	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
1718 	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
1719 
1720 	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
1721 	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
1722 
1723 	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
1724 	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
1725 
1726 	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
1727 	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
1728 
1729 	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
1730 	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
1731 
1732 	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
1733 	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
1734 
1735 	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
1736 	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
1737 
1738 	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
1739 	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
1740 
1741 	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
1742 	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
1743 
1744 	/* RISC I/O register. */
1745 	wrt_reg_dword(&reg->iobase_addr, 0x0010);
1746 	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
1747 
1748 	/* Mailbox registers. */
1749 	mbx_reg = &reg->mailbox0;
1750 	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1751 		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1752 
1753 	/* Transfer sequence registers. */
1754 	iter_reg = fw->xseq_gp_reg;
1755 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1756 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1757 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1758 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1759 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1760 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1761 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1762 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1763 
1764 	iter_reg = fw->xseq_0_reg;
1765 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1766 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1767 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1768 
1769 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1770 
1771 	/* Receive sequence registers. */
1772 	iter_reg = fw->rseq_gp_reg;
1773 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1774 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1775 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1776 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1777 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1778 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1779 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1780 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1781 
1782 	iter_reg = fw->rseq_0_reg;
1783 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1784 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1785 
1786 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1787 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1788 
1789 	/* Auxiliary sequence registers. */
1790 	iter_reg = fw->aseq_gp_reg;
1791 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1792 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1793 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1794 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1795 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1796 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1797 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1798 	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1799 
1800 	iter_reg = fw->aseq_0_reg;
1801 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1802 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1803 
1804 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1805 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1806 
1807 	/* Command DMA registers. */
1808 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1809 
1810 	/* Queues. */
1811 	iter_reg = fw->req0_dma_reg;
1812 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1813 	dmp_reg = &reg->iobase_q;
1814 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1815 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1816 
1817 	iter_reg = fw->resp0_dma_reg;
1818 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1819 	dmp_reg = &reg->iobase_q;
1820 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1821 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1822 
1823 	iter_reg = fw->req1_dma_reg;
1824 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1825 	dmp_reg = &reg->iobase_q;
1826 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1827 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1828 
1829 	/* Transmit DMA registers. */
1830 	iter_reg = fw->xmt0_dma_reg;
1831 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1832 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1833 
1834 	iter_reg = fw->xmt1_dma_reg;
1835 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1836 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1837 
1838 	iter_reg = fw->xmt2_dma_reg;
1839 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1840 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1841 
1842 	iter_reg = fw->xmt3_dma_reg;
1843 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1844 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1845 
1846 	iter_reg = fw->xmt4_dma_reg;
1847 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1848 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1849 
1850 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1851 
1852 	/* Receive DMA registers. */
1853 	iter_reg = fw->rcvt0_data_dma_reg;
1854 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1855 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1856 
1857 	iter_reg = fw->rcvt1_data_dma_reg;
1858 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1859 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1860 
1861 	/* RISC registers. */
1862 	iter_reg = fw->risc_gp_reg;
1863 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1864 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1865 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1866 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1867 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1868 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1869 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1870 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1871 
1872 	/* Local memory controller registers. */
1873 	iter_reg = fw->lmc_reg;
1874 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1875 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1876 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1877 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1878 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1879 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1880 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1881 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1882 
1883 	/* Fibre Protocol Module registers. */
1884 	iter_reg = fw->fpm_hdw_reg;
1885 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1886 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1887 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1888 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1889 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1890 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1891 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1892 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1893 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1894 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1895 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1896 	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1897 	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1898 	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1899 
1900 	/* Frame Buffer registers. */
1901 	iter_reg = fw->fb_hdw_reg;
1902 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1903 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1904 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1905 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1906 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1907 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1908 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1909 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1910 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1911 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1912 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1913 	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1914 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1915 
1916 	/* Multi queue registers */
1917 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1918 	    &last_chain);
1919 
1920 	rval = qla24xx_soft_reset(ha);
1921 	if (rval != QLA_SUCCESS)
1922 		goto qla81xx_fw_dump_failed_0;
1923 
1924 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1925 	    &nxt);
1926 	if (rval != QLA_SUCCESS)
1927 		goto qla81xx_fw_dump_failed_0;
1928 
1929 	nxt = qla2xxx_copy_queues(ha, nxt);
1930 
1931 	qla24xx_copy_eft(ha, nxt);
1932 
1933 	/* Chain entries -- started with MQ. */
1934 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1935 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1936 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1937 	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1938 	nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
1939 	if (last_chain) {
1940 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1941 		*last_chain |= htonl(DUMP_CHAIN_LAST);
1942 	}
1943 
1944 	/* Adjust valid length. */
1945 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1946 
1947 qla81xx_fw_dump_failed_0:
1948 	qla2xxx_dump_post_process(base_vha, rval);
1949 }
1950 
1951 void
1952 qla83xx_fw_dump(scsi_qla_host_t *vha)
1953 {
1954 	int		rval;
1955 	uint32_t	cnt;
1956 	struct qla_hw_data *ha = vha->hw;
1957 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1958 	__le32 __iomem *dmp_reg;
1959 	__be32		*iter_reg;
1960 	__le16 __iomem *mbx_reg;
1961 	struct qla83xx_fw_dump *fw;
1962 	void		*nxt, *nxt_chain;
1963 	__be32		*last_chain = NULL;
1964 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1965 
1966 	lockdep_assert_held(&ha->hardware_lock);
1967 
1968 	ha->fw_dump_cap_flags = 0;
1969 
1970 	if (!ha->fw_dump) {
1971 		ql_log(ql_log_warn, vha, 0xd00c,
1972 		    "No buffer available for dump!!!\n");
1973 		return;
1974 	}
1975 
1976 	if (ha->fw_dumped) {
1977 		ql_log(ql_log_warn, vha, 0xd00d,
1978 		    "Firmware has been previously dumped (%p) -- ignoring "
1979 		    "request...\n", ha->fw_dump);
1980 		return;
1981 	}
1982 	QLA_FW_STOPPED(ha);
1983 	fw = &ha->fw_dump->isp.isp83;
1984 	qla2xxx_prep_dump(ha, ha->fw_dump);
1985 
1986 	fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1987 
1988 	/*
1989 	 * Pause RISC. No need to track timeout, as resetting the chip
1990 	 * is the right approach in case of a pause timeout.
1991 	 */
1992 	qla24xx_pause_risc(reg, ha);
1993 
1994 	wrt_reg_dword(&reg->iobase_addr, 0x6000);
1995 	dmp_reg = &reg->iobase_window;
1996 	rd_reg_dword(dmp_reg);
1997 	wrt_reg_dword(dmp_reg, 0);
1998 
1999 	dmp_reg = &reg->unused_4_1[0];
2000 	rd_reg_dword(dmp_reg);
2001 	wrt_reg_dword(dmp_reg, 0);
2002 
2003 	wrt_reg_dword(&reg->iobase_addr, 0x6010);
2004 	dmp_reg = &reg->unused_4_1[2];
2005 	rd_reg_dword(dmp_reg);
2006 	wrt_reg_dword(dmp_reg, 0);
2007 
2008 	/* select PCR and disable ecc checking and correction */
2009 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
2010 	rd_reg_dword(&reg->iobase_addr);
2011 	wrt_reg_dword(&reg->iobase_select, 0x60000000);	/* write to F0h = PCR */
2012 
2013 	/* Host/Risc registers. */
2014 	iter_reg = fw->host_risc_reg;
2015 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2016 	iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2017 	qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2018 
2019 	/* PCIe registers. */
2020 	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
2021 	rd_reg_dword(&reg->iobase_addr);
2022 	wrt_reg_dword(&reg->iobase_window, 0x01);
2023 	dmp_reg = &reg->iobase_c4;
2024 	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
2025 	dmp_reg++;
2026 	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
2027 	dmp_reg++;
2028 	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
2029 	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
2030 
2031 	wrt_reg_dword(&reg->iobase_window, 0x00);
2032 	rd_reg_dword(&reg->iobase_window);
2033 
2034 	/* Host interface registers. */
2035 	dmp_reg = &reg->flash_addr;
2036 	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
2037 		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
2038 
2039 	/* Disable interrupts. */
2040 	wrt_reg_dword(&reg->ictrl, 0);
2041 	rd_reg_dword(&reg->ictrl);
2042 
2043 	/* Shadow registers. */
2044 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
2045 	rd_reg_dword(&reg->iobase_addr);
2046 	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
2047 	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
2048 
2049 	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
2050 	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
2051 
2052 	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
2053 	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
2054 
2055 	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
2056 	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
2057 
2058 	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
2059 	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
2060 
2061 	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
2062 	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
2063 
2064 	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
2065 	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
2066 
2067 	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
2068 	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
2069 
2070 	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
2071 	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
2072 
2073 	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
2074 	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
2075 
2076 	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
2077 	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
2078 
2079 	/* RISC I/O register. */
2080 	wrt_reg_dword(&reg->iobase_addr, 0x0010);
2081 	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
2082 
2083 	/* Mailbox registers. */
2084 	mbx_reg = &reg->mailbox0;
2085 	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
2086 		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
2087 
2088 	/* Transfer sequence registers. */
2089 	iter_reg = fw->xseq_gp_reg;
2090 	iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2091 	iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2092 	iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2093 	iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2094 	iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2095 	iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2096 	iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2097 	iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2098 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2099 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2100 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2101 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2102 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2103 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2104 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2105 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2106 
2107 	iter_reg = fw->xseq_0_reg;
2108 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2109 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2110 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2111 
2112 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2113 
2114 	qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2115 
2116 	/* Receive sequence registers. */
2117 	iter_reg = fw->rseq_gp_reg;
2118 	iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2119 	iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2120 	iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2121 	iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2122 	iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2123 	iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2124 	iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2125 	iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2126 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2127 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2128 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2129 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2130 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2131 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2132 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2133 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2134 
2135 	iter_reg = fw->rseq_0_reg;
2136 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2137 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2138 
2139 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2140 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2141 	qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2142 
2143 	/* Auxiliary sequence registers. */
2144 	iter_reg = fw->aseq_gp_reg;
2145 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2146 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2147 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2148 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2149 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2150 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2151 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2152 	iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2153 	iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2154 	iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2155 	iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2156 	iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2157 	iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2158 	iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2159 	iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2160 	qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2161 
2162 	iter_reg = fw->aseq_0_reg;
2163 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2164 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2165 
2166 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2167 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2168 	qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2169 
2170 	/* Command DMA registers. */
2171 	iter_reg = fw->cmd_dma_reg;
2172 	iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2173 	iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2174 	iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2175 	qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2176 
2177 	/* Queues. */
2178 	iter_reg = fw->req0_dma_reg;
2179 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2180 	dmp_reg = &reg->iobase_q;
2181 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2182 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2183 
2184 	iter_reg = fw->resp0_dma_reg;
2185 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2186 	dmp_reg = &reg->iobase_q;
2187 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2188 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2189 
2190 	iter_reg = fw->req1_dma_reg;
2191 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2192 	dmp_reg = &reg->iobase_q;
2193 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2194 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2195 
2196 	/* Transmit DMA registers. */
2197 	iter_reg = fw->xmt0_dma_reg;
2198 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2199 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2200 
2201 	iter_reg = fw->xmt1_dma_reg;
2202 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2203 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2204 
2205 	iter_reg = fw->xmt2_dma_reg;
2206 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2207 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2208 
2209 	iter_reg = fw->xmt3_dma_reg;
2210 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2211 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2212 
2213 	iter_reg = fw->xmt4_dma_reg;
2214 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2215 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2216 
2217 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2218 
2219 	/* Receive DMA registers. */
2220 	iter_reg = fw->rcvt0_data_dma_reg;
2221 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2222 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2223 
2224 	iter_reg = fw->rcvt1_data_dma_reg;
2225 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2226 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2227 
2228 	/* RISC registers. */
2229 	iter_reg = fw->risc_gp_reg;
2230 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2231 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2232 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2233 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2234 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2235 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2236 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2237 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2238 
2239 	/* Local memory controller registers. */
2240 	iter_reg = fw->lmc_reg;
2241 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2242 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2243 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2244 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2245 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2246 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2247 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2248 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2249 
2250 	/* Fibre Protocol Module registers. */
2251 	iter_reg = fw->fpm_hdw_reg;
2252 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2253 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2254 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2255 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2256 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2257 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2258 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2259 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2260 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2261 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2262 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2263 	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2264 	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2265 	iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2266 	iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2267 	qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2268 
2269 	/* RQ0 Array registers. */
2270 	iter_reg = fw->rq0_array_reg;
2271 	iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2272 	iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2273 	iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2274 	iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2275 	iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2276 	iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2277 	iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2278 	iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2279 	iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2280 	iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2281 	iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2282 	iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2283 	iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2284 	iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2285 	iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2286 	qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2287 
2288 	/* RQ1 Array registers. */
2289 	iter_reg = fw->rq1_array_reg;
2290 	iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2291 	iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2292 	iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2293 	iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2294 	iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2295 	iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2296 	iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2297 	iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2298 	iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2299 	iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2300 	iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2301 	iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2302 	iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2303 	iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2304 	iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2305 	qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2306 
2307 	/* RP0 Array registers. */
2308 	iter_reg = fw->rp0_array_reg;
2309 	iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2310 	iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2311 	iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2312 	iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2313 	iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2314 	iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2315 	iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2316 	iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2317 	iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2318 	iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2319 	iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2320 	iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2321 	iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2322 	iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2323 	iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2324 	qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2325 
2326 	/* RP1 Array registers. */
2327 	iter_reg = fw->rp1_array_reg;
2328 	iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2329 	iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2330 	iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2331 	iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2332 	iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2333 	iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2334 	iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2335 	iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2336 	iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2337 	iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2338 	iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2339 	iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2340 	iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2341 	iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2342 	iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2343 	qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2344 
2345 	iter_reg = fw->at0_array_reg;
2346 	iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2347 	iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2348 	iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2349 	iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2350 	iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2351 	iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2352 	iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2353 	qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2354 
2355 	/* I/O Queue Control registers. */
2356 	qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2357 
2358 	/* Frame Buffer registers. */
2359 	iter_reg = fw->fb_hdw_reg;
2360 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2361 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2362 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2363 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2364 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2365 	iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2366 	iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2367 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2368 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2369 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2370 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2371 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2372 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2373 	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2374 	iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2375 	iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2376 	iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2377 	iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2378 	iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2379 	iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2380 	iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2381 	iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2382 	iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2383 	iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2384 	iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2385 	iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2386 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2387 
2388 	/* Multi queue registers */
2389 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2390 	    &last_chain);
2391 
2392 	rval = qla24xx_soft_reset(ha);
2393 	if (rval != QLA_SUCCESS) {
2394 		ql_log(ql_log_warn, vha, 0xd00e,
2395 		    "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2396 		rval = QLA_SUCCESS;
2397 
2398 		ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2399 
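		/*
		 * Force a full RISC reset by toggling the HCCR reset bits,
		 * then poll mailbox0 for up to ~150 ms (30000 * 5 us) until
		 * it reads zero, indicating the reset completed.
		 */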
2400 		wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
2401 		rd_reg_dword(&reg->hccr);
2402 
2403 		wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2404 		rd_reg_dword(&reg->hccr);
2405 
2406 		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
2407 		rd_reg_dword(&reg->hccr);
2408 
2409 		for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--)
2410 			udelay(5);
2411 
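		/*
		 * If mailbox0 never cleared, the RISC is not ready: skip the
		 * memory dump but still advance nxt past the space reserved
		 * for code RAM and external memory, so the queue data that
		 * follows lands at its expected offset in the dump.
		 */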
2412 		if (!cnt) {
2413 			nxt = fw->code_ram;
2414 			nxt += sizeof(fw->code_ram);
2415 			nxt += (ha->fw_memory_size - 0x100000 + 1);
2416 			goto copy_queue;
2417 		} else {
2418 			set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2419 			ql_log(ql_log_warn, vha, 0xd010,
2420 			    "bigger hammer success?\n");
2421 		}
2422 	}
2423 
2424 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2425 	    &nxt);
2426 	if (rval != QLA_SUCCESS)
2427 		goto qla83xx_fw_dump_failed_0;
2428 
2429 copy_queue:
2430 	nxt = qla2xxx_copy_queues(ha, nxt);
2431 
2432 	qla24xx_copy_eft(ha, nxt);
2433 
2434 	/* Chain entries -- started with MQ. */
2435 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2436 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2437 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2438 	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
2439 	nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
2440 	if (last_chain) {
2441 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2442 		*last_chain |= htonl(DUMP_CHAIN_LAST);
2443 	}
2444 
2445 	/* Adjust valid length. */
2446 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2447 
2448 qla83xx_fw_dump_failed_0:
2449 	qla2xxx_dump_post_process(base_vha, rval);
2450 }
2451 
2452 /****************************************************************************/
2453 /*                         Driver Debug Functions.                          */
2454 /****************************************************************************/
2455 
2456 /* Write the debug message prefix into @pbuf. */
2457 static void ql_dbg_prefix(char *pbuf, int pbuf_size, struct pci_dev *pdev,
2458 			  const scsi_qla_host_t *vha, uint msg_id)
2459 {
2460 	if (vha) {
2461 		const struct pci_dev *pdev = vha->hw->pdev;
2462 
2463 		/* <module-name> [<dev-name>]-<msg-id>:<host>: */
2464 		snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
2465 			 dev_name(&(pdev->dev)), msg_id, vha->host_no);
2466 	} else if (pdev) {
2467 		snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
2468 			 dev_name(&pdev->dev), msg_id);
2469 	} else {
2470 		/* <module-name> [<dev-name>]-<msg-id>: : */
2471 		snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
2472 			 "0000:00:00.0", msg_id);
2473 	}
2474 }
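
/*
 * Illustrative prefix produced by ql_dbg_prefix() (the device name, host
 * number and message id below are hypothetical, and QL_MSGHDR is assumed to
 * expand to the driver name):
 *
 *	"qla2xxx [0000:05:00.0]-1234:2: "
 *
 * which follows the "<module-name> [<dev-name>]-<msg-id>:<host>: " layout
 * noted in the branches above.
 */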
2475 
2476 /*
2477  * This function is for formatting and logging debug information.
2478  * It is to be used when vha is available. It formats the message
2479  * and logs it to the messages file.
2480  * parameters:
2481  * level: The level of the debug messages to be printed.
2482  *        If the ql2xextended_error_logging mask enables this level,
2483  *        this message will appear in the messages file.
2484  * vha:   Pointer to the scsi_qla_host_t.
2485  * id:    This is a unique identifier for the level. It identifies the
2486  *        part of the code from where the message originated.
2487  * fmt:   The printf-style format string for the message to be displayed.
2488  */
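/*
 * Usage sketch (illustrative only: the vha variable, the 0x0123 message id
 * and the format arguments are hypothetical, and ql_dbg_init is assumed to
 * be one of the level bits defined in qla_dbg.h):
 *
 *	ql_dbg(ql_dbg_init, vha, 0x0123,
 *	    "Firmware ready, state=%x.\n", state);
 *
 * The message is printed only when ql_mask_match() accepts the level, i.e.
 * when the corresponding bit is set in ql2xextended_error_logging.
 */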
2489 void
2490 ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2491 {
2492 	va_list va;
2493 	struct va_format vaf;
2494 	char pbuf[64];
2495 
2496 	ql_ktrace(1, level, pbuf, NULL, vha, id, fmt);
2497 
2498 	if (!ql_mask_match(level))
2499 		return;
2500 
2501 	if (!pbuf[0]) /* set by ql_ktrace */
2502 		ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
2503 
2504 	va_start(va, fmt);
2505 
2506 	vaf.fmt = fmt;
2507 	vaf.va = &va;
2508 
2509 	pr_warn("%s%pV", pbuf, &vaf);
2510 
2511 	va_end(va);
2512 
2513 }
2514 
2515 /*
2516  * This function is for formatting and logging debug information.
2517  * It is to be used when vha is not available and pci is available,
2518  * i.e., before host allocation. It formats the message and logs it
2519  * to the messages file.
2520  * parameters:
2521  * level: The level of the debug messages to be printed.
2522  *        If the ql2xextended_error_logging mask enables this level,
2523  *        this message will appear in the messages file.
2524  * pdev:  Pointer to the struct pci_dev.
2525  * id:    This is a unique id for the level. It identifies the part
2526  *        of the code from where the message originated.
2527  * fmt:   The printf-style format string for the message to be displayed.
2528  */
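/*
 * Usage sketch (illustrative; pdev and the message id are hypothetical, and
 * ql_dbg_init is assumed to be a level bit from qla_dbg.h):
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x0011,
 *	    "PCI resources mapped.\n");
 */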
2529 void
2530 ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2531 {
2532 	va_list va;
2533 	struct va_format vaf;
2534 	char pbuf[128];
2535 
2536 	if (pdev == NULL)
2537 		return;
2538 
2539 	ql_ktrace(1, level, pbuf, pdev, NULL, id, fmt);
2540 
2541 	if (!ql_mask_match(level))
2542 		return;
2543 
2544 	va_start(va, fmt);
2545 
2546 	vaf.fmt = fmt;
2547 	vaf.va = &va;
2548 
2549 	if (!pbuf[0]) /* set by ql_ktrace */
2550 		ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL,
2551 			      id + ql_dbg_offset);
2552 	pr_warn("%s%pV", pbuf, &vaf);
2553 
2554 	va_end(va);
2555 }
2556 
2557 /*
2558  * This function is for formatting and logging log messages.
2559  * It is to be used when vha is available. It formats the message
2560  * and logs it to the messages file. All the messages will be logged
2561  * irrespective of the value of ql2xextended_error_logging.
2562  * parameters:
2563  * level: The level of the log messages to be printed in the
2564  *        messages file.
2565  * vha:   Pointer to the scsi_qla_host_t
2566  * id:    This is a unique id for the level. It identifies the
2567  *        part of the code from where the message originated.
2568  * fmt:   The printf-style format string for the message to be displayed.
2569  */
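/*
 * Usage sketch (illustrative; vha and the message id are hypothetical).
 * Unlike ql_dbg(), the message is emitted whenever @level does not exceed
 * ql_errlev, regardless of ql2xextended_error_logging:
 *
 *	ql_log(ql_log_warn, vha, 0x0123,
 *	    "Link down, retry count exceeded.\n");
 */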
2570 void
2571 ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2572 {
2573 	va_list va;
2574 	struct va_format vaf;
2575 	char pbuf[128];
2576 
2577 	if (level > ql_errlev)
2578 		return;
2579 
2580 	ql_ktrace(0, level, pbuf, NULL, vha, id, fmt);
2581 
2582 	if (!pbuf[0]) /* set by ql_ktrace */
2583 		ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
2584 
2585 	va_start(va, fmt);
2586 
2587 	vaf.fmt = fmt;
2588 	vaf.va = &va;
2589 
2590 	switch (level) {
2591 	case ql_log_fatal: /* FATAL LOG */
2592 		pr_crit("%s%pV", pbuf, &vaf);
2593 		break;
2594 	case ql_log_warn:
2595 		pr_err("%s%pV", pbuf, &vaf);
2596 		break;
2597 	case ql_log_info:
2598 		pr_warn("%s%pV", pbuf, &vaf);
2599 		break;
2600 	default:
2601 		pr_info("%s%pV", pbuf, &vaf);
2602 		break;
2603 	}
2604 
2605 	va_end(va);
2606 }
2607 
2608 /*
2609  * This function is for formatting and logging log messages.
2610  * It is to be used when vha is not available and pci is available,
2611  * i.e., before host allocation. It formats the message and logs
2612  * it to the messages file. All the messages are logged irrespective
2613  * of the value of ql2xextended_error_logging.
2614  * parameters:
2615  * level: The level of the log messages to be printed in the
2616  *        messages file.
2617  * pdev:  Pointer to the struct pci_dev.
2618  * id:    This is a unique id for the level. It identifies the
2619  *        part of the code from where the message originated.
2620  * fmt:   The printf-style format string for the message to be displayed.
2621  */
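/*
 * Usage sketch (illustrative; pdev and the message id are hypothetical):
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x0044,
 *	    "Unable to allocate memory for adapter.\n");
 */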
2622 void
2623 ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2624 {
2625 	va_list va;
2626 	struct va_format vaf;
2627 	char pbuf[128];
2628 
2629 	if (pdev == NULL)
2630 		return;
2631 	if (level > ql_errlev)
2632 		return;
2633 
2634 	ql_ktrace(0, level, pbuf, pdev, NULL, id, fmt);
2635 
2636 	if (!pbuf[0]) /* set by ql_ktrace */
2637 		ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, id);
2638 
2639 	va_start(va, fmt);
2640 
2641 	vaf.fmt = fmt;
2642 	vaf.va = &va;
2643 
2644 	switch (level) {
2645 	case ql_log_fatal: /* FATAL LOG */
2646 		pr_crit("%s%pV", pbuf, &vaf);
2647 		break;
2648 	case ql_log_warn:
2649 		pr_err("%s%pV", pbuf, &vaf);
2650 		break;
2651 	case ql_log_info:
2652 		pr_warn("%s%pV", pbuf, &vaf);
2653 		break;
2654 	default:
2655 		pr_info("%s%pV", pbuf, &vaf);
2656 		break;
2657 	}
2658 
2659 	va_end(va);
2660 }
2661 
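/*
 * Dump the first six mailbox registers of the adapter behind @vha at debug
 * @level with message @id, selecting the mailbox register block that matches
 * the adapter family (P3P-type, FWI2-capable, or older ISPs).
 */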
2662 void
2663 ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
2664 {
2665 	int i;
2666 	struct qla_hw_data *ha = vha->hw;
2667 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2668 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2669 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2670 	__le16 __iomem *mbx_reg;
2671 
2672 	if (!ql_mask_match(level))
2673 		return;
2674 
2675 	if (IS_P3P_TYPE(ha))
2676 		mbx_reg = &reg82->mailbox_in[0];
2677 	else if (IS_FWI2_CAPABLE(ha))
2678 		mbx_reg = &reg24->mailbox0;
2679 	else
2680 		mbx_reg = MAILBOX_REG(ha, reg, 0);
2681 
2682 	ql_dbg(level, vha, id, "Mailbox registers:\n");
2683 	for (i = 0; i < 6; i++, mbx_reg++)
2684 		ql_dbg(level, vha, id,
2685 		    "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg));
2686 }
2687 
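/*
 * Hex-dump @size bytes of @buf at debug @level with message @id, 16 bytes
 * per line, when the level is enabled by ql2xextended_error_logging.
 * Illustrative call (vha, pkt and the message id are hypothetical, and
 * ql_dbg_buffer is assumed to be a level bit from qla_dbg.h):
 *
 *	ql_dump_buffer(ql_dbg_buffer, vha, 0x0123, pkt, sizeof(*pkt));
 */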
2688 void
2689 ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
2690 	       uint size)
2691 {
2692 	uint cnt;
2693 
2694 	if (!ql_mask_match(level))
2695 		return;
2696 
2697 	ql_dbg(level, vha, id,
2698 	    "%-+5d  0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F\n", size);
2699 	ql_dbg(level, vha, id,
2700 	    "----- -----------------------------------------------\n");
2701 	for (cnt = 0; cnt < size; cnt += 16) {
2702 		ql_dbg(level, vha, id, "%04x: ", cnt);
2703 		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2704 			       buf + cnt, min(16U, size - cnt), false);
2705 	}
2706 }
2707 
2708 /*
2709  * This function is for formatting and logging log messages.
2710  * It is to be used when a queue pair (qpair) is available. It formats
2711  * the message and logs it to the messages file. All the messages will
2712  * be logged irrespective of the value of ql2xextended_error_logging.
2713  * parameters:
2714  * level: The level of the log messages to be printed in the
2715  *        messages file.
2716  * qpair: Pointer to the qla_qpair; its vha is used for the message prefix.
2717  * id:    This is a unique id for the level. It identifies the
2718  *        part of the code from where the message originated.
2719  * fmt:   The printf-style format string for the message to be displayed.
2720  */
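/*
 * Usage sketch (illustrative; the qpair pointer and message id are
 * hypothetical):
 *
 *	ql_log_qp(ql_log_warn, qpair, 0x0123,
 *	    "Unable to allocate memory for queue pair.\n");
 */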
2721 void
2722 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2723     const char *fmt, ...)
2724 {
2725 	va_list va;
2726 	struct va_format vaf;
2727 	char pbuf[128];
2728 
2729 	if (level > ql_errlev)
2730 		return;
2731 
2732 	ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
2733 
2734 	if (!pbuf[0]) /* set by ql_ktrace */
2735 		ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
2736 			      qpair ? qpair->vha : NULL, id);
2737 
2738 	va_start(va, fmt);
2739 
2740 	vaf.fmt = fmt;
2741 	vaf.va = &va;
2742 
2743 	switch (level) {
2744 	case ql_log_fatal: /* FATAL LOG */
2745 		pr_crit("%s%pV", pbuf, &vaf);
2746 		break;
2747 	case ql_log_warn:
2748 		pr_err("%s%pV", pbuf, &vaf);
2749 		break;
2750 	case ql_log_info:
2751 		pr_warn("%s%pV", pbuf, &vaf);
2752 		break;
2753 	default:
2754 		pr_info("%s%pV", pbuf, &vaf);
2755 		break;
2756 	}
2757 
2758 	va_end(va);
2759 }
2760 
2761 /*
2762  * This function is for formatting and logging debug information.
2763  * It is to be used when a queue pair (qpair) is available. It formats
2764  * the message and logs it to the messages file.
2765  * parameters:
2766  * level: The level of the debug messages to be printed.
2767  *        If the ql2xextended_error_logging mask enables this level,
2768  *        this message will appear in the messages file.
2769  * qpair: Pointer to the qla_qpair; its vha is used for the message prefix.
2770  * id:    This is a unique identifier for the level. It identifies the
2771  *        part of the code from where the message originated.
2772  * fmt:   The printf-style format string for the message to be displayed.
2773  */
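/*
 * Usage sketch (illustrative; qpair, the message id and the arguments are
 * hypothetical, and ql_dbg_io is assumed to be a level bit from qla_dbg.h):
 *
 *	ql_dbg_qp(ql_dbg_io, qpair, 0x0123,
 *	    "Command aborted, handle=%x.\n", handle);
 */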
2774 void
2775 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2776     const char *fmt, ...)
2777 {
2778 	va_list va;
2779 	struct va_format vaf;
2780 	char pbuf[128];
2781 
2782 	ql_ktrace(1, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
2783 
2784 	if (!ql_mask_match(level))
2785 		return;
2786 
2787 	va_start(va, fmt);
2788 
2789 	vaf.fmt = fmt;
2790 	vaf.va = &va;
2791 
2792 	if (!pbuf[0]) /* set by ql_ktrace */
2793 		ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
2794 			      qpair ? qpair->vha : NULL, id + ql_dbg_offset);
2795 
2796 	pr_warn("%s%pV", pbuf, &vaf);
2797 
2798 	va_end(va);
2799 
2800 }
2801