xref: /linux/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic qlcnic NIC Driver
4  * Copyright (c) 2009-2013 QLogic Corporation
5  */
6 
7 #include <net/ip.h>
8 
9 #include "qlcnic.h"
10 #include "qlcnic_hdr.h"
11 #include "qlcnic_83xx_hw.h"
12 #include "qlcnic_hw.h"
13 
14 #define QLC_83XX_MINIDUMP_FLASH		0x520000
15 #define QLC_83XX_OCM_INDEX			3
16 #define QLC_83XX_PCI_INDEX			0
17 #define QLC_83XX_DMA_ENGINE_INDEX		8
18 
19 static const u32 qlcnic_ms_read_data[] = {
20 	0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
21 };
22 
23 #define QLCNIC_DUMP_WCRB	BIT_0
24 #define QLCNIC_DUMP_RWCRB	BIT_1
25 #define QLCNIC_DUMP_ANDCRB	BIT_2
26 #define QLCNIC_DUMP_ORCRB	BIT_3
27 #define QLCNIC_DUMP_POLLCRB	BIT_4
28 #define QLCNIC_DUMP_RD_SAVE	BIT_5
29 #define QLCNIC_DUMP_WRT_SAVED	BIT_6
30 #define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7
31 #define QLCNIC_DUMP_SKIP	BIT_7
32 
33 #define QLCNIC_DUMP_MASK_MAX	0xff
34 
/* Descriptor written into adapter (MS) memory to program one PEX DMA
 * transfer from adapter memory into the host-side dump buffer.
 */
struct qlcnic_pex_dma_descriptor {
	u32	read_data_size;		/* bytes to transfer */
	u32	dma_desc_cmd;		/* command word, incl. PCI function */
	u32	src_addr_low;		/* adapter-side source address */
	u32	src_addr_high;
	u32	dma_bus_addr_low;	/* host DMA buffer bus address */
	u32	dma_bus_addr_high;
	u32	rsvd[6];
} __packed;
44 
/* Header preceding every entry in the minidump template. */
struct qlcnic_common_entry_hdr {
	u32     type;		/* enum qlcnic_minidump_opcode */
	u32     offset;		/* offset to the next entry */
	u32     cap_size;	/* expected capture size in bytes */
#if defined(__LITTLE_ENDIAN)
	u8      mask;		/* capture-mask bits this entry belongs to */
	u8      rsvd[2];
	u8      flags;		/* e.g. QLCNIC_DUMP_SKIP */
#else
	u8      flags;
	u8      rsvd[2];
	u8      mask;
#endif
} __packed;
59 
/* Template payload for a CRB register-range dump entry. */
struct __crb {
	u32	addr;		/* first register address */
#if defined(__LITTLE_ENDIAN)
	u8	stride;		/* address increment per read */
	u8	rsvd1[3];
#else
	u8	rsvd1[3];
	u8	stride;
#endif
	u32	data_size;
	u32	no_ops;		/* number of registers to read */
	u32	rsvd2[4];
} __packed;
73 
/* Template payload for a CRB control entry (see qlcnic_dump_ctrl). */
struct __ctrl {
	u32	addr;		/* first CRB address to operate on */
#if defined(__LITTLE_ENDIAN)
	u8	stride;		/* address increment per op */
	u8	index_a;	/* saved-state slot overriding the address */
	u16	timeout;	/* poll timeout, in poll iterations */
#else
	u16	timeout;
	u8	index_a;
	u8	stride;
#endif
	u32	data_size;
	u32	no_ops;
#if defined(__LITTLE_ENDIAN)
	u8	opcode;		/* bitmask of QLCNIC_DUMP_* operations */
	u8	index_v;	/* saved-state slot for read/write values */
	u8	shl_val;
	u8	shr_val;
#else
	u8	shr_val;
	u8	shl_val;
	u8	index_v;
	u8	opcode;
#endif
	u32	val1;
	u32	val2;
	u32	val3;
} __packed;
102 
/* Template payload for L1/L2 cache dump entries. */
struct __cache {
	u32	addr;			/* tag/select register */
#if defined(__LITTLE_ENDIAN)
	u16	stride;			/* tag increment per op */
	u16	init_tag_val;		/* initial tag value */
#else
	u16	init_tag_val;
	u16	stride;
#endif
	u32	size;
	u32	no_ops;
	u32	ctrl_addr;		/* control register */
	u32	ctrl_val;		/* low word: control value; high word:
					 * L2 poll mask (LSB) / timeout (MSB)
					 */
	u32	read_addr;
#if defined(__LITTLE_ENDIAN)
	u8	read_addr_stride;
	u8	read_addr_num;		/* words read per cache line */
	u8	rsvd1[2];
#else
	u8	rsvd1[2];
	u8	read_addr_num;
	u8	read_addr_stride;
#endif
} __packed;
127 
/* Template payload for an on-chip-memory (OCM) dump entry. */
struct __ocm {
	u8	rsvd[8];
	u32	size;
	u32	no_ops;			/* number of 32-bit reads */
	u8	rsvd1[8];
	u32	read_addr;		/* offset into BAR0 mapping */
	u32	read_addr_stride;
} __packed;
136 
/* Template payload for memory/ROM dump entries. */
struct __mem {
	u32	desc_card_addr;		/* MS address for the DMA descriptor */
	u32	dma_desc_cmd;		/* DMA command template bits */
	u32	start_dma_cmd;		/* value kicking off the DMA */
	u32	rsvd[3];
	u32	addr;			/* source address in adapter memory */
	u32	size;			/* bytes to capture */
} __packed;
145 
/* Template payload for a single-select mux dump entry. */
struct __mux {
	u32	addr;		/* mux select register */
	u8	rsvd[4];
	u32	size;
	u32	no_ops;		/* number of select/read pairs */
	u32	val;		/* initial select value */
	u32	val_stride;	/* select increment per op */
	u32	read_addr;	/* data register */
	u8	rsvd2[4];
} __packed;
156 
/* Template payload for a queue-indexed register dump entry. */
struct __queue {
	u32	sel_addr;		/* queue-id select register */
#if defined(__LITTLE_ENDIAN)
	u16	stride;			/* queue-id increment per op */
	u8	rsvd[2];
#else
	u8	rsvd[2];
	u16	stride;
#endif
	u32	size;
	u32	no_ops;			/* number of queues */
	u8	rsvd2[8];
	u32	read_addr;		/* first data register */
#if defined(__LITTLE_ENDIAN)
	u8	read_addr_stride;
	u8	read_addr_cnt;		/* words read per queue */
	u8	rsvd3[2];
#else
	u8	rsvd3[2];
	u8	read_addr_cnt;
	u8	read_addr_stride;
#endif
} __packed;
180 
/* Template payload for a poll-then-read dump entry (83xx). */
struct __pollrd {
	u32	sel_addr;		/* select register, also polled */
	u32	read_addr;		/* data register */
	u32	sel_val;		/* initial select value */
#if defined(__LITTLE_ENDIAN)
	u16	sel_val_stride;
	u16	no_ops;
#else
	u16	no_ops;
	u16	sel_val_stride;
#endif
	u32	poll_wait;		/* max poll iterations */
	u32	poll_mask;		/* ready bit(s) in sel_addr */
	u32	data_size;
	u8	rsvd[4];
} __packed;
197 
/* Template payload for a dual-select mux dump entry (83xx). */
struct __mux2 {
	u32	sel_addr1;		/* primary select register */
	u32	sel_addr2;		/* secondary (masked) select register */
	u32	sel_val1;
	u32	sel_val2;
	u32	no_ops;
	u32	sel_val_mask;		/* mask applied before sel_addr2 write */
	u32	read_addr;
#if defined(__LITTLE_ENDIAN)
	u8	sel_val_stride;
	u8	data_size;
	u8	rsvd[2];
#else
	u8	rsvd[2];
	u8	data_size;
	u8	sel_val_stride;
#endif
} __packed;
216 
/* Template payload for a poll/read-modify-write dump entry (83xx). */
struct __pollrdmwr {
	u32	addr1;		/* trigger + poll register */
	u32	addr2;		/* register read, masked and written back */
	u32	val1;		/* first value written to addr1 */
	u32	val2;		/* second value written to addr1 */
	u32	poll_wait;	/* max poll iterations */
	u32	poll_mask;	/* ready bit(s) in addr1 */
	u32	mod_mask;	/* mask applied to addr2's value */
	u32	data_size;
} __packed;
227 
/* One template entry: common header plus the opcode-specific payload. */
struct qlcnic_dump_entry {
	struct qlcnic_common_entry_hdr hdr;
	union {
		struct __crb		crb;
		struct __cache		cache;
		struct __ocm		ocm;
		struct __mem		mem;
		struct __mux		mux;
		struct __queue		que;
		struct __ctrl		ctrl;
		struct __pollrdmwr	pollrdmwr;
		struct __mux2		mux2;
		struct __pollrd		pollrd;
	} region;
} __packed;
243 
/* Opcodes found in template entry headers; each selects one handler in
 * the dump-operations tables below.
 */
enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP		= 0,
	QLCNIC_DUMP_READ_CRB	= 1,
	QLCNIC_DUMP_READ_MUX	= 2,
	QLCNIC_DUMP_QUEUE	= 3,
	QLCNIC_DUMP_BRD_CONFIG	= 4,
	QLCNIC_DUMP_READ_OCM	= 6,
	QLCNIC_DUMP_PEG_REG	= 7,
	QLCNIC_DUMP_L1_DTAG	= 8,
	QLCNIC_DUMP_L1_ITAG	= 9,
	QLCNIC_DUMP_L1_DATA	= 11,
	QLCNIC_DUMP_L1_INST	= 12,
	QLCNIC_DUMP_L2_DTAG	= 21,
	QLCNIC_DUMP_L2_ITAG	= 22,
	QLCNIC_DUMP_L2_DATA	= 23,
	QLCNIC_DUMP_L2_INST	= 24,
	QLCNIC_DUMP_POLL_RD	= 35,
	QLCNIC_READ_MUX2	= 36,
	QLCNIC_READ_POLLRDMWR	= 37,
	QLCNIC_DUMP_READ_ROM	= 71,
	QLCNIC_DUMP_READ_MEM	= 72,
	QLCNIC_DUMP_READ_CTRL	= 98,
	QLCNIC_DUMP_TLHDR	= 99,
	QLCNIC_DUMP_RDEND	= 255
};
269 
/* Return the saved-state scratch word at @index from an 82xx template. */
inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
{
	struct qlcnic_82xx_dump_template_hdr *tmpl = t_hdr;

	return tmpl->saved_state[index];
}
276 
/* Store @value into the 82xx template's saved-state slot @index. */
inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
					u32 value)
{
	struct qlcnic_82xx_dump_template_hdr *tmpl = t_hdr;

	tmpl->saved_state[index] = value;
}
284 
qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump * fw_dump)285 void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
286 {
287 	struct qlcnic_82xx_dump_template_hdr *hdr;
288 
289 	hdr = fw_dump->tmpl_hdr;
290 	fw_dump->tmpl_hdr_size = hdr->size;
291 	fw_dump->version = hdr->version;
292 	fw_dump->num_entries = hdr->num_entries;
293 	fw_dump->offset = hdr->offset;
294 
295 	hdr->drv_cap_mask = hdr->cap_mask;
296 	fw_dump->cap_mask = hdr->cap_mask;
297 
298 	fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
299 }
300 
qlcnic_82xx_get_cap_size(void * t_hdr,int index)301 inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
302 {
303 	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
304 
305 	return hdr->cap_sizes[index];
306 }
307 
/* Record @value in the 82xx template's sys_info slot @idx. */
void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
{
	struct qlcnic_82xx_dump_template_hdr *tmpl = t_hdr;

	tmpl->sys_info[idx] = value;
}
314 
/* Record the driver's capture mask in the 82xx template header. */
void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
{
	struct qlcnic_82xx_dump_template_hdr *tmpl = tmpl_hdr;

	tmpl->drv_cap_mask = mask;
}
321 
/* Return the saved-state scratch word at @index from an 83xx template. */
inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
{
	struct qlcnic_83xx_dump_template_hdr *tmpl = t_hdr;

	return tmpl->saved_state[index];
}
328 
/* Store @value into the 83xx template's saved-state slot @index. */
inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
					u32 value)
{
	struct qlcnic_83xx_dump_template_hdr *tmpl = t_hdr;

	tmpl->saved_state[index] = value;
}
336 
337 #define QLCNIC_TEMPLATE_VERSION (0x20001)
338 
qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump * fw_dump)339 void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
340 {
341 	struct qlcnic_83xx_dump_template_hdr *hdr;
342 
343 	hdr = fw_dump->tmpl_hdr;
344 	fw_dump->tmpl_hdr_size = hdr->size;
345 	fw_dump->version = hdr->version;
346 	fw_dump->num_entries = hdr->num_entries;
347 	fw_dump->offset = hdr->offset;
348 
349 	hdr->drv_cap_mask = hdr->cap_mask;
350 	fw_dump->cap_mask = hdr->cap_mask;
351 
352 	fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
353 			       QLCNIC_TEMPLATE_VERSION;
354 }
355 
qlcnic_83xx_get_cap_size(void * t_hdr,int index)356 inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
357 {
358 	struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
359 
360 	return hdr->cap_sizes[index];
361 }
362 
/* Record @value in the 83xx template's sys_info slot @idx. */
void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
{
	struct qlcnic_83xx_dump_template_hdr *tmpl = t_hdr;

	tmpl->sys_info[idx] = value;
}
369 
/* Record the driver's capture mask in the 83xx template header. */
void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
{
	struct qlcnic_83xx_dump_template_hdr *tmpl = tmpl_hdr;

	tmpl->drv_cap_mask = mask;
}
377 
/* Maps a template opcode to its capture handler.  Handlers return the
 * number of bytes they wrote to the dump buffer.
 */
struct qlcnic_dump_operations {
	enum qlcnic_minidump_opcode opcode;
	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
		       __le32 *);
};
383 
qlcnic_dump_crb(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)384 static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
385 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
386 {
387 	int i;
388 	u32 addr, data;
389 	struct __crb *crb = &entry->region.crb;
390 
391 	addr = crb->addr;
392 
393 	for (i = 0; i < crb->no_ops; i++) {
394 		data = qlcnic_ind_rd(adapter, addr);
395 		*buffer++ = cpu_to_le32(addr);
396 		*buffer++ = cpu_to_le32(data);
397 		addr += crb->stride;
398 	}
399 	return crb->no_ops * 2 * sizeof(u32);
400 }
401 
qlcnic_dump_ctrl(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)402 static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
403 			    struct qlcnic_dump_entry *entry, __le32 *buffer)
404 {
405 	void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
406 	struct __ctrl *ctr = &entry->region.ctrl;
407 	int i, k, timeout = 0;
408 	u32 addr, data, temp;
409 	u8 no_ops;
410 
411 	addr = ctr->addr;
412 	no_ops = ctr->no_ops;
413 
414 	for (i = 0; i < no_ops; i++) {
415 		k = 0;
416 		for (k = 0; k < 8; k++) {
417 			if (!(ctr->opcode & (1 << k)))
418 				continue;
419 			switch (1 << k) {
420 			case QLCNIC_DUMP_WCRB:
421 				qlcnic_ind_wr(adapter, addr, ctr->val1);
422 				break;
423 			case QLCNIC_DUMP_RWCRB:
424 				data = qlcnic_ind_rd(adapter, addr);
425 				qlcnic_ind_wr(adapter, addr, data);
426 				break;
427 			case QLCNIC_DUMP_ANDCRB:
428 				data = qlcnic_ind_rd(adapter, addr);
429 				qlcnic_ind_wr(adapter, addr,
430 					      (data & ctr->val2));
431 				break;
432 			case QLCNIC_DUMP_ORCRB:
433 				data = qlcnic_ind_rd(adapter, addr);
434 				qlcnic_ind_wr(adapter, addr,
435 					      (data | ctr->val3));
436 				break;
437 			case QLCNIC_DUMP_POLLCRB:
438 				while (timeout <= ctr->timeout) {
439 					data = qlcnic_ind_rd(adapter, addr);
440 					if ((data & ctr->val2) == ctr->val1)
441 						break;
442 					usleep_range(1000, 2000);
443 					timeout++;
444 				}
445 				if (timeout > ctr->timeout) {
446 					dev_info(&adapter->pdev->dev,
447 					"Timed out, aborting poll CRB\n");
448 					return -EINVAL;
449 				}
450 				break;
451 			case QLCNIC_DUMP_RD_SAVE:
452 				temp = ctr->index_a;
453 				if (temp)
454 					addr = qlcnic_get_saved_state(adapter,
455 								      hdr,
456 								      temp);
457 				data = qlcnic_ind_rd(adapter, addr);
458 				qlcnic_set_saved_state(adapter, hdr,
459 						       ctr->index_v, data);
460 				break;
461 			case QLCNIC_DUMP_WRT_SAVED:
462 				temp = ctr->index_v;
463 				if (temp)
464 					data = qlcnic_get_saved_state(adapter,
465 								      hdr,
466 								      temp);
467 				else
468 					data = ctr->val1;
469 
470 				temp = ctr->index_a;
471 				if (temp)
472 					addr = qlcnic_get_saved_state(adapter,
473 								      hdr,
474 								      temp);
475 				qlcnic_ind_wr(adapter, addr, data);
476 				break;
477 			case QLCNIC_DUMP_MOD_SAVE_ST:
478 				data = qlcnic_get_saved_state(adapter, hdr,
479 							      ctr->index_v);
480 				data <<= ctr->shl_val;
481 				data >>= ctr->shr_val;
482 				if (ctr->val2)
483 					data &= ctr->val2;
484 				data |= ctr->val3;
485 				data += ctr->val1;
486 				qlcnic_set_saved_state(adapter, hdr,
487 						       ctr->index_v, data);
488 				break;
489 			default:
490 				dev_info(&adapter->pdev->dev,
491 					 "Unknown opcode\n");
492 				break;
493 			}
494 		}
495 		addr += ctr->stride;
496 	}
497 	return 0;
498 }
499 
qlcnic_dump_mux(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)500 static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
501 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
502 {
503 	int loop;
504 	u32 val, data = 0;
505 	struct __mux *mux = &entry->region.mux;
506 
507 	val = mux->val;
508 	for (loop = 0; loop < mux->no_ops; loop++) {
509 		qlcnic_ind_wr(adapter, mux->addr, val);
510 		data = qlcnic_ind_rd(adapter, mux->read_addr);
511 		*buffer++ = cpu_to_le32(val);
512 		*buffer++ = cpu_to_le32(data);
513 		val += mux->val_stride;
514 	}
515 	return 2 * mux->no_ops * sizeof(u32);
516 }
517 
qlcnic_dump_que(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)518 static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
519 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
520 {
521 	int i, loop;
522 	u32 cnt, addr, data, que_id = 0;
523 	struct __queue *que = &entry->region.que;
524 
525 	addr = que->read_addr;
526 	cnt = que->read_addr_cnt;
527 
528 	for (loop = 0; loop < que->no_ops; loop++) {
529 		qlcnic_ind_wr(adapter, que->sel_addr, que_id);
530 		addr = que->read_addr;
531 		for (i = 0; i < cnt; i++) {
532 			data = qlcnic_ind_rd(adapter, addr);
533 			*buffer++ = cpu_to_le32(data);
534 			addr += que->read_addr_stride;
535 		}
536 		que_id += que->stride;
537 	}
538 	return que->no_ops * cnt * sizeof(u32);
539 }
540 
qlcnic_dump_ocm(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)541 static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
542 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
543 {
544 	int i;
545 	u32 data;
546 	void __iomem *addr;
547 	struct __ocm *ocm = &entry->region.ocm;
548 
549 	addr = adapter->ahw->pci_base0 + ocm->read_addr;
550 	for (i = 0; i < ocm->no_ops; i++) {
551 		data = readl(addr);
552 		*buffer++ = cpu_to_le32(data);
553 		addr += ocm->read_addr_stride;
554 	}
555 	return ocm->no_ops * sizeof(u32);
556 }
557 
/* Dump a flash ROM region through the indirect FLASH_ROM_WINDOW
 * interface (82xx path).
 *
 * Tries to take the flash hardware lock up to MAX_CTL_CHECK times.
 * NOTE(review): if the lock is never granted the read still proceeds —
 * confirm this best-effort behaviour is intentional.
 * Returns rom->size (bytes written to @buffer).
 */
static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;

	fl_addr = rom->addr;
	size = rom->size / 4;	/* number of 32-bit words */
lock_try:
	lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		usleep_range(10000, 11000);
		count++;
		goto lock_try;
	}
	QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
			    adapter->ahw->pci_func);
	for (i = 0; i < size; i++) {
		/* Select the 64K window, then read within it */
		addr = fl_addr & 0xFFFF0000;
		qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		val = qlcnic_ind_rd(adapter, addr);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	/* Reading the unlock register releases the flash lock */
	QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
	return rom->size;
}
587 
qlcnic_dump_l1_cache(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)588 static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
589 				struct qlcnic_dump_entry *entry, __le32 *buffer)
590 {
591 	int i;
592 	u32 cnt, val, data, addr;
593 	struct __cache *l1 = &entry->region.cache;
594 
595 	val = l1->init_tag_val;
596 
597 	for (i = 0; i < l1->no_ops; i++) {
598 		qlcnic_ind_wr(adapter, l1->addr, val);
599 		qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
600 		addr = l1->read_addr;
601 		cnt = l1->read_addr_num;
602 		while (cnt) {
603 			data = qlcnic_ind_rd(adapter, addr);
604 			*buffer++ = cpu_to_le32(data);
605 			addr += l1->read_addr_stride;
606 			cnt--;
607 		}
608 		val += l1->stride;
609 	}
610 	return l1->no_ops * l1->read_addr_num * sizeof(u32);
611 }
612 
/* Dump an L2 cache region.  Like the L1 variant, but the control write
 * is optional and completion is polled: the high word of ctrl_val
 * encodes the poll mask (LSB) and the poll tick budget (MSB); each tick
 * sleeps 1-2 ms.
 * Returns bytes written, or -EINVAL (cast to u32) on poll timeout.
 */
static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
				struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	u8 poll_mask, poll_to, time_out = 0;
	struct __cache *l2 = &entry->region.cache;

	val = l2->init_tag_val;
	poll_mask = LSB(MSW(l2->ctrl_val));
	poll_to = MSB(MSW(l2->ctrl_val));

	for (i = 0; i < l2->no_ops; i++) {
		qlcnic_ind_wr(adapter, l2->addr, val);
		/* Control write is skipped when the low word is zero */
		if (LSW(l2->ctrl_val))
			qlcnic_ind_wr(adapter, l2->ctrl_addr,
				      LSW(l2->ctrl_val));
		if (!poll_mask)
			goto skip_poll;
		/* Wait for the hardware to clear the poll-mask bits */
		do {
			data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
			if (!(data & poll_mask))
				break;
			usleep_range(1000, 2000);
			time_out++;
		} while (time_out <= poll_to);

		if (time_out > poll_to) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return -EINVAL;
		}
skip_poll:
		addr = l2->read_addr;
		cnt = l2->read_addr_num;
		while (cnt) {
			data = qlcnic_ind_rd(adapter, addr);
			*buffer++ = cpu_to_le32(data);
			addr += l2->read_addr_stride;
			cnt--;
		}
		val += l2->stride;
	}
	return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
659 
qlcnic_read_memory_test_agent(struct qlcnic_adapter * adapter,struct __mem * mem,__le32 * buffer,int * ret)660 static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
661 					 struct __mem *mem, __le32 *buffer,
662 					 int *ret)
663 {
664 	u32 addr, data, test;
665 	int i, reg_read;
666 
667 	reg_read = mem->size;
668 	addr = mem->addr;
669 	/* check for data size of multiple of 16 and 16 byte alignment */
670 	if ((addr & 0xf) || (reg_read%16)) {
671 		dev_info(&adapter->pdev->dev,
672 			 "Unaligned memory addr:0x%x size:0x%x\n",
673 			 addr, reg_read);
674 		*ret = -EINVAL;
675 		return 0;
676 	}
677 
678 	mutex_lock(&adapter->ahw->mem_lock);
679 
680 	while (reg_read != 0) {
681 		qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
682 		qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
683 		qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
684 
685 		for (i = 0; i < MAX_CTL_CHECK; i++) {
686 			test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
687 			if (!(test & TA_CTL_BUSY))
688 				break;
689 		}
690 		if (i == MAX_CTL_CHECK) {
691 			if (printk_ratelimit()) {
692 				dev_err(&adapter->pdev->dev,
693 					"failed to read through agent\n");
694 				*ret = -EIO;
695 				goto out;
696 			}
697 		}
698 		for (i = 0; i < 4; i++) {
699 			data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
700 			*buffer++ = cpu_to_le32(data);
701 		}
702 		addr += 16;
703 		reg_read -= 16;
704 		ret += 16;
705 		cond_resched();
706 	}
707 out:
708 	mutex_unlock(&adapter->ahw->mem_lock);
709 	return mem->size;
710 }
711 
712 /* DMA register base address */
713 #define QLC_DMA_REG_BASE_ADDR(dma_no)	(0x77320000 + (dma_no * 0x10000))
714 
715 /* DMA register offsets w.r.t base address */
716 #define QLC_DMA_CMD_BUFF_ADDR_LOW	0
717 #define QLC_DMA_CMD_BUFF_ADDR_HI	4
718 #define QLC_DMA_CMD_STATUS_CTRL		8
719 
/* Kick off a previously-programmed PEX DMA transfer and wait for it.
 *
 * The DMA engine number comes from the template's saved state
 * (QLC_83XX_DMA_ENGINE_INDEX).  The descriptor's MS-memory address and
 * the start command are written to the engine's registers, then the
 * status register is polled (BIT_1 set == busy) up to 400 times with
 * 250-500us sleeps.
 * Returns 0 on success, or a negative errno on register-write failure
 * or timeout.
 */
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
				struct __mem *mem)
{
	struct device *dev = &adapter->pdev->dev;
	u32 dma_no, dma_base_addr, temp_addr;
	int i, ret, dma_sts;
	void *tmpl_hdr;

	tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
	dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
					QLC_83XX_DMA_ENGINE_INDEX);
	dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);

	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
	ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
	if (ret)
		return ret;

	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
	ret = qlcnic_ind_wr(adapter, temp_addr, 0);
	if (ret)
		return ret;

	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
	ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
	if (ret)
		return ret;

	/* Wait for DMA to complete */
	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
	for (i = 0; i < 400; i++) {
		dma_sts = qlcnic_ind_rd(adapter, temp_addr);

		if (dma_sts & BIT_1)
			usleep_range(250, 500);
		else
			break;
	}

	if (i >= 400) {
		dev_info(dev, "PEX DMA operation timed out");
		ret = -EIO;
	}

	return ret;
}
766 
/* Capture @mem->size bytes of adapter memory using the PEX DMA engine,
 * in chunks of at most QLC_PEX_DMA_READ_SIZE via the pre-allocated
 * fw_dump DMA bounce buffer.
 *
 * On error *@ret is set to a negative errno.  Returns the number of
 * bytes actually copied into @buffer (0 on early failure).
 */
static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
				     struct __mem *mem,
				     __le32 *buffer, int *ret)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	u32 temp, dma_base_addr, size = 0, read_size = 0;
	struct qlcnic_pex_dma_descriptor *dma_descr;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_phys_addr;
	void *dma_buffer;
	void *tmpl_hdr;

	tmpl_hdr = fw_dump->tmpl_hdr;

	/* Check if DMA engine is available */
	temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
				      QLC_83XX_DMA_ENGINE_INDEX);
	dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
	temp = qlcnic_ind_rd(adapter,
			     dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);

	/* BIT_31 of the status/control register flags engine presence */
	if (!(temp & BIT_31)) {
		dev_info(dev, "%s: DMA engine is not available\n", __func__);
		*ret = -EIO;
		return 0;
	}

	/* Create DMA descriptor */
	dma_descr = kzalloc_obj(struct qlcnic_pex_dma_descriptor);
	if (!dma_descr) {
		*ret = -ENOMEM;
		return 0;
	}

	/* dma_desc_cmd  0:15  = 0
	 * dma_desc_cmd 16:19  = mem->dma_desc_cmd 0:3
	 * dma_desc_cmd 20:23  = pci function number
	 * dma_desc_cmd 24:31  = mem->dma_desc_cmd 8:15
	 */
	dma_phys_addr = fw_dump->phys_addr;
	dma_buffer = fw_dump->dma_buffer;
	temp = 0;
	temp = mem->dma_desc_cmd & 0xff0f;
	temp |= (adapter->ahw->pci_func & 0xf) << 4;
	dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
	dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
	dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
	dma_descr->src_addr_high = 0;

	/* Collect memory dump using multiple DMA operations if required */
	while (read_size < mem->size) {
		if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
			size = QLC_PEX_DMA_READ_SIZE;
		else
			size = mem->size - read_size;

		dma_descr->src_addr_low = mem->addr + read_size;
		dma_descr->read_data_size = size;

		/* Write DMA descriptor to MS memory*/
		temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
		*ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
					      (u32 *)dma_descr, temp);
		if (*ret) {
			dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
				 mem->desc_card_addr);
			goto free_dma_descr;
		}

		*ret = qlcnic_start_pex_dma(adapter, mem);
		if (*ret) {
			dev_info(dev, "Failed to start PEX DMA operation\n");
			goto free_dma_descr;
		}

		/* DMA landed in the bounce buffer; copy it out to the dump */
		memcpy(buffer, dma_buffer, size);
		buffer += size / 4;
		read_size += size;
	}

free_dma_descr:
	kfree(dma_descr);

	return read_size;
}
852 
qlcnic_read_memory(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)853 static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
854 			      struct qlcnic_dump_entry *entry, __le32 *buffer)
855 {
856 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
857 	struct device *dev = &adapter->pdev->dev;
858 	struct __mem *mem = &entry->region.mem;
859 	u32 data_size;
860 	int ret = 0;
861 
862 	if (fw_dump->use_pex_dma) {
863 		data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
864 						      &ret);
865 		if (ret)
866 			dev_info(dev,
867 				 "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
868 				 entry->hdr.mask);
869 		else
870 			return data_size;
871 	}
872 
873 	data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
874 	if (ret) {
875 		dev_info(dev,
876 			 "Failed to read memory dump using test agent method: mask[0x%x]\n",
877 			 entry->hdr.mask);
878 		return 0;
879 	} else {
880 		return data_size;
881 	}
882 }
883 
qlcnic_dump_nop(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)884 static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
885 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
886 {
887 	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
888 	return 0;
889 }
890 
/* Validate that a handler captured exactly the number of bytes the
 * template header promised.  Returns 1 when sizes match, otherwise
 * logs the mismatch and returns 0.
 */
static int qlcnic_valid_dump_entry(struct device *dev,
				   struct qlcnic_dump_entry *entry, u32 size)
{
	int ret = 1;

	if (size != entry->hdr.cap_size) {
		/* separator added between Size value and "Cap_size" label */
		dev_err(dev,
			"Invalid entry, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
			entry->hdr.type, entry->hdr.mask, size,
			entry->hdr.cap_size);
		ret = 0;
	}
	return ret;
}
904 
/* Poll/read-modify-write capture: write val1 to addr1 and poll for the
 * ready mask, mask addr2's value with mod_mask and write it back, write
 * val2 to addr1 and poll again, then record <addr2, masked data>.
 * Returns 8 (bytes written), or 0 if the first poll times out.
 * NOTE(review): the second poll loop has no timeout error check — the
 * capture proceeds regardless of its outcome; confirm intentional.
 */
static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
				 struct qlcnic_dump_entry *entry,
				 __le32 *buffer)
{
	struct __pollrdmwr *poll = &entry->region.pollrdmwr;
	u32 data, wait_count, poll_wait, temp;

	poll_wait = poll->poll_wait;

	qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
	wait_count = 0;

	while (wait_count < poll_wait) {
		data = qlcnic_ind_rd(adapter, poll->addr1);
		if ((data & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	if (wait_count == poll_wait) {
		dev_err(&adapter->pdev->dev,
			"Timeout exceeded in %s, aborting dump\n",
			__func__);
		return 0;
	}

	/* Read-modify-write addr2, then trigger the second phase */
	data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
	qlcnic_ind_wr(adapter, poll->addr2, data);
	qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
	wait_count = 0;

	while (wait_count < poll_wait) {
		temp = qlcnic_ind_rd(adapter, poll->addr1);
		if ((temp & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	*buffer++ = cpu_to_le32(poll->addr2);
	*buffer++ = cpu_to_le32(data);

	return 2 * sizeof(u32);

}
949 
qlcnic_read_pollrd(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)950 static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
951 			      struct qlcnic_dump_entry *entry, __le32 *buffer)
952 {
953 	struct __pollrd *pollrd = &entry->region.pollrd;
954 	u32 data, wait_count, poll_wait, sel_val;
955 	int i;
956 
957 	poll_wait = pollrd->poll_wait;
958 	sel_val = pollrd->sel_val;
959 
960 	for (i = 0; i < pollrd->no_ops; i++) {
961 		qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
962 		wait_count = 0;
963 		while (wait_count < poll_wait) {
964 			data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
965 			if ((data & pollrd->poll_mask) != 0)
966 				break;
967 			wait_count++;
968 		}
969 
970 		if (wait_count == poll_wait) {
971 			dev_err(&adapter->pdev->dev,
972 				"Timeout exceeded in %s, aborting dump\n",
973 				__func__);
974 			return 0;
975 		}
976 
977 		data = qlcnic_ind_rd(adapter, pollrd->read_addr);
978 		*buffer++ = cpu_to_le32(sel_val);
979 		*buffer++ = cpu_to_le32(data);
980 		sel_val += pollrd->sel_val_stride;
981 	}
982 	return pollrd->no_ops * (2 * sizeof(u32));
983 }
984 
qlcnic_read_mux2(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)985 static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
986 			    struct qlcnic_dump_entry *entry, __le32 *buffer)
987 {
988 	struct __mux2 *mux2 = &entry->region.mux2;
989 	u32 data;
990 	u32 t_sel_val, sel_val1, sel_val2;
991 	int i;
992 
993 	sel_val1 = mux2->sel_val1;
994 	sel_val2 = mux2->sel_val2;
995 
996 	for (i = 0; i < mux2->no_ops; i++) {
997 		qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
998 		t_sel_val = sel_val1 & mux2->sel_val_mask;
999 		qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
1000 		data = qlcnic_ind_rd(adapter, mux2->read_addr);
1001 		*buffer++ = cpu_to_le32(t_sel_val);
1002 		*buffer++ = cpu_to_le32(data);
1003 		qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
1004 		t_sel_val = sel_val2 & mux2->sel_val_mask;
1005 		qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
1006 		data = qlcnic_ind_rd(adapter, mux2->read_addr);
1007 		*buffer++ = cpu_to_le32(t_sel_val);
1008 		*buffer++ = cpu_to_le32(data);
1009 		sel_val1 += mux2->sel_val_stride;
1010 		sel_val2 += mux2->sel_val_stride;
1011 	}
1012 
1013 	return mux2->no_ops * (4 * sizeof(u32));
1014 }
1015 
qlcnic_83xx_dump_rom(struct qlcnic_adapter * adapter,struct qlcnic_dump_entry * entry,__le32 * buffer)1016 static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
1017 				struct qlcnic_dump_entry *entry, __le32 *buffer)
1018 {
1019 	u32 fl_addr, size;
1020 	struct __mem *rom = &entry->region.mem;
1021 
1022 	fl_addr = rom->addr;
1023 	size = rom->size / 4;
1024 
1025 	if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
1026 					       (u8 *)buffer, size))
1027 		return rom->size;
1028 
1029 	return 0;
1030 }
1031 
/* Opcode-to-handler table for 82xx minidump templates. */
static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
1054 
/* Opcode-to-handler table for 83xx minidump templates; adds the
 * POLL_RD, MUX2 and POLLRDMWR opcodes and uses the 83xx ROM reader.
 */
static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
	{QLCNIC_READ_MUX2, qlcnic_read_mux2},
	{QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
	{QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
1080 
/* Ones'-complement checksum over the minidump template (RFC 1071 style).
 *
 * @temp_buffer: template words to sum
 * @temp_size:   template size in bytes; only whole 32-bit words are summed
 *
 * Sums the words into a 64-bit accumulator, folds the carries back in
 * until the sum fits in 32 bits, and returns the complement.  A template
 * whose stored checksum word is correct therefore sums to 0.
 *
 * Fix: the signature previously mixed kernel "u32" with C99 "uint32_t"
 * and counted with a signed int against an unsigned byte count; types
 * are now consistently uint32_t (same width/ABI as before).
 */
static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, uint32_t temp_size)
{
	uint64_t sum = 0;
	uint32_t count = temp_size / sizeof(uint32_t);

	while (count-- > 0)
		sum += *temp_buffer++;

	/* Fold carry bits back into the low 32 bits */
	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);

	return ~sum;
}
1091 
/* Read the full minidump template out of flash into @buffer.
 *
 * @buffer: destination, at least @size bytes
 * @size:   template size in bytes (read as size/4 32-bit words)
 *
 * Flash-resident templates exist only on 83xx parts; 82xx returns -EIO.
 * Returns 0 on success, -EIO if unsupported or the flash lock cannot be
 * taken, otherwise the error from the flash read.
 */
static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
					     u8 *buffer, u32 size)
{
	int ret;

	if (qlcnic_82xx_check(adapter))
		return -EIO;

	if (qlcnic_83xx_lock_flash(adapter))
		return -EIO;

	ret = qlcnic_83xx_lockless_flash_read32(adapter,
						QLC_83XX_MINIDUMP_FLASH,
						buffer, size / sizeof(u32));
	qlcnic_83xx_unlock_flash(adapter);

	return ret;
}
1111 
1112 static int
qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter * adapter,struct qlcnic_cmd_args * cmd)1113 qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
1114 				       struct qlcnic_cmd_args *cmd)
1115 {
1116 	struct qlcnic_83xx_dump_template_hdr tmp_hdr;
1117 	u32 size = sizeof(tmp_hdr) / sizeof(u32);
1118 	int ret = 0;
1119 
1120 	if (qlcnic_82xx_check(adapter))
1121 		return -EIO;
1122 
1123 	if (qlcnic_83xx_lock_flash(adapter))
1124 		return -EIO;
1125 
1126 	ret = qlcnic_83xx_lockless_flash_read32(adapter,
1127 						QLC_83XX_MINIDUMP_FLASH,
1128 						(u8 *)&tmp_hdr, size);
1129 
1130 	qlcnic_83xx_unlock_flash(adapter);
1131 
1132 	cmd->rsp.arg[2] = tmp_hdr.size;
1133 	cmd->rsp.arg[3] = tmp_hdr.version;
1134 
1135 	return ret;
1136 }
1137 
/* Determine the minidump template size and version.
 *
 * @version:        out, template version
 * @temp_size:      out, template size in bytes
 * @use_flash_temp: out, set to 1 when the mailbox query failed and the
 *                  header had to be read from flash instead
 *
 * Tries the QLCNIC_CMD_TEMP_SIZE mailbox command first; on failure falls
 * back to reading the header from flash, which fills the same response
 * slots.  Returns 0 on success, -ENOMEM if mailbox args cannot be
 * allocated, -EIO if both paths fail or the reported size is zero.
 */
static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
					    u32 *version, u32 *temp_size,
					    u8 *use_flash_temp)
{
	struct qlcnic_cmd_args cmd;
	int err;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
		return -ENOMEM;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err != QLCNIC_RCODE_SUCCESS) {
		/* Mailbox query failed - try the flash-resident header */
		if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
			qlcnic_free_mbx_args(&cmd);
			return -EIO;
		}
		*use_flash_temp = 1;
	}

	*temp_size = cmd.rsp.arg[2];
	*version = cmd.rsp.arg[3];
	qlcnic_free_mbx_args(&cmd);

	/* A zero-size template is unusable */
	return *temp_size ? 0 : -EIO;
}
1166 
/* Retrieve the minidump template from firmware via mailbox.
 *
 * @buffer:    destination for the template, temp_size bytes
 * @temp_size: template size in bytes
 *
 * Allocates a DMA-coherent bounce buffer, issues GET_TEMP_HDR so
 * firmware writes the template into it, then copies the words out to
 * @buffer converting each from little-endian to host order.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, otherwise the
 * mailbox error code from qlcnic_issue_cmd().
 */
static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
					     u32 *buffer, u32 temp_size)
{
	int err = 0, i;
	void *tmp_addr;
	__le32 *tmp_buf;
	struct qlcnic_cmd_args cmd;
	dma_addr_t tmp_addr_t = 0;

	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
				      &tmp_addr_t, GFP_KERNEL);
	if (!tmp_addr)
		return -ENOMEM;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
		err = -ENOMEM;
		goto free_mem;
	}

	/* Hand firmware the bounce buffer's DMA address and length */
	cmd.req.arg[1] = LSD(tmp_addr_t);
	cmd.req.arg[2] = MSD(tmp_addr_t);
	cmd.req.arg[3] = temp_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	tmp_buf = tmp_addr;
	if (err == QLCNIC_RCODE_SUCCESS) {
		/* Copy out word by word, fixing up endianness */
		for (i = 0; i < temp_size / sizeof(u32); i++)
			*buffer++ = __le32_to_cpu(*tmp_buf++);
	}

	qlcnic_free_mbx_args(&cmd);

free_mem:
	dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);

	return err;
}
1204 
/* Fetch, validate and cache the minidump template.
 *
 * Obtains the template size/version, allocates fw_dump->tmpl_hdr, and
 * fills it either via the mailbox path or, when firmware requests it or
 * the mailbox read fails, from flash.  The template checksum is then
 * verified, header-derived values are cached, an optional PEX DMA bounce
 * buffer is set up, and FW dump collection is enabled.
 *
 * Returns 0 on success, -EIO on query/read/checksum failure (with
 * tmpl_hdr freed and reset to NULL), -ENOMEM on allocation failure.
 */
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_fw_dump *fw_dump;
	u32 version, csum, *tmp_buf;
	u8 use_flash_temp = 0;
	u32 temp_size = 0;
	void *temp_buffer;
	int err;

	ahw = adapter->ahw;
	fw_dump = &ahw->fw_dump;
	err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
					       &use_flash_temp);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Can't get template size %d\n", err);
		return -EIO;
	}

	fw_dump->tmpl_hdr = vzalloc(temp_size);
	if (!fw_dump->tmpl_hdr)
		return -ENOMEM;

	tmp_buf = (u32 *)fw_dump->tmpl_hdr;
	if (use_flash_temp)
		goto flash_temp;

	err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);

	if (err) {
/* NOTE: jumped to from above when use_flash_temp is set; the label
 * deliberately sits inside this error branch so that both the "firmware
 * told us to use flash" and "mailbox read failed" paths share the flash
 * fallback below.
 */
flash_temp:
		err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
							temp_size);

		if (err) {
			dev_err(&adapter->pdev->dev,
				"Failed to get minidump template header %d\n",
				err);
			vfree(fw_dump->tmpl_hdr);
			fw_dump->tmpl_hdr = NULL;
			return -EIO;
		}
	}

	/* A valid template (checksum word included) sums to zero */
	csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);

	if (csum) {
		dev_err(&adapter->pdev->dev,
			"Template header checksum validation failed\n");
		vfree(fw_dump->tmpl_hdr);
		fw_dump->tmpl_hdr = NULL;
		return -EIO;
	}

	qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);

	if (fw_dump->use_pex_dma) {
		/* PEX DMA capture is best-effort: fall back to non-DMA
		 * capture if the bounce buffer cannot be allocated.
		 */
		fw_dump->dma_buffer = NULL;
		temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
						 QLC_PEX_DMA_READ_SIZE,
						 &fw_dump->phys_addr,
						 GFP_KERNEL);
		if (!temp_buffer)
			fw_dump->use_pex_dma = false;
		else
			fw_dump->dma_buffer = temp_buffer;
	}


	dev_info(&adapter->pdev->dev,
		 "Default minidump capture mask 0x%x\n",
		 fw_dump->cap_mask);

	qlcnic_enable_fw_dump_state(adapter);

	return 0;
}
1283 
/* Capture a firmware minidump into a freshly allocated buffer.
 *
 * Sizes the capture area from the per-region capture sizes selected by
 * fw_dump->cap_mask, allocates fw_dump->data, then walks the template
 * entries dispatching each to its opcode handler (82xx or 83xx table).
 * Entries that are masked out, have an unknown opcode, or fail size
 * validation are flagged QLCNIC_DUMP_SKIP in the template itself.  On
 * completion fw_dump->clr is set (a new dump cannot be taken until the
 * previous one is retrieved/cleared) and a udev KOBJ_CHANGE event
 * announces availability.
 *
 * Returns 0 on success; -EIO if there is no template, dump is disabled,
 * a previous dump is pending, or the computed dump size is zero;
 * -ENOMEM if the capture buffer cannot be allocated.
 */
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	const struct qlcnic_dump_operations *fw_dump_ops;
	struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	int i, k, ops_cnt, ops_index, dump_size = 0;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_entry *entry;
	void *tmpl_hdr;
	u32 ocm_window;
	__le32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};	/* envp for the udev event below */

	ahw = adapter->ahw;
	tmpl_hdr = fw_dump->tmpl_hdr;

	/* Return if we don't have firmware dump template header */
	if (!tmpl_hdr)
		return -EIO;

	if (!qlcnic_check_fw_dump_state(adapter)) {
		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
		return -EIO;
	}

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			 "Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}

	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & fw_dump->cap_mask)
			dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);

	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data)
		return -ENOMEM;

	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = fw_dump->num_entries;
	entry_offset = fw_dump->offset;
	/* Record driver and firmware versions in the template header */
	qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
	qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);

	if (qlcnic_82xx_check(adapter)) {
		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
		fw_dump_ops = qlcnic_fw_dump_ops;
	} else {
		/* 83xx: stash the OCM window register and PCI function in
		 * the header's saved_state[] for the entry handlers to use.
		 */
		hdr_83xx = tmpl_hdr;
		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
		ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
		hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
		hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
	}

	for (i = 0; i < no_entries; i++) {
		entry = tmpl_hdr + entry_offset;
		if (!(entry->hdr.mask & fw_dump->cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}

		if (ops_index == ops_cnt) {
			dev_info(dev, "Skipping unknown entry opcode %d\n",
				 entry->hdr.type);
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Advance past the data this entry captured */
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
		cond_resched();
	}

	fw_dump->clr = 1;
	snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
	netdev_info(adapter->netdev,
		    "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
		    fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
		    fw_dump->tmpl_hdr);
	/* Send a udev event to notify availability of FW dump */
	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);

	return 0;
}
1399 
1400 static inline bool
qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter * adapter)1401 qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
1402 {
1403 	/* For special adapters (with 0x8830 device ID), where iSCSI firmware
1404 	 * dump needs to be captured as part of regular firmware dump
1405 	 * collection process, firmware exports it's capability through
1406 	 * capability registers
1407 	 */
1408 	return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
1409 		(adapter->ahw->extra_capability[0] &
1410 		 QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
1411 }
1412 
/* (Re)load the minidump template on 83xx adapters.
 *
 * Called on probe/firmware transitions: if no template is cached yet,
 * or the running firmware version is newer than the one the cached
 * template was fetched under, the old template is dropped and a fresh
 * one is retrieved.  On QLE8830 parts with the extended iSCSI dump
 * capability the capture mask is forced to 0x1f per firmware
 * requirement.  Errors are logged-and-ignored (void return).
 */
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
{
	u32 prev_version, current_version;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
	struct pci_dev *pdev = adapter->pdev;
	bool extended = false;
	int ret;

	prev_version = adapter->fw_version;
	current_version = qlcnic_83xx_get_fw_version(adapter);

	if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
		/* vfree(NULL) is a no-op, so unconditional free is safe */
		vfree(fw_dump->tmpl_hdr);
		fw_dump->tmpl_hdr = NULL;

		/* Ask firmware to include iSCSI dump data if supported */
		if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
			extended = !qlcnic_83xx_extend_md_capab(adapter);

		ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
		if (ret)
			return;

		dev_info(&pdev->dev, "Supports FW dump capability\n");

		/* Once we have minidump template with extended iSCSI dump
		 * capability, update the minidump capture mask to 0x1f as
		 * per FW requirement
		 */
		if (extended) {
			struct qlcnic_83xx_dump_template_hdr *hdr;

			hdr = fw_dump->tmpl_hdr;
			if (!hdr)
				return;
			hdr->drv_cap_mask = 0x1f;
			fw_dump->cap_mask = 0x1f;
			dev_info(&pdev->dev,
				 "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
		}
	}
}
1455