xref: /linux/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c (revision 5ba0a3be6ecc3a0b0d52c2a818b05564c6b42510)
1 #include "qlcnic.h"
2 #include "qlcnic_hdr.h"
3 
4 #include <net/ip.h>
5 
6 #define QLCNIC_DUMP_WCRB	BIT_0
7 #define QLCNIC_DUMP_RWCRB	BIT_1
8 #define QLCNIC_DUMP_ANDCRB	BIT_2
9 #define QLCNIC_DUMP_ORCRB	BIT_3
10 #define QLCNIC_DUMP_POLLCRB	BIT_4
11 #define QLCNIC_DUMP_RD_SAVE	BIT_5
12 #define QLCNIC_DUMP_WRT_SAVED	BIT_6
13 #define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7
14 #define QLCNIC_DUMP_SKIP	BIT_7
15 
16 #define QLCNIC_DUMP_MASK_MAX	0xff
17 
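/*
 * The structures below overlay entries of the firmware-provided minidump
 * template, which qlcnic_dump_fw() walks in place; hence they are __packed.
 */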
18 struct qlcnic_common_entry_hdr {
19 	u32     type;
20 	u32     offset;
21 	u32     cap_size;
22 	u8      mask;
23 	u8      rsvd[2];
24 	u8      flags;
25 } __packed;
26 
27 struct __crb {
28 	u32	addr;
29 	u8	stride;
30 	u8	rsvd1[3];
31 	u32	data_size;
32 	u32	no_ops;
33 	u32	rsvd2[4];
34 } __packed;
35 
36 struct __ctrl {
37 	u32	addr;
38 	u8	stride;
39 	u8	index_a;
40 	u16	timeout;
41 	u32	data_size;
42 	u32	no_ops;
43 	u8	opcode;
44 	u8	index_v;
45 	u8	shl_val;
46 	u8	shr_val;
47 	u32	val1;
48 	u32	val2;
49 	u32	val3;
50 } __packed;
51 
52 struct __cache {
53 	u32	addr;
54 	u16	stride;
55 	u16	init_tag_val;
56 	u32	size;
57 	u32	no_ops;
58 	u32	ctrl_addr;
59 	u32	ctrl_val;
60 	u32	read_addr;
61 	u8	read_addr_stride;
62 	u8	read_addr_num;
63 	u8	rsvd1[2];
64 } __packed;
65 
66 struct __ocm {
67 	u8	rsvd[8];
68 	u32	size;
69 	u32	no_ops;
70 	u8	rsvd1[8];
71 	u32	read_addr;
72 	u32	read_addr_stride;
73 } __packed;
74 
75 struct __mem {
76 	u8	rsvd[24];
77 	u32	addr;
78 	u32	size;
79 } __packed;
80 
81 struct __mux {
82 	u32	addr;
83 	u8	rsvd[4];
84 	u32	size;
85 	u32	no_ops;
86 	u32	val;
87 	u32	val_stride;
88 	u32	read_addr;
89 	u8	rsvd2[4];
90 } __packed;
91 
92 struct __queue {
93 	u32	sel_addr;
94 	u16	stride;
95 	u8	rsvd[2];
96 	u32	size;
97 	u32	no_ops;
98 	u8	rsvd2[8];
99 	u32	read_addr;
100 	u8	read_addr_stride;
101 	u8	read_addr_cnt;
102 	u8	rsvd3[2];
103 } __packed;
104 
105 struct qlcnic_dump_entry {
106 	struct qlcnic_common_entry_hdr hdr;
107 	union {
108 		struct __crb	crb;
109 		struct __cache	cache;
110 		struct __ocm	ocm;
111 		struct __mem	mem;
112 		struct __mux	mux;
113 		struct __queue	que;
114 		struct __ctrl	ctrl;
115 	} region;
116 } __packed;
117 
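/* Entry types that may appear in the dump template; see fw_dump_ops[] below */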
118 enum qlcnic_minidump_opcode {
119 	QLCNIC_DUMP_NOP		= 0,
120 	QLCNIC_DUMP_READ_CRB	= 1,
121 	QLCNIC_DUMP_READ_MUX	= 2,
122 	QLCNIC_DUMP_QUEUE	= 3,
123 	QLCNIC_DUMP_BRD_CONFIG	= 4,
124 	QLCNIC_DUMP_READ_OCM	= 6,
125 	QLCNIC_DUMP_PEG_REG	= 7,
126 	QLCNIC_DUMP_L1_DTAG	= 8,
127 	QLCNIC_DUMP_L1_ITAG	= 9,
128 	QLCNIC_DUMP_L1_DATA	= 11,
129 	QLCNIC_DUMP_L1_INST	= 12,
130 	QLCNIC_DUMP_L2_DTAG	= 21,
131 	QLCNIC_DUMP_L2_ITAG	= 22,
132 	QLCNIC_DUMP_L2_DATA	= 23,
133 	QLCNIC_DUMP_L2_INST	= 24,
134 	QLCNIC_DUMP_READ_ROM	= 71,
135 	QLCNIC_DUMP_READ_MEM	= 72,
136 	QLCNIC_DUMP_READ_CTRL	= 98,
137 	QLCNIC_DUMP_TLHDR	= 99,
138 	QLCNIC_DUMP_RDEND	= 255
139 };
140 
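/*
 * Maps a template entry type to its capture routine. A handler returns the
 * number of bytes it wrote to the dump buffer (0 for entries that produce no
 * data, an error value on failure).
 */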
141 struct qlcnic_dump_operations {
142 	enum qlcnic_minidump_opcode opcode;
143 	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
144 		       __le32 *);
145 };
146 
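/*
 * Indirect CRB register access: the upper 16 bits of the address select a
 * 64K window via QLCNIC_FW_DUMP_REG1 (read back to flush the posted write),
 * and the lower 16 bits are used as an offset from QLCNIC_FW_DUMP_REG2.
 */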
147 static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
148 {
149 	u32 dest;
150 	void __iomem *window_reg;
151 
152 	dest = addr & 0xFFFF0000;
153 	window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
154 	writel(dest, window_reg);
155 	readl(window_reg);
156 	window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
157 	*data = readl(window_reg);
158 }
159 
160 static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
161 {
162 	u32 dest;
163 	void __iomem *window_reg;
164 
165 	dest = addr & 0xFFFF0000;
166 	window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
167 	writel(dest, window_reg);
168 	readl(window_reg);
169 	window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
170 	writel(data, window_reg);
171 	readl(window_reg);
172 }
173 
174 /* FW dump related functions */
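/* Dump a range of CRB registers as (address, value) pairs */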
175 static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
176 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
177 {
178 	int i;
179 	u32 addr, data;
180 	struct __crb *crb = &entry->region.crb;
181 	void __iomem *base = adapter->ahw->pci_base0;
182 
183 	addr = crb->addr;
184 
185 	for (i = 0; i < crb->no_ops; i++) {
186 		qlcnic_read_dump_reg(addr, base, &data);
187 		*buffer++ = cpu_to_le32(addr);
188 		*buffer++ = cpu_to_le32(data);
189 		addr += crb->stride;
190 	}
191 	return crb->no_ops * 2 * sizeof(u32);
192 }
193 
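/*
 * Execute a control entry: each bit set in ->opcode selects a CRB
 * read/modify/write or poll operation, applied ->no_ops times with the
 * address advancing by ->stride. Intermediate values may be saved to and
 * restored from the template header's saved_state[] array. Control entries
 * produce no dump data of their own.
 */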
194 static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
195 			    struct qlcnic_dump_entry *entry, __le32 *buffer)
196 {
197 	int i, k, timeout = 0;
198 	void __iomem *base = adapter->ahw->pci_base0;
199 	u32 addr, data;
200 	u32 no_ops;	/* struct __ctrl.no_ops is u32; avoid truncating it */
201 	struct __ctrl *ctr = &entry->region.ctrl;
202 	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
203 
204 	addr = ctr->addr;
205 	no_ops = ctr->no_ops;
206 
207 	for (i = 0; i < no_ops; i++) {
208 		/* apply each opcode bit set for this entry */
209 		for (k = 0; k < 8; k++) {
210 			if (!(ctr->opcode & (1 << k)))
211 				continue;
212 			switch (1 << k) {
213 			case QLCNIC_DUMP_WCRB:
214 				qlcnic_write_dump_reg(addr, base, ctr->val1);
215 				break;
216 			case QLCNIC_DUMP_RWCRB:
217 				qlcnic_read_dump_reg(addr, base, &data);
218 				qlcnic_write_dump_reg(addr, base, data);
219 				break;
220 			case QLCNIC_DUMP_ANDCRB:
221 				qlcnic_read_dump_reg(addr, base, &data);
222 				qlcnic_write_dump_reg(addr, base,
223 						      data & ctr->val2);
224 				break;
225 			case QLCNIC_DUMP_ORCRB:
226 				qlcnic_read_dump_reg(addr, base, &data);
227 				qlcnic_write_dump_reg(addr, base,
228 						      data | ctr->val3);
229 				break;
230 			case QLCNIC_DUMP_POLLCRB:
231 				while (timeout <= ctr->timeout) {
232 					qlcnic_read_dump_reg(addr, base, &data);
233 					if ((data & ctr->val2) == ctr->val1)
234 						break;
235 					msleep(1);
236 					timeout++;
237 				}
238 				if (timeout > ctr->timeout) {
239 					dev_info(&adapter->pdev->dev,
240 					"Timed out, aborting poll CRB\n");
241 					return -EINVAL;
242 				}
243 				break;
244 			case QLCNIC_DUMP_RD_SAVE:
245 				if (ctr->index_a)
246 					addr = t_hdr->saved_state[ctr->index_a];
247 				qlcnic_read_dump_reg(addr, base, &data);
248 				t_hdr->saved_state[ctr->index_v] = data;
249 				break;
250 			case QLCNIC_DUMP_WRT_SAVED:
251 				if (ctr->index_v)
252 					data = t_hdr->saved_state[ctr->index_v];
253 				else
254 					data = ctr->val1;
255 				if (ctr->index_a)
256 					addr = t_hdr->saved_state[ctr->index_a];
257 				qlcnic_write_dump_reg(addr, base, data);
258 				break;
259 			case QLCNIC_DUMP_MOD_SAVE_ST:
260 				data = t_hdr->saved_state[ctr->index_v];
261 				data <<= ctr->shl_val;
262 				data >>= ctr->shr_val;
263 				if (ctr->val2)
264 					data &= ctr->val2;
265 				data |= ctr->val3;
266 				data += ctr->val1;
267 				t_hdr->saved_state[ctr->index_v] = data;
268 				break;
269 			default:
270 				dev_info(&adapter->pdev->dev,
271 					 "Unknown opcode\n");
272 				break;
273 			}
274 		}
275 		addr += ctr->stride;
276 	}
277 	return 0;
278 }
279 
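/*
 * Dump a multiplexed register set: write each select value to ->addr, read
 * the result from ->read_addr and store the (select, data) pair.
 */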
280 static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
281 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
282 {
283 	int loop;
284 	u32 val, data = 0;
285 	struct __mux *mux = &entry->region.mux;
286 	void __iomem *base = adapter->ahw->pci_base0;
287 
288 	val = mux->val;
289 	for (loop = 0; loop < mux->no_ops; loop++) {
290 		qlcnic_write_dump_reg(mux->addr, base, val);
291 		qlcnic_read_dump_reg(mux->read_addr, base, &data);
292 		*buffer++ = cpu_to_le32(val);
293 		*buffer++ = cpu_to_le32(data);
294 		val += mux->val_stride;
295 	}
296 	return 2 * mux->no_ops * sizeof(u32);
297 }
298 
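/*
 * Dump hardware queues: for each of ->no_ops queue ids, write the id to
 * ->sel_addr and read ->read_addr_cnt words starting at ->read_addr.
 */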
299 static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
300 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
301 {
302 	int i, loop;
303 	u32 cnt, addr, data, que_id = 0;
304 	void __iomem *base = adapter->ahw->pci_base0;
305 	struct __queue *que = &entry->region.que;
306 
307 	/* number of words to read per queue instance */
308 	cnt = que->read_addr_cnt;
309 
310 	for (loop = 0; loop < que->no_ops; loop++) {
311 		qlcnic_write_dump_reg(que->sel_addr, base, que_id);
312 		addr = que->read_addr;
313 		for (i = 0; i < cnt; i++) {
314 			qlcnic_read_dump_reg(addr, base, &data);
315 			*buffer++ = cpu_to_le32(data);
316 			addr += que->read_addr_stride;
317 		}
318 		que_id += que->stride;
319 	}
320 	return que->no_ops * cnt * sizeof(u32);
321 }
322 
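/* Dump on-chip memory (OCM), read directly through BAR0 without a CRB window */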
323 static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
324 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
325 {
326 	int i;
327 	u32 data;
328 	void __iomem *addr;
329 	struct __ocm *ocm = &entry->region.ocm;
330 
331 	addr = adapter->ahw->pci_base0 + ocm->read_addr;
332 	for (i = 0; i < ocm->no_ops; i++) {
333 		data = readl(addr);
334 		*buffer++ = cpu_to_le32(data);
335 		addr += ocm->read_addr_stride;
336 	}
337 	return ocm->no_ops * sizeof(u32);
338 }
339 
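/*
 * Dump flash ROM contents: grab the flash semaphore (SEM2), read the ROM one
 * 32-bit word at a time through the flash window, then release the semaphore.
 */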
340 static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
341 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
342 {
343 	int i, count = 0;
344 	u32 fl_addr, size, val, lck_val, addr;
345 	struct __mem *rom = &entry->region.mem;
346 	void __iomem *base = adapter->ahw->pci_base0;
347 
348 	fl_addr = rom->addr;
349 	size = rom->size / 4;	/* the ROM is read one 32-bit word at a time */
350 lock_try:
351 	lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
352 	if (!lck_val && count < MAX_CTL_CHECK) {
353 		msleep(10);
354 		count++;
355 		goto lock_try;
356 	}
357 	writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
358 	for (i = 0; i < size; i++) {
359 		addr = fl_addr & 0xFFFF0000;
360 		qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
361 		addr = LSW(fl_addr) + FLASH_ROM_DATA;
362 		qlcnic_read_dump_reg(addr, base, &val);
363 		fl_addr += 4;
364 		*buffer++ = cpu_to_le32(val);
365 	}
366 	readl(base + QLCNIC_FLASH_SEM2_ULK);
367 	return rom->size;
368 }
369 
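/*
 * Dump L1 cache: for each tag value, program the tag and control registers,
 * then read back ->read_addr_num words.
 */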
370 static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
371 				struct qlcnic_dump_entry *entry, __le32 *buffer)
372 {
373 	int i;
374 	u32 cnt, val, data, addr;
375 	void __iomem *base = adapter->ahw->pci_base0;
376 	struct __cache *l1 = &entry->region.cache;
377 
378 	val = l1->init_tag_val;
379 
380 	for (i = 0; i < l1->no_ops; i++) {
381 		qlcnic_write_dump_reg(l1->addr, base, val);
382 		qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
383 		addr = l1->read_addr;
384 		cnt = l1->read_addr_num;
385 		while (cnt) {
386 			qlcnic_read_dump_reg(addr, base, &data);
387 			*buffer++ = cpu_to_le32(data);
388 			addr += l1->read_addr_stride;
389 			cnt--;
390 		}
391 		val += l1->stride;
392 	}
393 	return l1->no_ops * l1->read_addr_num * sizeof(u32);
394 }
395 
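/*
 * Dump L2 cache: same pattern as L1, but the upper half of ->ctrl_val may
 * encode a poll mask and timeout that gate each read-back.
 */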
396 static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
397 				struct qlcnic_dump_entry *entry, __le32 *buffer)
398 {
399 	int i;
400 	u32 cnt, val, data, addr;
401 	u8 poll_mask, poll_to, time_out = 0;
402 	void __iomem *base = adapter->ahw->pci_base0;
403 	struct __cache *l2 = &entry->region.cache;
404 
405 	val = l2->init_tag_val;
406 	poll_mask = LSB(MSW(l2->ctrl_val));
407 	poll_to = MSB(MSW(l2->ctrl_val));
408 
409 	for (i = 0; i < l2->no_ops; i++) {
410 		qlcnic_write_dump_reg(l2->addr, base, val);
411 		if (LSW(l2->ctrl_val))
412 			qlcnic_write_dump_reg(l2->ctrl_addr, base,
413 					      LSW(l2->ctrl_val));
414 		if (!poll_mask)
415 			goto skip_poll;
416 		do {
417 			qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
418 			if (!(data & poll_mask))
419 				break;
420 			msleep(1);
421 			time_out++;
422 		} while (time_out <= poll_to);
423 
424 		if (time_out > poll_to) {
425 			dev_err(&adapter->pdev->dev,
426 				"Timeout exceeded in %s, aborting dump\n",
427 				__func__);
428 			return -EINVAL;
429 		}
430 skip_poll:
431 		addr = l2->read_addr;
432 		cnt = l2->read_addr_num;
433 		while (cnt) {
434 			qlcnic_read_dump_reg(addr, base, &data);
435 			*buffer++ = cpu_to_le32(data);
436 			addr += l2->read_addr_stride;
437 			cnt--;
438 		}
439 		val += l2->stride;
440 	}
441 	return l2->no_ops * l2->read_addr_num * sizeof(u32);
442 }
443 
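/*
 * Dump a region of adapter memory through the MIU test agent, 16 bytes per
 * transaction, serialized against other driver memory accesses by mem_lock.
 */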
444 static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
445 			      struct qlcnic_dump_entry *entry, __le32 *buffer)
446 {
447 	u32 addr, data, test, ret = 0;
448 	int i, reg_read;
449 	struct __mem *mem = &entry->region.mem;
450 	void __iomem *base = adapter->ahw->pci_base0;
451 
452 	reg_read = mem->size;
453 	addr = mem->addr;
454 	/* the address must be 16-byte aligned and the size a multiple of 16 */
455 	if ((addr & 0xf) || (reg_read % 16)) {
456 		dev_info(&adapter->pdev->dev,
457 			 "Unaligned memory addr:0x%x size:0x%x\n",
458 			 addr, reg_read);
459 		return -EINVAL;
460 	}
461 
462 	mutex_lock(&adapter->ahw->mem_lock);
463 
464 	while (reg_read != 0) {
465 		qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
466 		qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
467 		qlcnic_write_dump_reg(MIU_TEST_CTR, base,
468 				      TA_CTL_ENABLE | TA_CTL_START);
469 
470 		for (i = 0; i < MAX_CTL_CHECK; i++) {
471 			qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
472 			if (!(test & TA_CTL_BUSY))
473 				break;
474 		}
475 		if (i == MAX_CTL_CHECK) {
476 			if (printk_ratelimit())
477 				dev_err(&adapter->pdev->dev,
478 					"failed to read through agent\n");
479 			/* fail the dump even if the message was rate-limited */
480 			ret = -EINVAL;
481 			goto out;
482 		}
483 		for (i = 0; i < 4; i++) {
484 			qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
485 					     &data);
486 			*buffer++ = cpu_to_le32(data);
487 		}
488 		addr += 16;
489 		reg_read -= 16;
490 		ret += 16;
491 	}
492 out:
493 	mutex_unlock(&adapter->ahw->mem_lock);
494 	return ret;
495 }
496 
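/* No-op handler: mark the entry as skipped and capture nothing */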
497 static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
498 			   struct qlcnic_dump_entry *entry, __le32 *buffer)
499 {
500 	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
501 	return 0;
502 }
503 
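/* Dispatch table mapping template entry types to their capture handlers */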
504 static const struct qlcnic_dump_operations fw_dump_ops[] = {
505 	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
506 	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
507 	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
508 	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
509 	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
510 	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
511 	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
512 	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
513 	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
514 	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
515 	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
516 	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
517 	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
518 	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
519 	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
520 	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
521 	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
522 	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
523 	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
524 	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
525 };
526 
527 /* Validate that a handler captured exactly the size recorded in the entry header */
528 static int
529 qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
530 			u32 size)
531 {
532 	int ret = 1;
533 	if (size != entry->hdr.cap_size) {
534 		dev_info(dev,
535 			 "Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
536 			 entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
537 		dev_info(dev, "Aborting further dump capture\n");
538 		ret = 0;
539 	}
540 	return ret;
541 }
542 
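/*
 * Walk the dump template: for every entry whose mask is enabled in
 * drv_cap_mask, dispatch to its handler, validate the captured length
 * against the entry's cap_size, advance the buffer by cap_size and finally
 * announce the completed dump with a udev event.
 */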
543 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
544 {
545 	__le32 *buffer;
546 	char mesg[64];
547 	char *msg[] = {mesg, NULL};
548 	int i, k, ops_cnt, ops_index, dump_size = 0;
549 	u32 entry_offset, dump, no_entries, buf_offset = 0;
550 	struct qlcnic_dump_entry *entry;
551 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
552 	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
553 
554 	if (fw_dump->clr) {
555 		dev_info(&adapter->pdev->dev,
556 			 "Previous dump not cleared, not capturing dump\n");
557 		return -EIO;
558 	}
559 	/* Calculate the size for dump data area only */
560 	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
561 		if (i & tmpl_hdr->drv_cap_mask)
562 			dump_size += tmpl_hdr->cap_sizes[k];
563 	if (!dump_size)
564 		return -EIO;
565 
566 	fw_dump->data = vzalloc(dump_size);
567 	if (!fw_dump->data) {
568 		dev_info(&adapter->pdev->dev,
569 			 "Unable to allocate (%d KB) for fw dump\n",
570 			 dump_size / 1024);
571 		return -ENOMEM;
572 	}
573 	buffer = fw_dump->data;
574 	fw_dump->size = dump_size;
575 	no_entries = tmpl_hdr->num_entries;
576 	ops_cnt = ARRAY_SIZE(fw_dump_ops);
577 	entry_offset = tmpl_hdr->offset;
578 	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
579 	tmpl_hdr->sys_info[1] = adapter->fw_version;
580 
581 	for (i = 0; i < no_entries; i++) {
582 		entry = (void *)tmpl_hdr + entry_offset;
583 		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
584 			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
585 			entry_offset += entry->hdr.offset;
586 			continue;
587 		}
588 		/* Find the handler for this entry */
589 		ops_index = 0;
590 		while (ops_index < ops_cnt) {
591 			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
592 				break;
593 			ops_index++;
594 		}
595 		if (ops_index == ops_cnt) {
596 			dev_info(&adapter->pdev->dev,
597 				 "Invalid entry type %d, exiting dump\n",
598 				 entry->hdr.type);
599 			goto error;
600 		}
601 		/* Collect dump for this entry */
602 		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
603 		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
604 						     dump))
605 			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
606 		buf_offset += entry->hdr.cap_size;
607 		entry_offset += entry->hdr.offset;
608 		buffer = fw_dump->data + buf_offset;
609 	}
610 	if (dump_size != buf_offset) {
611 		dev_info(&adapter->pdev->dev,
612 			 "Captured(%d) and expected size(%d) do not match\n",
613 			 buf_offset, dump_size);
614 		goto error;
615 	} else {
616 		fw_dump->clr = 1;
617 		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
618 			 adapter->netdev->name);
619 		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
620 			 fw_dump->size);
621 		/* Send a udev event to notify availability of FW dump */
622 		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
623 		return 0;
624 	}
625 error:
626 	vfree(fw_dump->data);
627 	return -EINVAL;
628 }
629