xref: /freebsd/sys/dev/cxgbe/cudbg/cudbg_lib.c (revision d93a896ef95946b0bf1219866fcb324b78543444)
1 /*-
2  * Copyright (c) 2017 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include <sys/types.h>
31 #include <sys/param.h>
32 
33 #include "common/common.h"
34 #include "common/t4_regs.h"
35 #include "cudbg.h"
36 #include "cudbg_lib_common.h"
37 #include "cudbg_lib.h"
38 #include "cudbg_entity.h"
39 #define  BUFFER_WARN_LIMIT 10000000
40 
41 struct large_entity large_entity_list[] = {
42 	{CUDBG_EDC0, 0, 0},
43 	{CUDBG_EDC1, 0, 0},
44 	{CUDBG_MC0, 0, 0},
45 	{CUDBG_MC1, 0, 0}
46 };
47 
48 static int is_fw_attached(struct cudbg_init *pdbg_init)
49 {
50 
51 	return (pdbg_init->adap->flags & FW_OK);
52 }
53 
54 /* Add padding bytes to debug_buffer, if needed, to make it
55  * 4-byte aligned. */
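/*
 * Illustrative example (assumed values): if an entity's data ends 3 bytes
 * past a 4-byte boundary (remain == 3), a single zero byte is appended
 * (padding == 1) and entity_hdr->num_pad is set to 1.
 */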
56 static void align_debug_buffer(struct cudbg_buffer *dbg_buff,
57 			struct cudbg_entity_hdr *entity_hdr)
58 {
59 	u8 zero_buf[4] = {0};
60 	u8 padding, remain;
61 
62 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
63 	padding = 4 - remain;
64 	if (remain) {
65 		memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, &zero_buf,
66 		       padding);
67 		dbg_buff->offset += padding;
68 		entity_hdr->num_pad = padding;
69 	}
70 
71 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
72 }
73 
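/*
 * Read an SGE context image. When the firmware is attached, the read is
 * attempted through the firmware mailbox (t4_sge_ctxt_rd); if that is not
 * possible or fails, it falls back to the register backdoor read
 * (t4_sge_ctxt_rd_bd).
 */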
74 static void read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
75 			  enum ctxt_type ctype, u32 *data)
76 {
77 	struct adapter *padap = pdbg_init->adap;
78 	int rc = -1;
79 
80 	if (is_fw_attached(pdbg_init))
81 		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
82 				    data);
83 
84 	if (rc)
85 		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
86 }
87 
88 static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
89 			    struct cudbg_buffer *dbg_buff,
90 			    struct cudbg_entity_hdr **entity_hdr)
91 {
92 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
93 	int rc = 0;
94 	u32 ext_offset = cudbg_hdr->data_len;
95 	*ext_size = 0;
96 
97 	if (dbg_buff->size - dbg_buff->offset <=
98 		 sizeof(struct cudbg_entity_hdr)) {
99 		rc = CUDBG_STATUS_BUFFER_SHORT;
100 		goto err;
101 	}
102 
103 	*entity_hdr = (struct cudbg_entity_hdr *)
104 		       ((char *)outbuf + cudbg_hdr->data_len);
105 
106 	/* Find the last extended entity header */
107 	while ((*entity_hdr)->size) {
108 
109 		ext_offset += sizeof(struct cudbg_entity_hdr) +
110 				     (*entity_hdr)->size;
111 
112 		*ext_size += (*entity_hdr)->size +
113 			      sizeof(struct cudbg_entity_hdr);
114 
115 		if (dbg_buff->size - dbg_buff->offset + *ext_size  <=
116 			sizeof(struct cudbg_entity_hdr)) {
117 			rc = CUDBG_STATUS_BUFFER_SHORT;
118 			goto err;
119 		}
120 
121 		if (ext_offset != (*entity_hdr)->next_ext_offset) {
122 			ext_offset -= sizeof(struct cudbg_entity_hdr) +
123 				     (*entity_hdr)->size;
124 			break;
125 		}
126 
127 		(*entity_hdr)->next_ext_offset = *ext_size;
128 
129 		*entity_hdr = (struct cudbg_entity_hdr *)
130 					   ((char *)outbuf +
131 					   ext_offset);
132 	}
133 
134 	/* update the data offset */
135 	dbg_buff->offset = ext_offset;
136 err:
137 	return rc;
138 }
139 
140 static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
141 		       u32 cur_entity_data_offset,
142 		       u32 cur_entity_size,
143 		       int entity_nu, u32 ext_size)
144 {
145 	struct cudbg_private *priv = handle;
146 	struct cudbg_init *cudbg_init = &priv->dbg_init;
147 	struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
148 	u64 timestamp;
149 	u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
150 	u32 remain_flash_size;
151 	u32 flash_data_offset;
152 	u32 data_hdr_size;
153 	int rc = -1;
154 
155 	data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
156 			sizeof(struct cudbg_hdr);
157 
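	/*
	 * flash_data_offset below maps this entity's offset within the debug
	 * buffer (excluding the header area) into the flash region that
	 * follows the space reserved for FLASH_CUDBG_NSECS flash headers and
	 * their debug-data header areas.
	 */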
158 	flash_data_offset = (FLASH_CUDBG_NSECS *
159 			     (sizeof(struct cudbg_flash_hdr) +
160 			      data_hdr_size)) +
161 			    (cur_entity_data_offset - data_hdr_size);
162 
163 	if (flash_data_offset > CUDBG_FLASH_SIZE) {
164 		update_skip_size(sec_info, cur_entity_size);
165 		if (cudbg_init->verbose)
166 			cudbg_init->print("Skipping large entity...\n");
167 		return rc;
168 	}
169 
170 	remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
171 
172 	if (cur_entity_size > remain_flash_size) {
173 		update_skip_size(sec_info, cur_entity_size);
174 		if (cudbg_init->verbose)
175 			cudbg_init->print("Skipping large entity...\n");
176 	} else {
177 		timestamp = 0;
178 
179 		cur_entity_hdr_offset +=
180 			(sizeof(struct cudbg_entity_hdr) *
181 			(entity_nu - 1));
182 
183 		rc = cudbg_write_flash(handle, timestamp, dbg_buff,
184 				       cur_entity_data_offset,
185 				       cur_entity_hdr_offset,
186 				       cur_entity_size,
187 				       ext_size);
188 		if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
189 			cudbg_init->print("\n\tFLASH is full... "
190 				"cannot write any more data to flash\n\n");
191 	}
192 
193 	return rc;
194 }
195 
196 int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
197 {
198 	struct cudbg_entity_hdr *entity_hdr = NULL;
199 	struct cudbg_entity_hdr *ext_entity_hdr = NULL;
200 	struct cudbg_hdr *cudbg_hdr;
201 	struct cudbg_buffer dbg_buff;
202 	struct cudbg_error cudbg_err = {0};
203 	int large_entity_code;
204 
205 	u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
206 	struct cudbg_init *cudbg_init =
207 		&(((struct cudbg_private *)handle)->dbg_init);
208 	struct adapter *padap = cudbg_init->adap;
209 	u32 total_size, remaining_buf_size;
210 	u32 ext_size = 0;
211 	int index, bit, i, rc = -1;
212 	int all;
213 	bool flag_ext = 0;
214 
215 	reset_skip_entity();
216 
217 	dbg_buff.data = outbuf;
218 	dbg_buff.size = *outbuf_size;
219 	dbg_buff.offset = 0;
220 
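	/*
	 * Non-extended output layout: a global cudbg_hdr, followed by an
	 * array of CUDBG_MAX_ENTITY cudbg_entity_hdr structures, followed by
	 * the per-entity (compressed) data.
	 */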
221 	cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
222 	cudbg_hdr->signature = CUDBG_SIGNATURE;
223 	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
224 	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
225 	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
226 	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
227 	cudbg_hdr->chip_ver = padap->params.chipid;
228 
229 	if (cudbg_hdr->data_len)
230 		flag_ext = 1;
231 
232 	if (cudbg_init->use_flash) {
233 #ifndef notyet
234 		rc = t4_get_flash_params(padap);
235 		if (rc) {
236 			if (cudbg_init->verbose)
237 				cudbg_init->print("\nGet flash params failed.\n\n");
238 			cudbg_init->use_flash = 0;
239 		}
240 #endif
241 
242 #ifdef notyet
243 		/* Timestamp is mandatory. If it is not passed then disable
244 		 * flash support
245 		 */
246 		if (!cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time) {
247 			if (cudbg_init->verbose)
248 				cudbg_init->print("\nTimestamp param missing, "
249 					  "so ignoring flash write request\n\n");
250 			cudbg_init->use_flash = 0;
251 		}
252 #endif
253 	}
254 
255 	if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
256 	    dbg_buff.size) {
257 		rc = CUDBG_STATUS_SMALL_BUFF;
258 		total_size = cudbg_hdr->hdr_len;
259 		goto err;
260 	}
261 
262 	/* If ext flag is set then move the offset to the end of the buf
263 	 * so that we can add ext entities
264 	 */
265 	if (flag_ext) {
266 		ext_entity_hdr = (struct cudbg_entity_hdr *)
267 			      ((char *)outbuf + cudbg_hdr->hdr_len +
268 			      (sizeof(struct cudbg_entity_hdr) *
269 			      (CUDBG_EXT_ENTITY - 1)));
270 		ext_entity_hdr->start_offset = cudbg_hdr->data_len;
271 		ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
272 		ext_entity_hdr->size = 0;
273 		dbg_buff.offset = cudbg_hdr->data_len;
274 	} else {
275 		dbg_buff.offset += cudbg_hdr->hdr_len; /* move 24 bytes*/
276 		dbg_buff.offset += CUDBG_MAX_ENTITY *
277 					sizeof(struct cudbg_entity_hdr);
278 	}
279 
280 	total_size = dbg_buff.offset;
281 	all = dbg_bitmap[0] & (1 << CUDBG_ALL);
282 
283 	/*sort(large_entity_list);*/
284 
285 	for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
286 		index = i / 8;
287 		bit = i % 8;
288 
289 		if (entity_list[i].bit == CUDBG_EXT_ENTITY)
290 			continue;
291 
292 		if (all || (dbg_bitmap[index] & (1 << bit))) {
293 
294 			if (!flag_ext) {
295 				rc = get_entity_hdr(outbuf, i, dbg_buff.size,
296 						    &entity_hdr);
297 				if (rc)
298 					cudbg_hdr->hdr_flags = rc;
299 			} else {
300 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
301 							     &dbg_buff,
302 							     &entity_hdr);
303 				if (rc)
304 					goto err;
305 
306 				/* move the offset after the ext header */
307 				dbg_buff.offset +=
308 					sizeof(struct cudbg_entity_hdr);
309 			}
310 
311 			entity_hdr->entity_type = i;
312 			entity_hdr->start_offset = dbg_buff.offset;
313 			/* process each entity by calling process_entity fp */
314 			remaining_buf_size = dbg_buff.size - dbg_buff.offset;
315 
316 			if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
317 			    is_large_entity(i)) {
318 				if (cudbg_init->verbose)
319 					cudbg_init->print("Skipping %s\n",
320 					    entity_list[i].name);
321 				skip_entity(i);
322 				continue;
323 			} else {
324 
325 				/* If fw_attach is 0, then skip entities which
326 				 * communicate with the firmware
327 				 */
328 
329 				if (!is_fw_attached(cudbg_init) &&
330 				    (entity_list[i].flag &
331 				    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
332 					if (cudbg_init->verbose)
333 						cudbg_init->print("Skipping %s entity, "\
334 							  "because fw_attach "\
335 							  "is 0\n",
336 							  entity_list[i].name);
337 					continue;
338 				}
339 
340 				if (cudbg_init->verbose)
341 					cudbg_init->print("collecting debug entity: "\
342 						  "%s\n", entity_list[i].name);
343 				memset(&cudbg_err, 0,
344 				       sizeof(struct cudbg_error));
345 				rc = process_entity[i-1](cudbg_init, &dbg_buff,
346 							 &cudbg_err);
347 			}
348 
349 			if (rc) {
350 				entity_hdr->size = 0;
351 				dbg_buff.offset = entity_hdr->start_offset;
352 			} else
353 				align_debug_buffer(&dbg_buff, entity_hdr);
354 
355 			if (cudbg_err.sys_err)
356 				rc = CUDBG_SYSTEM_ERROR;
357 
358 			entity_hdr->hdr_flags =  rc;
359 			entity_hdr->sys_err = cudbg_err.sys_err;
360 			entity_hdr->sys_warn =	cudbg_err.sys_warn;
361 
362 			/* We don't want to include ext entity size in global
363 			 * header
364 			 */
365 			if (!flag_ext)
366 				total_size += entity_hdr->size;
367 
368 			cudbg_hdr->data_len = total_size;
369 			*outbuf_size = total_size;
370 
371 			/* consider the size of the ext entity header and data
372 			 * also
373 			 */
374 			if (flag_ext) {
375 				ext_size += (sizeof(struct cudbg_entity_hdr) +
376 					     entity_hdr->size);
377 				entity_hdr->start_offset -= cudbg_hdr->data_len;
378 				ext_entity_hdr->size = ext_size;
379 				entity_hdr->next_ext_offset = ext_size;
380 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
381 			}
382 
383 			if (cudbg_init->use_flash) {
384 				if (flag_ext) {
385 					wr_entity_to_flash(handle,
386 							   &dbg_buff,
387 							   ext_entity_hdr->
388 							   start_offset,
389 							   entity_hdr->
390 							   size,
391 							   CUDBG_EXT_ENTITY,
392 							   ext_size);
393 				}
394 				else
395 					wr_entity_to_flash(handle,
396 							   &dbg_buff,
397 							   entity_hdr->
398 							   start_offset,
399 							   entity_hdr->size,
400 							   i, ext_size);
401 			}
402 		}
403 	}
404 
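	/*
	 * Second pass: retry the large entities that were skipped above
	 * because the remaining buffer space was at or below
	 * BUFFER_WARN_LIMIT.
	 */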
405 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
407 		large_entity_code = large_entity_list[i].entity_code;
408 		if (large_entity_list[i].skip_flag) {
409 			if (!flag_ext) {
410 				rc = get_entity_hdr(outbuf, large_entity_code,
411 						    dbg_buff.size, &entity_hdr);
412 				if (rc)
413 					cudbg_hdr->hdr_flags = rc;
414 			} else {
415 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
416 							     &dbg_buff,
417 							     &entity_hdr);
418 				if (rc)
419 					goto err;
420 
421 				dbg_buff.offset +=
422 					sizeof(struct cudbg_entity_hdr);
423 			}
424 
425 			/* If fw_attach is 0, then skip entities which
426 			 * communicate with the firmware
427 			 */
428 			if (!is_fw_attached(cudbg_init) &&
429 			    (entity_list[large_entity_code].flag &
430 			    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
431 				if (cudbg_init->verbose)
432 					cudbg_init->print("Skipping %s entity, "\
433 						  "because fw_attach "\
434 						  "is 0\n",
435 						  entity_list[large_entity_code]
436 						  .name);
437 				continue;
438 			}
439 
440 			entity_hdr->entity_type = large_entity_code;
441 			entity_hdr->start_offset = dbg_buff.offset;
442 			if (cudbg_init->verbose)
443 				cudbg_init->print("Re-trying debug entity: %s\n",
444 					  entity_list[large_entity_code].name);
445 
446 			memset(&cudbg_err, 0, sizeof(struct cudbg_error));
447 			rc = process_entity[large_entity_code - 1](cudbg_init,
448 								   &dbg_buff,
449 								   &cudbg_err);
450 			if (rc) {
451 				entity_hdr->size = 0;
452 				dbg_buff.offset = entity_hdr->start_offset;
453 			} else
454 				align_debug_buffer(&dbg_buff, entity_hdr);
455 
456 			if (cudbg_err.sys_err)
457 				rc = CUDBG_SYSTEM_ERROR;
458 
459 			entity_hdr->hdr_flags = rc;
460 			entity_hdr->sys_err = cudbg_err.sys_err;
461 			entity_hdr->sys_warn =	cudbg_err.sys_warn;
462 
463 			/* We don't want to include ext entity size in global
464 			 * header
465 			 */
466 			if (!flag_ext)
467 				total_size += entity_hdr->size;
468 
469 			cudbg_hdr->data_len = total_size;
470 			*outbuf_size = total_size;
471 
472 			/* consider the size of the ext entity header and
473 			 * data also
474 			 */
475 			if (flag_ext) {
476 				ext_size += (sizeof(struct cudbg_entity_hdr) +
477 						   entity_hdr->size);
478 				entity_hdr->start_offset -=
479 							cudbg_hdr->data_len;
480 				ext_entity_hdr->size = ext_size;
481 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
482 			}
483 
484 			if (cudbg_init->use_flash) {
485 				if (flag_ext)
486 					wr_entity_to_flash(handle,
487 							   &dbg_buff,
488 							   ext_entity_hdr->
489 							   start_offset,
490 							   entity_hdr->size,
491 							   CUDBG_EXT_ENTITY,
492 							   ext_size);
493 				else
494 					wr_entity_to_flash(handle,
495 							   &dbg_buff,
496 							   entity_hdr->
497 							   start_offset,
498 							   entity_hdr->
499 							   size,
500 							   large_entity_list[i].
501 							   entity_code,
502 							   ext_size);
503 			}
504 		}
505 	}
506 
507 	cudbg_hdr->data_len = total_size;
508 	*outbuf_size = total_size;
509 
510 	if (flag_ext)
511 		*outbuf_size += ext_size;
512 
513 	return 0;
514 err:
515 	return rc;
516 }
517 
518 void reset_skip_entity(void)
519 {
520 	int i;
521 
522 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
523 		large_entity_list[i].skip_flag = 0;
524 }
525 
526 void skip_entity(int entity_code)
527 {
528 	int i;
529 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
531 		if (large_entity_list[i].entity_code == entity_code)
532 			large_entity_list[i].skip_flag = 1;
533 	}
534 }
535 
536 int is_large_entity(int entity_code)
537 {
538 	int i;
539 
540 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
542 		if (large_entity_list[i].entity_code == entity_code)
543 			return 1;
544 	}
545 	return 0;
546 }
547 
548 int get_entity_hdr(void *outbuf, int i, u32 size,
549 		   struct cudbg_entity_hdr **entity_hdr)
550 {
551 	int rc = 0;
552 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
553 
554 	if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr)*i) > size)
555 		return CUDBG_STATUS_SMALL_BUFF;
556 
557 	*entity_hdr = (struct cudbg_entity_hdr *)
558 		      ((char *)outbuf+cudbg_hdr->hdr_len +
559 		       (sizeof(struct cudbg_entity_hdr)*(i-1)));
560 	return rc;
561 }
562 
563 static int collect_rss(struct cudbg_init *pdbg_init,
564 		       struct cudbg_buffer *dbg_buff,
565 		       struct cudbg_error *cudbg_err)
566 {
567 	struct adapter *padap = pdbg_init->adap;
568 	struct cudbg_buffer scratch_buff;
569 	u32 size;
570 	int rc = 0;
571 
572 	size = RSS_NENTRIES  * sizeof(u16);
573 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
574 	if (rc)
575 		goto err;
576 
577 	rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
578 	if (rc) {
579 		if (pdbg_init->verbose)
580 			pdbg_init->print("%s(), t4_read_rss failed, rc: %d\n",
581 				 __func__, rc);
582 		cudbg_err->sys_err = rc;
583 		goto err1;
584 	}
585 
586 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
587 	if (rc)
588 		goto err1;
589 
590 	rc = compress_buff(&scratch_buff, dbg_buff);
591 
592 err1:
593 	release_scratch_buff(&scratch_buff, dbg_buff);
594 err:
595 	return rc;
596 }
597 
598 static int collect_sw_state(struct cudbg_init *pdbg_init,
599 			    struct cudbg_buffer *dbg_buff,
600 			    struct cudbg_error *cudbg_err)
601 {
602 	struct adapter *padap = pdbg_init->adap;
603 	struct cudbg_buffer scratch_buff;
604 	struct sw_state *swstate;
605 	u32 size;
606 	int rc = 0;
607 
608 	size = sizeof(struct sw_state);
609 
610 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
611 	if (rc)
612 		goto err;
613 
614 	swstate = (struct sw_state *) scratch_buff.data;
615 
616 	swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
617 	snprintf(swstate->caller_string, sizeof(swstate->caller_string), "%s",
618 	    "FreeBSD");
619 	swstate->os_type = 0;
620 
621 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
622 	if (rc)
623 		goto err1;
624 
625 	rc = compress_buff(&scratch_buff, dbg_buff);
626 
627 err1:
628 	release_scratch_buff(&scratch_buff, dbg_buff);
629 err:
630 	return rc;
631 }
632 
633 static int collect_ddp_stats(struct cudbg_init *pdbg_init,
634 			     struct cudbg_buffer *dbg_buff,
635 			     struct cudbg_error *cudbg_err)
636 {
637 	struct adapter *padap = pdbg_init->adap;
638 	struct cudbg_buffer scratch_buff;
639 	struct tp_usm_stats  *tp_usm_stats_buff;
640 	u32 size;
641 	int rc = 0;
642 
643 	size = sizeof(struct tp_usm_stats);
644 
645 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
646 	if (rc)
647 		goto err;
648 
649 	tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;
650 
651 	/* spin_lock(&padap->stats_lock);	TODO*/
652 	t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
653 	/* spin_unlock(&padap->stats_lock);	TODO*/
654 
655 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
656 	if (rc)
657 		goto err1;
658 
659 	rc = compress_buff(&scratch_buff, dbg_buff);
660 
661 err1:
662 	release_scratch_buff(&scratch_buff, dbg_buff);
663 err:
664 	return rc;
665 }
666 
667 static int collect_ulptx_la(struct cudbg_init *pdbg_init,
668 			    struct cudbg_buffer *dbg_buff,
669 			    struct cudbg_error *cudbg_err)
670 {
671 	struct adapter *padap = pdbg_init->adap;
672 	struct cudbg_buffer scratch_buff;
673 	struct struct_ulptx_la *ulptx_la_buff;
674 	u32 size, i, j;
675 	int rc = 0;
676 
677 	size = sizeof(struct struct_ulptx_la);
678 
679 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
680 	if (rc)
681 		goto err;
682 
683 	ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;
684 
685 	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
686 		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
687 						      A_ULP_TX_LA_RDPTR_0 +
688 						      0x10 * i);
689 		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
690 						      A_ULP_TX_LA_WRPTR_0 +
691 						      0x10 * i);
692 		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
693 						       A_ULP_TX_LA_RDDATA_0 +
694 						       0x10 * i);
695 		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
696 			ulptx_la_buff->rd_data[i][j] =
697 				t4_read_reg(padap,
698 					    A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
699 		}
700 	}
701 
702 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
703 	if (rc)
704 		goto err1;
705 
706 	rc = compress_buff(&scratch_buff, dbg_buff);
707 
708 err1:
709 	release_scratch_buff(&scratch_buff, dbg_buff);
710 err:
711 	return rc;
712 
713 }
714 
715 static int collect_ulprx_la(struct cudbg_init *pdbg_init,
716 			    struct cudbg_buffer *dbg_buff,
717 			    struct cudbg_error *cudbg_err)
718 {
719 	struct adapter *padap = pdbg_init->adap;
720 	struct cudbg_buffer scratch_buff;
721 	struct struct_ulprx_la *ulprx_la_buff;
722 	u32 size;
723 	int rc = 0;
724 
725 	size = sizeof(struct struct_ulprx_la);
726 
727 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
728 	if (rc)
729 		goto err;
730 
731 	ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
732 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
733 	ulprx_la_buff->size = ULPRX_LA_SIZE;
734 
735 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
736 	if (rc)
737 		goto err1;
738 
739 	rc = compress_buff(&scratch_buff, dbg_buff);
740 
741 err1:
742 	release_scratch_buff(&scratch_buff, dbg_buff);
743 err:
744 	return rc;
745 }
746 
747 static int collect_cpl_stats(struct cudbg_init *pdbg_init,
748 			     struct cudbg_buffer *dbg_buff,
749 			     struct cudbg_error *cudbg_err)
750 {
751 	struct adapter *padap = pdbg_init->adap;
752 	struct cudbg_buffer scratch_buff;
753 	struct struct_tp_cpl_stats *tp_cpl_stats_buff;
754 	u32 size;
755 	int rc = 0;
756 
757 	size = sizeof(struct struct_tp_cpl_stats);
758 
759 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
760 	if (rc)
761 		goto err;
762 
763 	tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
764 	tp_cpl_stats_buff->nchan = padap->chip_params->nchan;
765 
766 	/* spin_lock(&padap->stats_lock);	TODO*/
767 	t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
768 	/* spin_unlock(&padap->stats_lock);	TODO*/
769 
770 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
771 	if (rc)
772 		goto err1;
773 
774 	rc = compress_buff(&scratch_buff, dbg_buff);
775 
776 err1:
777 	release_scratch_buff(&scratch_buff, dbg_buff);
778 err:
779 	return rc;
780 }
781 
782 static int collect_wc_stats(struct cudbg_init *pdbg_init,
783 			    struct cudbg_buffer *dbg_buff,
784 			    struct cudbg_error *cudbg_err)
785 {
786 	struct adapter *padap = pdbg_init->adap;
787 	struct cudbg_buffer scratch_buff;
788 	struct struct_wc_stats *wc_stats_buff;
789 	u32 val1;
790 	u32 val2;
791 	u32 size;
792 
793 	int rc = 0;
794 
795 	size = sizeof(struct struct_wc_stats);
796 
797 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
798 	if (rc)
799 		goto err;
800 
801 	wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;
802 
803 	if (!is_t4(padap)) {
804 		val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
805 		val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
806 		wc_stats_buff->wr_cl_success = val1 - val2;
807 		wc_stats_buff->wr_cl_fail = val2;
808 	} else {
809 		wc_stats_buff->wr_cl_success = 0;
810 		wc_stats_buff->wr_cl_fail = 0;
811 	}
812 
813 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
814 	if (rc)
815 		goto err1;
816 
817 	rc = compress_buff(&scratch_buff, dbg_buff);
818 err1:
819 	release_scratch_buff(&scratch_buff, dbg_buff);
820 err:
821 	return rc;
822 }
823 
824 static int mem_desc_cmp(const void *a, const void *b)
825 {
826 	return ((const struct struct_mem_desc *)a)->base -
827 		((const struct struct_mem_desc *)b)->base;
828 }
829 
830 static int fill_meminfo(struct adapter *padap,
831 			struct struct_meminfo *meminfo_buff)
832 {
833 	struct struct_mem_desc *md;
834 	u32 size, lo, hi;
835 	u32 used, alloc;
836 	int n, i, rc = 0;
837 
838 	size = sizeof(struct struct_meminfo);
839 
840 	memset(meminfo_buff->avail, 0,
841 	       ARRAY_SIZE(meminfo_buff->avail) *
842 	       sizeof(struct struct_mem_desc));
843 	memset(meminfo_buff->mem, 0,
844 	       (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
845 	md  = meminfo_buff->mem;
846 
847 	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
848 		meminfo_buff->mem[i].limit = 0;
849 		meminfo_buff->mem[i].idx = i;
850 	}
851 
852 	i = 0;
853 
854 	lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
855 
856 	if (lo & F_EDRAM0_ENABLE) {
857 		hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
858 		meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
859 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
860 					       (G_EDRAM0_SIZE(hi) << 20);
861 		meminfo_buff->avail[i].idx = 0;
862 		i++;
863 	}
864 
865 	if (lo & F_EDRAM1_ENABLE) {
866 		hi =  t4_read_reg(padap, A_MA_EDRAM1_BAR);
867 		meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
868 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
869 					       (G_EDRAM1_SIZE(hi) << 20);
870 		meminfo_buff->avail[i].idx = 1;
871 		i++;
872 	}
873 
874 	if (is_t5(padap)) {
875 		if (lo & F_EXT_MEM0_ENABLE) {
876 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
877 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
878 			meminfo_buff->avail[i].limit =
879 				meminfo_buff->avail[i].base +
880 				(G_EXT_MEM_SIZE(hi) << 20);
881 			meminfo_buff->avail[i].idx = 3;
882 			i++;
883 		}
884 
885 		if (lo & F_EXT_MEM1_ENABLE) {
886 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
887 			meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
888 			meminfo_buff->avail[i].limit =
889 				meminfo_buff->avail[i].base +
890 				(G_EXT_MEM1_SIZE(hi) << 20);
891 			meminfo_buff->avail[i].idx = 4;
892 			i++;
893 		}
894 	} else if (is_t6(padap)) {
895 		if (lo & F_EXT_MEM_ENABLE) {
896 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
897 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
898 			meminfo_buff->avail[i].limit =
899 				meminfo_buff->avail[i].base +
900 				(G_EXT_MEM_SIZE(hi) << 20);
901 			meminfo_buff->avail[i].idx = 2;
902 			i++;
903 		}
904 	}
905 
906 	if (!i) {				   /* no memory available */
907 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
908 		goto err;
909 	}
910 
911 	meminfo_buff->avail_c = i;
912 	qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
913 	    mem_desc_cmp);
914 	(md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
915 	(md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
916 	(md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
917 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
918 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
919 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
920 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
921 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
922 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);
923 
924 	/* the next few have explicit upper bounds */
925 	md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
926 	md->limit = md->base - 1 +
927 		    t4_read_reg(padap,
928 				A_TP_PMM_TX_PAGE_SIZE) *
929 				G_PMTXMAXPAGE(t4_read_reg(padap,
930 							  A_TP_PMM_TX_MAX_PAGE)
931 					     );
932 	md++;
933 
934 	md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
935 	md->limit = md->base - 1 +
936 		    t4_read_reg(padap,
937 				A_TP_PMM_RX_PAGE_SIZE) *
938 				G_PMRXMAXPAGE(t4_read_reg(padap,
939 							  A_TP_PMM_RX_MAX_PAGE)
940 					      );
941 	md++;
942 	if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
943 		if (chip_id(padap) <= CHELSIO_T5) {
944 			hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
945 			md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
946 		} else {
947 			hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
948 			md->base = t4_read_reg(padap,
949 					       A_LE_DB_HASH_TBL_BASE_ADDR);
950 		}
951 		md->limit = 0;
952 	} else {
953 		md->base = 0;
954 		md->idx = ARRAY_SIZE(region);  /* hide it */
955 	}
956 	md++;
957 #define ulp_region(reg) \
958 	{\
959 		md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
960 		(md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
961 	}
962 
963 	ulp_region(RX_ISCSI);
964 	ulp_region(RX_TDDP);
965 	ulp_region(TX_TPT);
966 	ulp_region(RX_STAG);
967 	ulp_region(RX_RQ);
968 	ulp_region(RX_RQUDP);
969 	ulp_region(RX_PBL);
970 	ulp_region(TX_PBL);
971 #undef ulp_region
972 	md->base = 0;
973 	md->idx = ARRAY_SIZE(region);
974 	if (!is_t4(padap)) {
975 		u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
976 		u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);
977 		if (is_t5(padap)) {
978 			if (sge_ctrl & F_VFIFO_ENABLE)
979 				size = G_DBVFIFO_SIZE(fifo_size);
980 		} else
981 			size = G_T6_DBVFIFO_SIZE(fifo_size);
982 
983 		if (size) {
984 			md->base = G_BASEADDR(t4_read_reg(padap,
985 							  A_SGE_DBVFIFO_BADDR));
986 			md->limit = md->base + (size << 2) - 1;
987 		}
988 	}
989 
990 	md++;
991 
992 	md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
993 	md->limit = 0;
994 	md++;
995 	md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
996 	md->limit = 0;
997 	md++;
998 #ifndef __NO_DRIVER_OCQ_SUPPORT__
999 	/*md->base = padap->vres.ocq.start;*/
1000 	/*if (adap->vres.ocq.size)*/
1001 	/*	  md->limit = md->base + adap->vres.ocq.size - 1;*/
1002 	/*else*/
1003 	md->idx = ARRAY_SIZE(region);  /* hide it */
1004 	md++;
1005 #endif
1006 
1007 	/* add any address-space holes, there can be up to 3 */
1008 	for (n = 0; n < i - 1; n++)
1009 		if (meminfo_buff->avail[n].limit <
1010 		    meminfo_buff->avail[n + 1].base)
1011 			(md++)->base = meminfo_buff->avail[n].limit;
1012 
1013 	if (meminfo_buff->avail[n].limit)
1014 		(md++)->base = meminfo_buff->avail[n].limit;
1015 
1016 	n = (int) (md - meminfo_buff->mem);
1017 	meminfo_buff->mem_c = n;
1018 
1019 	qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
1020 	    mem_desc_cmp);
1021 
1022 	lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
1023 	hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
1024 	meminfo_buff->up_ram_lo = lo;
1025 	meminfo_buff->up_ram_hi = hi;
1026 
1027 	lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
1028 	hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
1029 	meminfo_buff->up_extmem2_lo = lo;
1030 	meminfo_buff->up_extmem2_hi = hi;
1031 
1032 	lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
1033 	meminfo_buff->rx_pages_data[0] =  G_PMRXMAXPAGE(lo);
1034 	meminfo_buff->rx_pages_data[1] =
1035 		t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
1036 	meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1;
1037 
1038 	lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
1039 	hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
1040 	meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
1041 	meminfo_buff->tx_pages_data[1] =
1042 		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
1043 	meminfo_buff->tx_pages_data[2] =
1044 		hi >= (1 << 20) ? 'M' : 'K';
1045 	meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);
1046 
1047 	for (i = 0; i < 4; i++) {
1048 		if (chip_id(padap) > CHELSIO_T5)
1049 			lo = t4_read_reg(padap,
1050 					 A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
1051 		else
1052 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
1053 		if (is_t5(padap)) {
1054 			used = G_T5_USED(lo);
1055 			alloc = G_T5_ALLOC(lo);
1056 		} else {
1057 			used = G_USED(lo);
1058 			alloc = G_ALLOC(lo);
1059 		}
1060 		meminfo_buff->port_used[i] = used;
1061 		meminfo_buff->port_alloc[i] = alloc;
1062 	}
1063 
1064 	for (i = 0; i < padap->chip_params->nchan; i++) {
1065 		if (chip_id(padap) > CHELSIO_T5)
1066 			lo = t4_read_reg(padap,
1067 					 A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
1068 		else
1069 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
1070 		if (is_t5(padap)) {
1071 			used = G_T5_USED(lo);
1072 			alloc = G_T5_ALLOC(lo);
1073 		} else {
1074 			used = G_USED(lo);
1075 			alloc = G_ALLOC(lo);
1076 		}
1077 		meminfo_buff->loopback_used[i] = used;
1078 		meminfo_buff->loopback_alloc[i] = alloc;
1079 	}
1080 err:
1081 	return rc;
1082 }
1083 
1084 static int collect_meminfo(struct cudbg_init *pdbg_init,
1085 			   struct cudbg_buffer *dbg_buff,
1086 			   struct cudbg_error *cudbg_err)
1087 {
1088 	struct adapter *padap = pdbg_init->adap;
1089 	struct cudbg_buffer scratch_buff;
1090 	struct struct_meminfo *meminfo_buff;
1091 	int rc = 0;
1092 	u32 size;
1093 
1094 	size = sizeof(struct struct_meminfo);
1095 
1096 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1097 	if (rc)
1098 		goto err;
1099 
1100 	meminfo_buff = (struct struct_meminfo *)scratch_buff.data;
1101 
1102 	rc = fill_meminfo(padap, meminfo_buff);
1103 	if (rc)
1104 		goto err1;
1105 
1106 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1107 	if (rc)
1108 		goto err1;
1109 
1110 	rc = compress_buff(&scratch_buff, dbg_buff);
1111 err1:
1112 	release_scratch_buff(&scratch_buff, dbg_buff);
1113 err:
1114 	return rc;
1115 }
1116 
1117 static int collect_lb_stats(struct cudbg_init *pdbg_init,
1118 			    struct cudbg_buffer *dbg_buff,
1119 			    struct cudbg_error *cudbg_err)
1120 {
1121 	struct adapter *padap = pdbg_init->adap;
1122 	struct cudbg_buffer scratch_buff;
1123 	struct lb_port_stats *tmp_stats;
1124 	struct struct_lb_stats *lb_stats_buff;
1125 	u32 i, n, size;
1126 	int rc = 0;
1127 
1128 	rc = padap->params.nports;
1129 	if (rc < 0)
1130 		goto err;
1131 
1132 	n = rc;
1133 	size = sizeof(struct struct_lb_stats) +
1134 	       n * sizeof(struct lb_port_stats);
1135 
1136 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1137 	if (rc)
1138 		goto err;
1139 
1140 	lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;
1141 
1142 	lb_stats_buff->nchan = n;
1143 	tmp_stats = lb_stats_buff->s;
1144 
1145 	for (i = 0; i < n; i += 2, tmp_stats += 2) {
1146 		t4_get_lb_stats(padap, i, tmp_stats);
1147 		t4_get_lb_stats(padap, i + 1, tmp_stats+1);
1148 	}
1149 
1150 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1151 	if (rc)
1152 		goto err1;
1153 
1154 	rc = compress_buff(&scratch_buff, dbg_buff);
1155 err1:
1156 	release_scratch_buff(&scratch_buff, dbg_buff);
1157 err:
1158 	return rc;
1159 }
1160 
1161 static int collect_rdma_stats(struct cudbg_init *pdbg_init,
1162 			      struct cudbg_buffer *dbg_buff,
1163 			      struct cudbg_error *cudbg_err)
1164 {
1165 	struct adapter *padap = pdbg_init->adap;
1166 	struct cudbg_buffer scratch_buff;
1167 	struct tp_rdma_stats *rdma_stats_buff;
1168 	u32 size;
1169 	int rc = 0;
1170 
1171 	size = sizeof(struct tp_rdma_stats);
1172 
1173 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1174 	if (rc)
1175 		goto err;
1176 
1177 	rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;
1178 
1179 	/* spin_lock(&padap->stats_lock);	TODO*/
1180 	t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
1181 	/* spin_unlock(&padap->stats_lock);	TODO*/
1182 
1183 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1184 	if (rc)
1185 		goto err1;
1186 
1187 	rc = compress_buff(&scratch_buff, dbg_buff);
1188 err1:
1189 	release_scratch_buff(&scratch_buff, dbg_buff);
1190 err:
1191 	return rc;
1192 }
1193 
1194 static int collect_clk_info(struct cudbg_init *pdbg_init,
1195 			    struct cudbg_buffer *dbg_buff,
1196 			    struct cudbg_error *cudbg_err)
1197 {
1198 	struct cudbg_buffer scratch_buff;
1199 	struct adapter *padap = pdbg_init->adap;
1200 	struct struct_clk_info *clk_info_buff;
1201 	u64 tp_tick_us;
1202 	int size;
1203 	int rc = 0;
1204 
1205 	if (!padap->params.vpd.cclk) {
1206 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1207 		goto err;
1208 	}
1209 
1210 	size = sizeof(struct struct_clk_info);
1211 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1212 	if (rc)
1213 		goto err;
1214 
1215 	clk_info_buff = (struct struct_clk_info *) scratch_buff.data;
1216 
1217 	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* in ps */
1219 	clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
1220 	clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
1221 	clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
1222 	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1223 	/* in us */
1224 	clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
1225 				      clk_info_buff->dack_re) / 1000000) *
1226 				     t4_read_reg(padap, A_TP_DACK_TIMER);
1227 
1228 	clk_info_buff->retransmit_min =
1229 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
1230 	clk_info_buff->retransmit_max =
1231 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);
1232 
1233 	clk_info_buff->persist_timer_min =
1234 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
1235 	clk_info_buff->persist_timer_max =
1236 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);
1237 
1238 	clk_info_buff->keepalive_idle_timer =
1239 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
1240 	clk_info_buff->keepalive_interval =
1241 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);
1242 
1243 	clk_info_buff->initial_srtt =
1244 		tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
1245 	clk_info_buff->finwait2_timer =
1246 		tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);
1247 
1248 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1249 
1250 	if (rc)
1251 		goto err1;
1252 
1253 	rc = compress_buff(&scratch_buff, dbg_buff);
1254 err1:
1255 	release_scratch_buff(&scratch_buff, dbg_buff);
1256 err:
1257 	return rc;
1258 
1259 }
1260 
1261 static int collect_macstats(struct cudbg_init *pdbg_init,
1262 			    struct cudbg_buffer *dbg_buff,
1263 			    struct cudbg_error *cudbg_err)
1264 {
1265 	struct adapter *padap = pdbg_init->adap;
1266 	struct cudbg_buffer scratch_buff;
1267 	struct struct_mac_stats_rev1 *mac_stats_buff;
1268 	u32 i, n, size;
1269 	int rc = 0;
1270 
1271 	rc = padap->params.nports;
1272 	if (rc < 0)
1273 		goto err;
1274 
1275 	n = rc;
1276 	size = sizeof(struct struct_mac_stats_rev1);
1277 
1278 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1279 	if (rc)
1280 		goto err;
1281 
1282 	mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;
1283 
1284 	mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1285 	mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
1286 	mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
1287 				       sizeof(struct cudbg_ver_hdr);
1288 
1289 	mac_stats_buff->port_count = n;
1290 	for (i = 0; i <  mac_stats_buff->port_count; i++)
1291 		t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
1292 
1293 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1294 	if (rc)
1295 		goto err1;
1296 
1297 	rc = compress_buff(&scratch_buff, dbg_buff);
1298 err1:
1299 	release_scratch_buff(&scratch_buff, dbg_buff);
1300 err:
1301 	return rc;
1302 }
1303 
1304 static int collect_cim_pif_la(struct cudbg_init *pdbg_init,
1305 			      struct cudbg_buffer *dbg_buff,
1306 			      struct cudbg_error *cudbg_err)
1307 {
1308 	struct adapter *padap = pdbg_init->adap;
1309 	struct cudbg_buffer scratch_buff;
1310 	struct cim_pif_la *cim_pif_la_buff;
1311 	u32 size;
1312 	int rc = 0;
1313 
1314 	size = sizeof(struct cim_pif_la) +
1315 	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1316 
1317 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1318 	if (rc)
1319 		goto err;
1320 
1321 	cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
1322 	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1323 
1324 	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1325 			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1326 			   NULL, NULL);
1327 
1328 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1329 	if (rc)
1330 		goto err1;
1331 
1332 	rc = compress_buff(&scratch_buff, dbg_buff);
1333 err1:
1334 	release_scratch_buff(&scratch_buff, dbg_buff);
1335 err:
1336 	return rc;
1337 }
1338 
1339 static int collect_tp_la(struct cudbg_init *pdbg_init,
1340 			 struct cudbg_buffer *dbg_buff,
1341 			 struct cudbg_error *cudbg_err)
1342 {
1343 	struct adapter *padap = pdbg_init->adap;
1344 	struct cudbg_buffer scratch_buff;
1345 	struct struct_tp_la *tp_la_buff;
1346 	u32 size;
1347 	int rc = 0;
1348 
1349 	size = sizeof(struct struct_tp_la) + TPLA_SIZE *  sizeof(u64);
1350 
1351 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1352 	if (rc)
1353 		goto err;
1354 
1355 	tp_la_buff = (struct struct_tp_la *) scratch_buff.data;
1356 
1357 	tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
1358 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1359 
1360 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1361 	if (rc)
1362 		goto err1;
1363 
1364 	rc = compress_buff(&scratch_buff, dbg_buff);
1365 err1:
1366 	release_scratch_buff(&scratch_buff, dbg_buff);
1367 err:
1368 	return rc;
1369 }
1370 
1371 static int collect_fcoe_stats(struct cudbg_init *pdbg_init,
1372 			      struct cudbg_buffer *dbg_buff,
1373 			      struct cudbg_error *cudbg_err)
1374 {
1375 	struct adapter *padap = pdbg_init->adap;
1376 	struct cudbg_buffer scratch_buff;
1377 	struct struct_tp_fcoe_stats  *tp_fcoe_stats_buff;
1378 	u32 size;
1379 	int rc = 0;
1380 
1381 	size = sizeof(struct struct_tp_fcoe_stats);
1382 
1383 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1384 	if (rc)
1385 		goto err;
1386 
1387 	tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
1388 
1389 	/* spin_lock(&padap->stats_lock);	TODO*/
1390 	t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
1391 	t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
1392 	if (padap->chip_params->nchan == NCHAN) {
1393 		t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
1394 		t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
1395 	}
1396 	/* spin_unlock(&padap->stats_lock);	TODO*/
1397 
1398 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1399 	if (rc)
1400 		goto err1;
1401 
1402 	rc = compress_buff(&scratch_buff, dbg_buff);
1403 err1:
1404 	release_scratch_buff(&scratch_buff, dbg_buff);
1405 err:
1406 	return rc;
1407 }
1408 
1409 static int collect_tp_err_stats(struct cudbg_init *pdbg_init,
1410 				struct cudbg_buffer *dbg_buff,
1411 				struct cudbg_error *cudbg_err)
1412 {
1413 	struct adapter *padap = pdbg_init->adap;
1414 	struct cudbg_buffer scratch_buff;
1415 	struct struct_tp_err_stats *tp_err_stats_buff;
1416 	u32 size;
1417 	int rc = 0;
1418 
1419 	size = sizeof(struct struct_tp_err_stats);
1420 
1421 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1422 	if (rc)
1423 		goto err;
1424 
1425 	tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
1426 
1427 	/* spin_lock(&padap->stats_lock);	TODO*/
1428 	t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
1429 	/* spin_unlock(&padap->stats_lock);	TODO*/
1430 	tp_err_stats_buff->nchan = padap->chip_params->nchan;
1431 
1432 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1433 	if (rc)
1434 		goto err1;
1435 
1436 	rc = compress_buff(&scratch_buff, dbg_buff);
1437 err1:
1438 	release_scratch_buff(&scratch_buff, dbg_buff);
1439 err:
1440 	return rc;
1441 }
1442 
1443 static int collect_tcp_stats(struct cudbg_init *pdbg_init,
1444 			     struct cudbg_buffer *dbg_buff,
1445 			     struct cudbg_error *cudbg_err)
1446 {
1447 	struct adapter *padap = pdbg_init->adap;
1448 	struct cudbg_buffer scratch_buff;
1449 	struct struct_tcp_stats *tcp_stats_buff;
1450 	u32 size;
1451 	int rc = 0;
1452 
1453 	size = sizeof(struct struct_tcp_stats);
1454 
1455 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1456 	if (rc)
1457 		goto err;
1458 
1459 	tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
1460 
1461 	/* spin_lock(&padap->stats_lock);	TODO*/
1462 	t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
1463 	/* spin_unlock(&padap->stats_lock);	TODO*/
1464 
1465 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1466 	if (rc)
1467 		goto err1;
1468 
1469 	rc = compress_buff(&scratch_buff, dbg_buff);
1470 err1:
1471 	release_scratch_buff(&scratch_buff, dbg_buff);
1472 err:
1473 	return rc;
1474 }
1475 
1476 static int collect_hw_sched(struct cudbg_init *pdbg_init,
1477 			    struct cudbg_buffer *dbg_buff,
1478 			    struct cudbg_error *cudbg_err)
1479 {
1480 	struct adapter *padap = pdbg_init->adap;
1481 	struct cudbg_buffer scratch_buff;
1482 	struct struct_hw_sched *hw_sched_buff;
1483 	u32 size;
1484 	int i, rc = 0;
1485 
1486 	if (!padap->params.vpd.cclk) {
1487 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1488 		goto err;
1489 	}
1490 
1491 	size = sizeof(struct struct_hw_sched);
1492 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1493 	if (rc)
1494 		goto err;
1495 
1496 	hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
1497 
1498 	hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
1499 	hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
1500 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1501 
1502 	for (i = 0; i < NTX_SCHED; ++i) {
1503 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1504 		    &hw_sched_buff->ipg[i], 1);
1505 	}
1506 
1507 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1508 	if (rc)
1509 		goto err1;
1510 
1511 	rc = compress_buff(&scratch_buff, dbg_buff);
1512 err1:
1513 	release_scratch_buff(&scratch_buff, dbg_buff);
1514 err:
1515 	return rc;
1516 }
1517 
1518 static int collect_pm_stats(struct cudbg_init *pdbg_init,
1519 			    struct cudbg_buffer *dbg_buff,
1520 			    struct cudbg_error *cudbg_err)
1521 {
1522 	struct adapter *padap = pdbg_init->adap;
1523 	struct cudbg_buffer scratch_buff;
1524 	struct struct_pm_stats *pm_stats_buff;
1525 	u32 size;
1526 	int rc = 0;
1527 
1528 	size = sizeof(struct struct_pm_stats);
1529 
1530 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1531 	if (rc)
1532 		goto err;
1533 
1534 	pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
1535 
1536 	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1537 	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1538 
1539 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1540 	if (rc)
1541 		goto err1;
1542 
1543 	rc = compress_buff(&scratch_buff, dbg_buff);
1544 err1:
1545 	release_scratch_buff(&scratch_buff, dbg_buff);
1546 err:
1547 	return rc;
1548 }
1549 
1550 static int collect_path_mtu(struct cudbg_init *pdbg_init,
1551 			    struct cudbg_buffer *dbg_buff,
1552 			    struct cudbg_error *cudbg_err)
1553 {
1554 	struct adapter *padap = pdbg_init->adap;
1555 	struct cudbg_buffer scratch_buff;
1556 	u32 size;
1557 	int rc = 0;
1558 
1559 	size = NMTUS  * sizeof(u16);
1560 
1561 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1562 	if (rc)
1563 		goto err;
1564 
1565 	t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
1566 
1567 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1568 	if (rc)
1569 		goto err1;
1570 
1571 	rc = compress_buff(&scratch_buff, dbg_buff);
1572 err1:
1573 	release_scratch_buff(&scratch_buff, dbg_buff);
1574 err:
1575 	return rc;
1576 }
1577 
1578 static int collect_rss_key(struct cudbg_init *pdbg_init,
1579 			   struct cudbg_buffer *dbg_buff,
1580 			   struct cudbg_error *cudbg_err)
1581 {
1582 	struct adapter *padap = pdbg_init->adap;
1583 	struct cudbg_buffer scratch_buff;
1584 	u32 size;
1585 
1586 	int rc = 0;
1587 
1588 	size = 10  * sizeof(u32);
1589 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1590 	if (rc)
1591 		goto err;
1592 
1593 	t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
1594 
1595 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1596 	if (rc)
1597 		goto err1;
1598 
1599 	rc = compress_buff(&scratch_buff, dbg_buff);
1600 err1:
1601 	release_scratch_buff(&scratch_buff, dbg_buff);
1602 err:
1603 	return rc;
1604 }
1605 
1606 static int collect_rss_config(struct cudbg_init *pdbg_init,
1607 			      struct cudbg_buffer *dbg_buff,
1608 			      struct cudbg_error *cudbg_err)
1609 {
1610 	struct adapter *padap = pdbg_init->adap;
1611 	struct cudbg_buffer scratch_buff;
1612 	struct rss_config *rss_conf;
1613 	int rc;
1614 	u32 size;
1615 
1616 	size = sizeof(struct rss_config);
1617 
1618 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1619 	if (rc)
1620 		goto err;
1621 
1622 	rss_conf =  (struct rss_config *)scratch_buff.data;
1623 
1624 	rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
1625 	rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
1626 	rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
1627 	rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
1628 	rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
1629 	rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
1630 	rss_conf->chip = padap->params.chipid;
1631 
1632 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1633 	if (rc)
1634 		goto err1;
1635 
1636 	rc = compress_buff(&scratch_buff, dbg_buff);
1637 
1638 err1:
1639 	release_scratch_buff(&scratch_buff, dbg_buff);
1640 err:
1641 	return rc;
1642 }
1643 
1644 static int collect_rss_vf_config(struct cudbg_init *pdbg_init,
1645 				 struct cudbg_buffer *dbg_buff,
1646 				 struct cudbg_error *cudbg_err)
1647 {
1648 	struct adapter *padap = pdbg_init->adap;
1649 	struct cudbg_buffer scratch_buff;
1650 	struct rss_vf_conf *vfconf;
1651 	int vf, rc, vf_count;
1652 	u32 size;
1653 
1654 	vf_count = padap->chip_params->vfcount;
1655 	size = vf_count * sizeof(*vfconf);
1656 
1657 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1658 	if (rc)
1659 		goto err;
1660 
1661 	vfconf =  (struct rss_vf_conf *)scratch_buff.data;
1662 
1663 	for (vf = 0; vf < vf_count; vf++) {
1664 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1665 				      &vfconf[vf].rss_vf_vfh, 1);
1666 	}
1667 
1668 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1669 	if (rc)
1670 		goto err1;
1671 
1672 	rc = compress_buff(&scratch_buff, dbg_buff);
1673 
1674 err1:
1675 	release_scratch_buff(&scratch_buff, dbg_buff);
1676 err:
1677 	return rc;
1678 }
1679 
1680 static int collect_rss_pf_config(struct cudbg_init *pdbg_init,
1681 				 struct cudbg_buffer *dbg_buff,
1682 				 struct cudbg_error *cudbg_err)
1683 {
1684 	struct cudbg_buffer scratch_buff;
1685 	struct rss_pf_conf *pfconf;
1686 	struct adapter *padap = pdbg_init->adap;
1687 	u32 rss_pf_map, rss_pf_mask, size;
1688 	int pf, rc;
1689 
1690 	size = 8  * sizeof(*pfconf);
1691 
1692 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1693 	if (rc)
1694 		goto err;
1695 
1696 	pfconf =  (struct rss_pf_conf *)scratch_buff.data;
1697 
1698 	rss_pf_map = t4_read_rss_pf_map(padap, 1);
1699 	rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
1700 
1701 	for (pf = 0; pf < 8; pf++) {
1702 		pfconf[pf].rss_pf_map = rss_pf_map;
1703 		pfconf[pf].rss_pf_mask = rss_pf_mask;
1704 		/* no return val */
1705 		t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
1706 	}
1707 
1708 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1709 	if (rc)
1710 		goto err1;
1711 
1712 	rc = compress_buff(&scratch_buff, dbg_buff);
1713 err1:
1714 	release_scratch_buff(&scratch_buff, dbg_buff);
1715 err:
1716 	return rc;
1717 }
1718 
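/*
 * Test the "valid" bit of an SGE context image. The bit position depends on
 * the context type; e.g. for CTXT_EGRESS the valid bit is bit 176, which is
 * buf[176 / 32] == buf[5], bit 176 % 32 == 16.
 */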
1719 static int check_valid(u32 *buf, int type)
1720 {
1721 	int index;
1722 	int bit;
1723 	int bit_pos = 0;
1724 
1725 	switch (type) {
1726 	case CTXT_EGRESS:
1727 		bit_pos = 176;
1728 		break;
1729 	case CTXT_INGRESS:
1730 		bit_pos = 141;
1731 		break;
1732 	case CTXT_FLM:
1733 		bit_pos = 89;
1734 		break;
1735 	}
1736 	index = bit_pos / 32;
1737 	bit =  bit_pos % 32;
1738 
1739 	return buf[index] & (1U << bit);
1740 }
1741 
1742 /**
1743  * Get EGRESS, INGRESS, FLM, and CNM max qid.
1744  *
1745  * For EGRESS and INGRESS, do the following calculation.
1746  * max_qid = (DBQ/IMSG context region size in bytes) /
1747  *	     (size of context in bytes).
1748  *
1749  * For FLM, do the following calculation.
1750  * max_qid = (FLM cache region size in bytes) /
1751  *	     ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
1752  *
1753  * There is a 1-to-1 mapping between FLM and CNM if header splitting is not
1754  * enabled; i.e., max CNM qid is equal to max FLM qid. However, if header
1755  * splitting is enabled, then max CNM qid is half of max FLM qid.
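 *
 * Illustrative example (assumed values): with a 1 MB FLM cache region and
 * EDRAMPTRCNT selecting 32 pointers per qid, max FLM qid is
 * 1048576 / (32 * 8) = 4096; with header splitting enabled, max CNM qid
 * would then be 2048.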
1756  */
1757 static int get_max_ctxt_qid(struct adapter *padap,
1758 			    struct struct_meminfo *meminfo,
1759 			    u32 *max_ctx_qid, u8 nelem)
1760 {
1761 	u32 i, idx, found = 0;
1762 
1763 	if (nelem != (CTXT_CNM + 1))
1764 		return -EINVAL;
1765 
1766 	for (i = 0; i < meminfo->mem_c; i++) {
1767 		if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
1768 			continue;                        /* skip holes */
1769 
1770 		idx = meminfo->mem[i].idx;
1771 		/* Get DBQ, IMSG, and FLM context region size */
1772 		if (idx <= CTXT_FLM) {
1773 			if (!(meminfo->mem[i].limit))
1774 				meminfo->mem[i].limit =
1775 					i < meminfo->mem_c - 1 ?
1776 					meminfo->mem[i + 1].base - 1 : ~0;
1777 
1778 			if (idx < CTXT_FLM) {
1779 				/* Get EGRESS and INGRESS max qid. */
1780 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1781 						    meminfo->mem[i].base + 1) /
1782 						   CUDBG_CTXT_SIZE_BYTES;
1783 				found++;
1784 			} else {
1785 				/* Get FLM and CNM max qid. */
1786 				u32 value, edram_ptr_count;
1787 				u8 bytes_per_ptr = 8;
1788 				u8 nohdr;
1789 
1790 				value = t4_read_reg(padap, A_SGE_FLM_CFG);
1791 
1792 				/* Check if header splitting is enabled. */
1793 				nohdr = (value >> S_NOHDR) & 1U;
1794 
1795 				/* Get the number of pointers in EDRAM per
1796 				 * qid in units of 32.
1797 				 */
1798 				edram_ptr_count = 32 *
1799 						  (1U << G_EDRAMPTRCNT(value));
1800 
1801 				/* EDRAMPTRCNT value of 3 is reserved.
1802 				 * So don't exceed 128.
1803 				 */
1804 				if (edram_ptr_count > 128)
1805 					edram_ptr_count = 128;
1806 
1807 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1808 						    meminfo->mem[i].base + 1) /
1809 						   (edram_ptr_count *
1810 						    bytes_per_ptr);
1811 				found++;
1812 
1813 				/* CNM has 1-to-1 mapping with FLM.
1814 				 * However, if header splitting is enabled,
1815 				 * then max CNM qid is half of max FLM qid.
1816 				 */
1817 				max_ctx_qid[CTXT_CNM] = nohdr ?
1818 							max_ctx_qid[idx] :
1819 							max_ctx_qid[idx] >> 1;
1820 
1821 				/* One more increment for CNM */
1822 				found++;
1823 			}
1824 		}
1825 		if (found == nelem)
1826 			break;
1827 	}
1828 
1829 	/* Sanity check. Ensure the values are within known max. */
1830 	max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
1831 					 M_CTXTQID);
1832 	max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
1833 					  CUDBG_MAX_INGRESS_QIDS);
1834 	max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
1835 				      CUDBG_MAX_FL_QIDS);
1836 	max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
1837 				      CUDBG_MAX_CNM_QIDS);
1838 	return 0;
1839 }
1840 
1841 static int collect_dump_context(struct cudbg_init *pdbg_init,
1842 				struct cudbg_buffer *dbg_buff,
1843 				struct cudbg_error *cudbg_err)
1844 {
1845 	struct cudbg_buffer scratch_buff;
1846 	struct cudbg_buffer temp_buff;
1847 	struct adapter *padap = pdbg_init->adap;
1848 	u32 size = 0, next_offset = 0, total_size = 0;
1849 	struct cudbg_ch_cntxt *buff = NULL;
1850 	struct struct_meminfo meminfo;
1851 	int bytes = 0;
1852 	int rc = 0;
1853 	u32 i, j;
1854 	u32 max_ctx_qid[CTXT_CNM + 1];
1855 	bool limit_qid = false;
1856 	u32 qid_count = 0;
1857 
1858 	rc = fill_meminfo(padap, &meminfo);
1859 	if (rc)
1860 		goto err;
1861 
1862 	/* Get max valid qid for each type of queue */
1863 	rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
1864 	if (rc)
1865 		goto err;
1866 
1867 	/* There are four types of queues. Collect context up to the max
1868 	 * qid of each type of queue.
1869 	 */
1870 	for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1871 		size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];
1872 
1873 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1874 	if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
1875 		/* Not enough scratch memory available.
1876 		 * Collect context of at least CUDBG_LOWMEM_MAX_CTXT_QIDS
1877 		 * for each queue type.
1878 		 */
1879 		size = 0;
1880 		for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1881 			size += sizeof(struct cudbg_ch_cntxt) *
1882 				CUDBG_LOWMEM_MAX_CTXT_QIDS;
1883 
1884 		limit_qid = true;
1885 		rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1886 		if (rc)
1887 			goto err;
1888 	}
1889 
1890 	buff = (struct cudbg_ch_cntxt *)scratch_buff.data;
1891 
1892 	/* Collect context data */
1893 	for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
1894 		qid_count = 0;
1895 		for (j = 0; j < max_ctx_qid[i]; j++) {
1896 			read_sge_ctxt(pdbg_init, j, i, buff->data);
1897 
1898 			rc = check_valid(buff->data, i);
1899 			if (rc) {
1900 				buff->cntxt_type = i;
1901 				buff->cntxt_id = j;
1902 				buff++;
1903 				total_size += sizeof(struct cudbg_ch_cntxt);
1904 
1905 				if (i == CTXT_FLM) {
1906 					read_sge_ctxt(pdbg_init, j, CTXT_CNM,
1907 						      buff->data);
1908 					buff->cntxt_type = CTXT_CNM;
1909 					buff->cntxt_id = j;
1910 					buff++;
1911 					total_size +=
1912 						sizeof(struct cudbg_ch_cntxt);
1913 				}
1914 				qid_count++;
1915 			}
1916 
1917 			/* If there's not enough space to collect more qids,
1918 			 * then bail and move on to the next queue type.
1919 			 */
1920 			if (limit_qid &&
1921 			    qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
1922 				break;
1923 		}
1924 	}
1925 
1926 	scratch_buff.size = total_size;
1927 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1928 	if (rc)
1929 		goto err1;
1930 
1931 	/* Splitting buffer and writing in terms of CUDBG_CHUNK_SIZE */
1932 	while (total_size > 0) {
1933 		bytes = min_t(unsigned long, (unsigned long)total_size,
1934 			      (unsigned long)CUDBG_CHUNK_SIZE);
1935 		temp_buff.size = bytes;
1936 		temp_buff.data = (void *)((char *)scratch_buff.data +
1937 					  next_offset);
1938 
1939 		rc = compress_buff(&temp_buff, dbg_buff);
1940 		if (rc)
1941 			goto err1;
1942 
1943 		total_size -= bytes;
1944 		next_offset += bytes;
1945 	}
1946 
1947 err1:
1948 	scratch_buff.size = size;
1949 	release_scratch_buff(&scratch_buff, dbg_buff);
1950 err:
1951 	return rc;
1952 }
1953 
1954 static int collect_fw_devlog(struct cudbg_init *pdbg_init,
1955 			     struct cudbg_buffer *dbg_buff,
1956 			     struct cudbg_error *cudbg_err)
1957 {
1958 #ifdef notyet
1959 	struct adapter *padap = pdbg_init->adap;
1960 	struct devlog_params *dparams = &padap->params.devlog;
1961 	struct cudbg_param *params = NULL;
1962 	struct cudbg_buffer scratch_buff;
1963 	u32 offset;
1964 	int rc = 0, i;
1965 
1966 	rc = t4_init_devlog_params(padap, 1);
1967 
1968 	if (rc < 0) {
1969 		pdbg_init->print("%s(), t4_init_devlog_params failed, rc: "
1970 				 "%d\n", __func__, rc);
1971 		for (i = 0; i < pdbg_init->dbg_params_cnt; i++) {
1972 			if (pdbg_init->dbg_params[i].param_type ==
1973 			    CUDBG_DEVLOG_PARAM) {
1974 				params = &pdbg_init->dbg_params[i];
1975 				break;
1976 			}
1977 		}
1978 
1979 		if (params) {
1980 			dparams->memtype = params->u.devlog_param.memtype;
1981 			dparams->start = params->u.devlog_param.start;
1982 			dparams->size = params->u.devlog_param.size;
1983 		} else {
1984 			cudbg_err->sys_err = rc;
1985 			goto err;
1986 		}
1987 	}
1988 
1989 	rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
1990 
1991 	if (rc)
1992 		goto err;
1993 
1994 	/* Collect FW devlog */
1995 	if (dparams->start != 0) {
1996 		offset = scratch_buff.offset;
1997 		rc = t4_memory_rw(padap, padap->params.drv_memwin,
1998 				  dparams->memtype, dparams->start,
1999 				  dparams->size,
2000 				  (__be32 *)((char *)scratch_buff.data +
2001 					     offset), 1);
2002 
2003 		if (rc) {
2004 			pdbg_init->print("%s(), t4_memory_rw failed, rc: "
2005 					 "%d\n", __func__, rc);
2006 			cudbg_err->sys_err = rc;
2007 			goto err1;
2008 		}
2009 	}
2010 
2011 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2012 
2013 	if (rc)
2014 		goto err1;
2015 
2016 	rc = compress_buff(&scratch_buff, dbg_buff);
2017 
2018 err1:
2019 	release_scratch_buff(&scratch_buff, dbg_buff);
2020 err:
2021 	return rc;
2022 #endif
2023 	return (EDOOFUS);
2024 }
2025 /* CIM OBQ */
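/*
 * The collect_cim_obq_*() and collect_obq_sge_rx_q*() entity handlers below
 * differ only in the OBQ index passed to read_cim_obq(): ULP0-ULP3 map to
 * qids 0-3, SGE to 4, NCSI to 5, and the two SGE RX queues to 6 and 7.
 */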
2026 
2027 static int collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
2028 				struct cudbg_buffer *dbg_buff,
2029 				struct cudbg_error *cudbg_err)
2030 {
2031 	int rc = 0, qid = 0;
2032 
2033 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2034 
2035 	return rc;
2036 }
2037 
2038 static int collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
2039 				struct cudbg_buffer *dbg_buff,
2040 				struct cudbg_error *cudbg_err)
2041 {
2042 	int rc = 0, qid = 1;
2043 
2044 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2045 
2046 	return rc;
2047 }
2048 
2049 static int collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
2050 				struct cudbg_buffer *dbg_buff,
2051 				struct cudbg_error *cudbg_err)
2052 {
2053 	int rc = 0, qid = 2;
2054 
2055 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2056 
2057 	return rc;
2058 }
2059 
2060 static int collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
2061 				struct cudbg_buffer *dbg_buff,
2062 				struct cudbg_error *cudbg_err)
2063 {
2064 	int rc = 0, qid = 3;
2065 
2066 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2067 
2068 	return rc;
2069 }
2070 
2071 static int collect_cim_obq_sge(struct cudbg_init *pdbg_init,
2072 			       struct cudbg_buffer *dbg_buff,
2073 			       struct cudbg_error *cudbg_err)
2074 {
2075 	int rc = 0, qid = 4;
2076 
2077 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2078 
2079 	return rc;
2080 }
2081 
2082 static int collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
2083 				struct cudbg_buffer *dbg_buff,
2084 				struct cudbg_error *cudbg_err)
2085 {
2086 	int rc = 0, qid = 5;
2087 
2088 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2089 
2090 	return rc;
2091 }
2092 
2093 static int collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
2094 				 struct cudbg_buffer *dbg_buff,
2095 				 struct cudbg_error *cudbg_err)
2096 {
2097 	int rc = 0, qid = 6;
2098 
2099 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2100 
2101 	return rc;
2102 }
2103 
2104 static int collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
2105 				 struct cudbg_buffer *dbg_buff,
2106 				 struct cudbg_error *cudbg_err)
2107 {
2108 	int rc = 0, qid = 7;
2109 
2110 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2111 
2112 	return rc;
2113 }
2114 
2115 static int read_cim_obq(struct cudbg_init *pdbg_init,
2116 			struct cudbg_buffer *dbg_buff,
2117 			struct cudbg_error *cudbg_err, int qid)
2118 {
2119 	struct cudbg_buffer scratch_buff;
2120 	struct adapter *padap = pdbg_init->adap;
2121 	u32 qsize;
2122 	int rc;
2123 	int no_of_read_words;
2124 
2125 	/* collect CIM OBQ */
2126 	qsize =  6 * CIM_OBQ_SIZE * 4 *  sizeof(u32);
2127 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2128 	if (rc)
2129 		goto err;
2130 
2131 	/* t4_read_cim_obq will return no. of read words or error */
2132 	no_of_read_words = t4_read_cim_obq(padap, qid,
2133 					   (u32 *)((char *)scratch_buff.data +
2134 					   scratch_buff.offset), qsize);
2135 
2136 	/* no_of_read_words <= 0 indicates an error */
2137 	if (no_of_read_words <= 0) {
2138 		if (no_of_read_words == 0)
2139 			rc = CUDBG_SYSTEM_ERROR;
2140 		else
2141 			rc = no_of_read_words;
2142 		if (pdbg_init->verbose)
2143 			pdbg_init->print("%s: t4_read_cim_obq failed (%d)\n",
2144 				 __func__, rc);
2145 		cudbg_err->sys_err = rc;
2146 		goto err1;
2147 	}
2148 
2149 	scratch_buff.size = no_of_read_words * 4;
2150 
2151 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2152 
2153 	if (rc)
2154 		goto err1;
2155 
2156 	rc = compress_buff(&scratch_buff, dbg_buff);
2157 
2158 	if (rc)
2159 		goto err1;
2160 
2161 err1:
2162 	release_scratch_buff(&scratch_buff, dbg_buff);
2163 err:
2164 	return rc;
2165 }
2166 
2167 /* CIM IBQ */
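/*
 * As with the OBQ handlers above, the collect_cim_ibq_*() functions below
 * only select the IBQ index passed to read_cim_ibq(): TP0 is qid 0, TP1 is
 * 1, ULP is 2, SGE0 is 3, SGE1 is 4 and NCSI is 5.
 */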
2168 
2169 static int collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
2170 			       struct cudbg_buffer *dbg_buff,
2171 			       struct cudbg_error *cudbg_err)
2172 {
2173 	int rc = 0, qid = 0;
2174 
2175 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2176 	return rc;
2177 }
2178 
2179 static int collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
2180 			       struct cudbg_buffer *dbg_buff,
2181 			       struct cudbg_error *cudbg_err)
2182 {
2183 	int rc = 0, qid = 1;
2184 
2185 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2186 	return rc;
2187 }
2188 
2189 static int collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
2190 			       struct cudbg_buffer *dbg_buff,
2191 			       struct cudbg_error *cudbg_err)
2192 {
2193 	int rc = 0, qid = 2;
2194 
2195 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2196 	return rc;
2197 }
2198 
2199 static int collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
2200 				struct cudbg_buffer *dbg_buff,
2201 				struct cudbg_error *cudbg_err)
2202 {
2203 	int rc = 0, qid = 3;
2204 
2205 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2206 	return rc;
2207 }
2208 
2209 static int collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
2210 				struct cudbg_buffer *dbg_buff,
2211 				struct cudbg_error *cudbg_err)
2212 {
2213 	int rc = 0, qid = 4;
2214 
2215 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2216 	return rc;
2217 }
2218 
2219 static int collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
2220 				struct cudbg_buffer *dbg_buff,
2221 				struct cudbg_error *cudbg_err)
2222 {
2223 	int rc, qid = 5;
2224 
2225 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2226 	return rc;
2227 }
2228 
2229 static int read_cim_ibq(struct cudbg_init *pdbg_init,
2230 			struct cudbg_buffer *dbg_buff,
2231 			struct cudbg_error *cudbg_err, int qid)
2232 {
2233 	struct adapter *padap = pdbg_init->adap;
2234 	struct cudbg_buffer scratch_buff;
2235 	u32 qsize;
2236 	int rc;
2237 	int no_of_read_words;
2238 
2239 	/* collect CIM IBQ */
2240 	qsize = CIM_IBQ_SIZE * 4 *  sizeof(u32);
2241 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2242 
2243 	if (rc)
2244 		goto err;
2245 
2246 	/* t4_read_cim_ibq will return no. of read words or error */
2247 	no_of_read_words = t4_read_cim_ibq(padap, qid,
2248 					   (u32 *)((char *)scratch_buff.data +
2249 					   scratch_buff.offset), qsize);
2250 	/* no_of_read_words <= 0 indicates an error */
2251 	if (no_of_read_words <= 0) {
2252 		if (no_of_read_words == 0)
2253 			rc = CUDBG_SYSTEM_ERROR;
2254 		else
2255 			rc = no_of_read_words;
2256 		if (pdbg_init->verbose)
2257 			pdbg_init->print("%s: t4_read_cim_ibq failed (%d)\n",
2258 				 __func__, rc);
2259 		cudbg_err->sys_err = rc;
2260 		goto err1;
2261 	}
2262 
2263 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2264 	if (rc)
2265 		goto err1;
2266 
2267 	rc = compress_buff(&scratch_buff, dbg_buff);
2268 	if (rc)
2269 		goto err1;
2270 
2271 err1:
2272 	release_scratch_buff(&scratch_buff, dbg_buff);
2273 
2274 err:
2275 	return rc;
2276 }
2277 
2278 static int collect_cim_ma_la(struct cudbg_init *pdbg_init,
2279 			     struct cudbg_buffer *dbg_buff,
2280 			     struct cudbg_error *cudbg_err)
2281 {
2282 	struct cudbg_buffer scratch_buff;
2283 	struct adapter *padap = pdbg_init->adap;
2284 	u32 rc = 0;
2285 
2286 	/* collect CIM MA LA */
2287 	scratch_buff.size =  2 * CIM_MALA_SIZE * 5 * sizeof(u32);
2288 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2289 	if (rc)
2290 		goto err;
2291 
2292 	/* t4_cim_read_ma_la() returns void */
2293 	t4_cim_read_ma_la(padap,
2294 			  (u32 *) ((char *)scratch_buff.data +
2295 				   scratch_buff.offset),
2296 			  (u32 *) ((char *)scratch_buff.data +
2297 				   scratch_buff.offset + 5 * CIM_MALA_SIZE));
2298 
2299 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2300 	if (rc)
2301 		goto err1;
2302 
2303 	rc = compress_buff(&scratch_buff, dbg_buff);
2304 
2305 err1:
2306 	release_scratch_buff(&scratch_buff, dbg_buff);
2307 err:
2308 	return rc;
2309 }
2310 
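/*
 * Dump the CIM logic analyzer.  The buffer size mirrors the layout used by
 * t4_cim_read_la(): on T6 the LA is read as 11 words per group of 10
 * entries, on earlier chips as 8 words per group of 8.  The current
 * A_UP_UP_DBG_LA_CFG value is prepended so a decoder knows how the LA was
 * configured when it was captured.
 */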
2311 static int collect_cim_la(struct cudbg_init *pdbg_init,
2312 			  struct cudbg_buffer *dbg_buff,
2313 			  struct cudbg_error *cudbg_err)
2314 {
2315 	struct cudbg_buffer scratch_buff;
2316 	struct adapter *padap = pdbg_init->adap;
2317 
2318 	int rc;
2319 	u32 cfg = 0;
2320 	int size;
2321 
2322 	/* collect CIM LA */
2323 	if (is_t6(padap)) {
2324 		size = padap->params.cim_la_size / 10 + 1;
2325 		size *= 11 * sizeof(u32);
2326 	} else {
2327 		size = padap->params.cim_la_size / 8;
2328 		size *= 8 * sizeof(u32);
2329 	}
2330 
2331 	size += sizeof(cfg);
2332 
2333 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
2334 	if (rc)
2335 		goto err;
2336 
2337 	rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
2338 
2339 	if (rc) {
2340 		if (pdbg_init->verbose)
2341 			pdbg_init->print("%s: t4_cim_read failed (%d)\n",
2342 				 __func__, rc);
2343 		cudbg_err->sys_err = rc;
2344 		goto err1;
2345 	}
2346 
2347 	memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
2348 	       sizeof(cfg));
2349 
2350 	rc = t4_cim_read_la(padap,
2351 			    (u32 *) ((char *)scratch_buff.data +
2352 				     scratch_buff.offset + sizeof(cfg)), NULL);
2353 	if (rc < 0) {
2354 		if (pdbg_init->verbose)
2355 			pdbg_init->print("%s: t4_cim_read_la failed (%d)\n",
2356 				 __func__, rc);
2357 		cudbg_err->sys_err = rc;
2358 		goto err1;
2359 	}
2360 
2361 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2362 	if (rc)
2363 		goto err1;
2364 
2365 	rc = compress_buff(&scratch_buff, dbg_buff);
2366 	if (rc)
2367 		goto err1;
2368 
2369 err1:
2370 	release_scratch_buff(&scratch_buff, dbg_buff);
2371 err:
2372 	return rc;
2373 }
2374 
2375 static int collect_cim_qcfg(struct cudbg_init *pdbg_init,
2376 			    struct cudbg_buffer *dbg_buff,
2377 			    struct cudbg_error *cudbg_err)
2378 {
2379 	struct cudbg_buffer scratch_buff;
2380 	struct adapter *padap = pdbg_init->adap;
2381 	u32 offset;
2382 	int cim_num_obq, rc = 0;
2383 
2384 	struct struct_cim_qcfg *cim_qcfg_data = NULL;
2385 
2386 	rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
2387 			      &scratch_buff);
2388 
2389 	if (rc)
2390 		goto err;
2391 
2392 	offset = scratch_buff.offset;
2393 
2394 	cim_num_obq = is_t4(padap) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
2395 
2396 	cim_qcfg_data = (struct struct_cim_qcfg *)((char *)scratch_buff.data +
2397 						   offset);
2399 
2400 	rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
2401 			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
2402 
2403 	if (rc) {
2404 		if (pdbg_init->verbose)
2405 			pdbg_init->print("%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
2406 			    __func__, rc);
2407 		cudbg_err->sys_err = rc;
2408 		goto err1;
2409 	}
2410 
2411 	rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
2412 			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
2413 			 cim_qcfg_data->obq_wr);
2414 
2415 	if (rc) {
2416 		if (pdbg_init->verbose)
2417 			pdbg_init->print("%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
2418 			    __func__, rc);
2419 		cudbg_err->sys_err = rc;
2420 		goto err1;
2421 	}
2422 
2423 	/* t4_read_cimq_cfg() returns void */
2424 	t4_read_cimq_cfg(padap,
2425 			cim_qcfg_data->base,
2426 			cim_qcfg_data->size,
2427 			cim_qcfg_data->thres);
2428 
2429 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2430 	if (rc)
2431 		goto err1;
2432 
2433 	rc = compress_buff(&scratch_buff, dbg_buff);
2434 	if (rc)
2435 		goto err1;
2436 
2437 err1:
2438 	release_scratch_buff(&scratch_buff, dbg_buff);
2439 err:
2440 	return rc;
2441 }
2442 
2443 /**
2444  * Fetch the TX/RX payload regions start and end.
2445  *
2446  * @padap (IN): adapter handle.
2447  * @mem_type (IN): EDC0, EDC1, MC/MC0/MC1.
2448  * @mem_tot_len (IN): total length of @mem_type memory region to read.
2449  * @payload_type (IN): TX or RX Payload.
2450  * @reg_info (OUT): store the payload region info.
2451  *
2452  * Fetch the TX/RX payload region information from meminfo.
2453  * However, reading from the @mem_type region starts at 0 and not
2454  * from whatever base info is stored in meminfo.  Hence, if the
2455  * payload region exists, then calculate the payload region
2456  * start and end wrt 0 and @mem_tot_len, respectively, and set
2457  * @reg_info->exist to true. Otherwise, set @reg_info->exist to false.
2458  */
2459 #ifdef notyet
2460 static int get_payload_range(struct adapter *padap, u8 mem_type,
2461 			     unsigned long mem_tot_len, u8 payload_type,
2462 			     struct struct_region_info *reg_info)
2463 {
2464 	struct struct_meminfo meminfo;
2465 	struct struct_mem_desc mem_region;
2466 	struct struct_mem_desc payload;
2467 	u32 i, idx, found = 0;
2468 	u8 mc_type;
2469 	int rc;
2470 
2471 	/* Get meminfo of all regions */
2472 	rc = fill_meminfo(padap, &meminfo);
2473 	if (rc)
2474 		return rc;
2475 
2476 	/* Extract the specified TX or RX Payload region range */
2477 	memset(&payload, 0, sizeof(struct struct_mem_desc));
2478 	for (i = 0; i < meminfo.mem_c; i++) {
2479 		if (meminfo.mem[i].idx >= ARRAY_SIZE(region))
2480 			continue;                        /* skip holes */
2481 
2482 		idx = meminfo.mem[i].idx;
2483 		/* Get TX or RX Payload region start and end */
2484 		if (idx == payload_type) {
2485 			if (!(meminfo.mem[i].limit))
2486 				meminfo.mem[i].limit =
2487 					i < meminfo.mem_c - 1 ?
2488 					meminfo.mem[i + 1].base - 1 : ~0;
2489 
2490 			memcpy(&payload, &meminfo.mem[i], sizeof(payload));
2491 			found = 1;
2492 			break;
2493 		}
2494 	}
2495 
2496 	/* If TX or RX Payload region is not found return error. */
2497 	if (!found)
2498 		return -EINVAL;
2499 
2500 	if (mem_type < MEM_MC) {
2501 		memcpy(&mem_region, &meminfo.avail[mem_type],
2502 		       sizeof(mem_region));
2503 	} else {
2504 		/* Check if both MC0 and MC1 exist by checking if a
2505 		 * base address for the specified @mem_type exists.
2506 		 * If a base address exists, then there is MC1 and
2507 		 * hence use the base address stored at index 3.
2508 		 * Otherwise, use the base address stored at index 2.
2509 		 */
2510 		mc_type = meminfo.avail[mem_type].base ?
2511 			  mem_type : mem_type - 1;
2512 		memcpy(&mem_region, &meminfo.avail[mc_type],
2513 		       sizeof(mem_region));
2514 	}
2515 
2516 	/* Check if payload region exists in current memory */
2517 	if (payload.base < mem_region.base && payload.limit < mem_region.base) {
2518 		reg_info->exist = false;
2519 		return 0;
2520 	}
2521 
2522 	/* Get Payload region start and end with respect to 0 and
2523 	 * mem_tot_len, respectively.  This is because reading from the
2524 	 * memory region starts at 0 and not at base info stored in meminfo.
2525 	 */
2526 	if (payload.base < mem_region.limit) {
2527 		reg_info->exist = true;
2528 		if (payload.base >= mem_region.base)
2529 			reg_info->start = payload.base - mem_region.base;
2530 		else
2531 			reg_info->start = 0;
2532 
2533 		if (payload.limit < mem_region.limit)
2534 			reg_info->end = payload.limit - mem_region.base;
2535 		else
2536 			reg_info->end = mem_tot_len;
2537 	}
2538 
2539 	return 0;
2540 }
2541 #endif
2542 
2543 static int read_fw_mem(struct cudbg_init *pdbg_init,
2544 			struct cudbg_buffer *dbg_buff, u8 mem_type,
2545 			unsigned long tot_len, struct cudbg_error *cudbg_err)
2546 {
2547 #ifdef notyet
2548 	struct cudbg_buffer scratch_buff;
2549 	struct adapter *padap = pdbg_init->adap;
2550 	unsigned long bytes_read = 0;
2551 	unsigned long bytes_left;
2552 	unsigned long bytes;
2553 	int	      rc;
2554 	struct struct_region_info payload[2]; /* TX and RX Payload Region */
2555 	u16 get_payload_flag;
2556 	u8 i;
2557 
2558 	get_payload_flag =
2559 		pdbg_init->dbg_params[CUDBG_GET_PAYLOAD_PARAM].param_type;
2560 
2561 	/* If explicitly asked to get TX/RX Payload data,
2562 	 * then don't zero out the payload data. Otherwise,
2563 	 * zero out the payload data.
2564 	 */
2565 	if (!get_payload_flag) {
2566 		u8 region_index[2];
2567 		u8 j = 0;
2568 
2569 		/* Find the index of TX and RX Payload regions in meminfo */
2570 		for (i = 0; i < ARRAY_SIZE(region); i++) {
2571 			if (!strcmp(region[i], "Tx payload:") ||
2572 			    !strcmp(region[i], "Rx payload:")) {
2573 				region_index[j] = i;
2574 				j++;
2575 				if (j == 2)
2576 					break;
2577 			}
2578 		}
2579 
2580 		/* Get TX/RX Payload region range if they exist */
2581 		memset(payload, 0, ARRAY_SIZE(payload) * sizeof(payload[0]));
2582 		for (i = 0; i < ARRAY_SIZE(payload); i++) {
2583 			rc = get_payload_range(padap, mem_type, tot_len,
2584 					       region_index[i],
2585 					       &payload[i]);
2586 			if (rc)
2587 				goto err;
2588 
2589 			if (payload[i].exist) {
2590 				/* Align start and end to avoid wrap around */
2591 				payload[i].start =
2592 					roundup(payload[i].start,
2593 					    CUDBG_CHUNK_SIZE);
2594 				payload[i].end =
2595 					rounddown(payload[i].end,
2596 					    CUDBG_CHUNK_SIZE);
2597 			}
2598 		}
2599 	}
2600 
2601 	bytes_left = tot_len;
2602 	scratch_buff.size = tot_len;
2603 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2604 	if (rc)
2605 		goto err;
2606 
2607 	while (bytes_left > 0) {
2608 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2609 		rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
2610 
2611 		if (rc) {
2612 			rc = CUDBG_STATUS_NO_SCRATCH_MEM;
2613 			goto err;
2614 		}
2615 
2616 		if (!get_payload_flag) {
2617 			for (i = 0; i < ARRAY_SIZE(payload); i++) {
2618 				if (payload[i].exist &&
2619 				    bytes_read >= payload[i].start &&
2620 				    (bytes_read + bytes) <= payload[i].end) {
2621 					memset(scratch_buff.data, 0, bytes);
2622 					/* TX and RX Payload regions
2623 					 * can't overlap.
2624 					 */
2625 					goto skip_read;
2626 				}
2627 			}
2628 		}
2629 
2630 		/* Read this chunk from adapter memory */
2632 		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
2633 				  bytes, (__be32 *)(scratch_buff.data), 1);
2634 
2635 		if (rc) {
2636 			if (pdbg_init->verbose)
2637 				pdbg_init->print("%s: t4_memory_rw failed (%d)",
2638 				    __func__, rc);
2639 			cudbg_err->sys_err = rc;
2640 			goto err1;
2641 		}
2642 
2643 skip_read:
2644 		rc = compress_buff(&scratch_buff, dbg_buff);
2645 		if (rc)
2646 			goto err1;
2647 
2648 		bytes_left -= bytes;
2649 		bytes_read += bytes;
2650 		release_scratch_buff(&scratch_buff, dbg_buff);
2651 	}
2652 
2653 err1:
2654 	if (rc)
2655 		release_scratch_buff(&scratch_buff, dbg_buff);
2656 
2657 err:
2658 	return rc;
2659 #endif
2660 	return (EDOOFUS);
2661 }
2662 
2663 static void collect_mem_info(struct cudbg_init *pdbg_init,
2664 			     struct card_mem *mem_info)
2665 {
2666 	struct adapter *padap = pdbg_init->adap;
2667 	u32 value;
2668 	int t4 = 0;
2669 
2670 	if (is_t4(padap))
2671 		t4 = 1;
2672 
2673 	if (t4) {
2674 		value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
2675 		value = G_EXT_MEM_SIZE(value);
2676 		mem_info->size_mc0 = (u16)value;  /* size in MB */
2677 
2678 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2679 		if (value & F_EXT_MEM_ENABLE)
2680 			/* set mc0 flag bit */
2681 			mem_info->mem_flag |= (1 << MC0_FLAG);
2682 	} else {
2683 		value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
2684 		value = G_EXT_MEM0_SIZE(value);
2685 		mem_info->size_mc0 = (u16)value;
2686 
2687 		value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
2688 		value = G_EXT_MEM1_SIZE(value);
2689 		mem_info->size_mc1 = (u16)value;
2690 
2691 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2692 		if (value & F_EXT_MEM0_ENABLE)
2693 			mem_info->mem_flag |= (1 << MC0_FLAG);
2694 		if (value & F_EXT_MEM1_ENABLE)
2695 			mem_info->mem_flag |= (1 << MC1_FLAG);
2696 	}
2697 
2698 	value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
2699 	value = G_EDRAM0_SIZE(value);
2700 	mem_info->size_edc0 = (u16)value;
2701 
2702 	value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
2703 	value = G_EDRAM1_SIZE(value);
2704 	mem_info->size_edc1 = (u16)value;
2705 
2706 	value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2707 	if (value & F_EDRAM0_ENABLE)
2708 		mem_info->mem_flag |= (1 << EDC0_FLAG);
2709 	if (value & F_EDRAM1_ENABLE)
2710 		mem_info->mem_flag |= (1 << EDC1_FLAG);
2711 
2712 }
2713 
2714 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
2715 				struct cudbg_error *cudbg_err)
2716 {
2717 	struct adapter *padap = pdbg_init->adap;
2718 	int rc;
2719 
2720 	if (is_fw_attached(pdbg_init)) {
2722 		/* Flush uP dcache before reading edcX/mcX */
2723 		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
2724 
2725 		if (rc) {
2726 			if (pdbg_init->verbose)
2727 				pdbg_init->print("%s: t4_fwcache failed (%d)\n",
2728 				 __func__, rc);
2729 			cudbg_err->sys_warn = rc;
2730 		}
2731 	}
2732 }
2733 
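/*
 * The collect_{edc0,edc1,mc0,mc1}_meminfo() handlers below share the same
 * pattern: flush the uP dcache (when the firmware is attached), read the
 * memory sizes and enable bits from the MA registers via collect_mem_info(),
 * and then dump the corresponding region with read_fw_mem() if it is
 * present.
 */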
2734 static int collect_edc0_meminfo(struct cudbg_init *pdbg_init,
2735 				struct cudbg_buffer *dbg_buff,
2736 				struct cudbg_error *cudbg_err)
2737 {
2738 	struct card_mem mem_info = {0};
2739 	unsigned long edc0_size;
2740 	int rc;
2741 
2742 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2743 
2744 	collect_mem_info(pdbg_init, &mem_info);
2745 
2746 	if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
2747 		edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
2748 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
2749 				 edc0_size, cudbg_err);
2750 		if (rc)
2751 			goto err;
2752 
2753 	} else {
2754 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2755 		if (pdbg_init->verbose)
2756 			pdbg_init->print("%s(), collect_mem_info failed, %s\n",
2757 				 __func__, err_msg[-rc]);
2758 		goto err;
2759 
2760 	}
2761 err:
2762 	return rc;
2763 }
2764 
2765 static int collect_edc1_meminfo(struct cudbg_init *pdbg_init,
2766 				struct cudbg_buffer *dbg_buff,
2767 				struct cudbg_error *cudbg_err)
2768 {
2769 	struct card_mem mem_info = {0};
2770 	unsigned long edc1_size;
2771 	int rc;
2772 
2773 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2774 
2775 	collect_mem_info(pdbg_init, &mem_info);
2776 
2777 	if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
2778 		edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
2779 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
2780 				 edc1_size, cudbg_err);
2781 		if (rc)
2782 			goto err;
2783 	} else {
2784 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2785 		if (pdbg_init->verbose)
2786 			pdbg_init->print("%s(), collect_mem_info failed, %s\n",
2787 				 __func__, err_msg[-rc]);
2788 		goto err;
2789 	}
2790 
2791 err:
2792 
2793 	return rc;
2794 }
2795 
2796 static int collect_mc0_meminfo(struct cudbg_init *pdbg_init,
2797 			       struct cudbg_buffer *dbg_buff,
2798 			       struct cudbg_error *cudbg_err)
2799 {
2800 	struct card_mem mem_info = {0};
2801 	unsigned long mc0_size;
2802 	int rc;
2803 
2804 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2805 
2806 	collect_mem_info(pdbg_init, &mem_info);
2807 
2808 	if (mem_info.mem_flag & (1 << MC0_FLAG)) {
2809 		mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
2810 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
2811 				 mc0_size, cudbg_err);
2812 		if (rc)
2813 			goto err;
2814 	} else {
2815 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2816 		if (pdbg_init->verbose)
2817 			pdbg_init->print("%s(), collect_mem_info failed, %s\n",
2818 				 __func__, err_msg[-rc]);
2819 		goto err;
2820 	}
2821 
2822 err:
2823 	return rc;
2824 }
2825 
2826 static int collect_mc1_meminfo(struct cudbg_init *pdbg_init,
2827 			       struct cudbg_buffer *dbg_buff,
2828 			       struct cudbg_error *cudbg_err)
2829 {
2830 	struct card_mem mem_info = {0};
2831 	unsigned long mc1_size;
2832 	int rc;
2833 
2834 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2835 
2836 	collect_mem_info(pdbg_init, &mem_info);
2837 
2838 	if (mem_info.mem_flag & (1 << MC1_FLAG)) {
2839 		mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
2840 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
2841 				 mc1_size, cudbg_err);
2842 		if (rc)
2843 			goto err;
2844 	} else {
2845 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2846 
2847 		if (pdbg_init->verbose)
2848 			pdbg_init->print("%s(), collect_mem_info failed, %s\n",
2849 				 __func__, err_msg[-rc]);
2850 		goto err;
2851 	}
2852 err:
2853 	return rc;
2854 }
2855 
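/*
 * Capture the full register map with t4_get_regs() and compress it in
 * CUDBG_CHUNK_SIZE pieces.  The map size is chip dependent (T4 vs. T5/T6).
 */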
2856 static int collect_reg_dump(struct cudbg_init *pdbg_init,
2857 			    struct cudbg_buffer *dbg_buff,
2858 			    struct cudbg_error *cudbg_err)
2859 {
2860 	struct cudbg_buffer scratch_buff;
2861 	struct cudbg_buffer tmp_scratch_buff;
2862 	struct adapter *padap = pdbg_init->adap;
2863 	unsigned long	     bytes_read = 0;
2864 	unsigned long	     bytes_left;
2865 	u32		     buf_size = 0, bytes = 0;
2866 	int		     rc = 0;
2867 
2868 	if (is_t4(padap))
2869 		buf_size = T4_REGMAP_SIZE;
2870 	else if (is_t5(padap) || is_t6(padap))
2871 		buf_size = T5_REGMAP_SIZE;
2872 
2873 	scratch_buff.size = buf_size;
2874 
2875 	tmp_scratch_buff = scratch_buff;
2876 
2877 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2878 	if (rc)
2879 		goto err;
2880 
2881 	/* t4_get_regs() returns void */
2882 	t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
2883 	bytes_left = scratch_buff.size;
2884 
2885 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2886 	if (rc)
2887 		goto err1;
2888 
2889 	while (bytes_left > 0) {
2890 		tmp_scratch_buff.data =
2891 			((char *)scratch_buff.data) + bytes_read;
2892 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2893 		tmp_scratch_buff.size = bytes;
2894 		compress_buff(&tmp_scratch_buff, dbg_buff);
2895 		bytes_left -= bytes;
2896 		bytes_read += bytes;
2897 	}
2898 
2899 err1:
2900 	release_scratch_buff(&scratch_buff, dbg_buff);
2901 err:
2902 	return rc;
2903 }
2904 
2905 static int collect_cctrl(struct cudbg_init *pdbg_init,
2906 			 struct cudbg_buffer *dbg_buff,
2907 			 struct cudbg_error *cudbg_err)
2908 {
2909 	struct cudbg_buffer scratch_buff;
2910 	struct adapter *padap = pdbg_init->adap;
2911 	u32 size;
2912 	int rc;
2913 
2914 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2915 	scratch_buff.size = size;
2916 
2917 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2918 	if (rc)
2919 		goto err;
2920 
2921 	t4_read_cong_tbl(padap, (void *)scratch_buff.data);
2922 
2923 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2924 	if (rc)
2925 		goto err1;
2926 
2927 	rc = compress_buff(&scratch_buff, dbg_buff);
2928 
2929 err1:
2930 	release_scratch_buff(&scratch_buff, dbg_buff);
2931 err:
2932 	return rc;
2933 }
2934 
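/*
 * Indirect CIM host access: cim_ha_rreg() writes the target address to
 * A_CIM_HOST_ACC_CTRL, waits for the busy bit to clear (check_busy_bit()
 * gives up after a bounded number of polls), and then reads the result from
 * A_CIM_HOST_ACC_DATA.
 */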
2935 static int check_busy_bit(struct adapter *padap)
2936 {
2937 	u32 val;
2938 	u32 busy = 1;
2939 	int i = 0;
2940 	int retry = 10;
2941 	int status = 0;
2942 
2943 	while (busy && (i < retry)) {
2944 		val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
2945 		busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
2946 		i++;
2947 	}
2948 
2949 	if (busy)
2950 		status = -1;
2951 
2952 	return status;
2953 }
2954 
2955 static int cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
2956 {
2957 	int rc = 0;
2958 
2959 	/* write register address into the A_CIM_HOST_ACC_CTRL */
2960 	t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
2961 
2962 	/* Poll HOSTBUSY */
2963 	rc = check_busy_bit(padap);
2964 	if (rc)
2965 		goto err;
2966 
2967 	/* Read value from A_CIM_HOST_ACC_DATA */
2968 	*val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
2969 
2970 err:
2971 	return rc;
2972 }
2973 
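/*
 * Read one block of uP CIM registers described by @up_cim_reg (local offset
 * and range) one word at a time through the CIM host access window.
 */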
2974 static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
2975 		       struct ireg_field *up_cim_reg, u32 *buff)
2976 {
2977 	u32 i;
2978 	int rc = 0;
2979 
2980 	for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
2981 		rc = cim_ha_rreg(padap,
2982 				 up_cim_reg->ireg_local_offset + (i * 4),
2983 				buff);
2984 		if (rc) {
2985 			if (pdbg_init->verbose)
2986 				pdbg_init->print("BUSY timeout reading "
2987 					 "CIM_HOST_ACC_CTRL\n");
2988 			goto err;
2989 		}
2990 
2991 		buff++;
2992 	}
2993 
2994 err:
2995 	return rc;
2996 }
2997 
2998 static int collect_up_cim_indirect(struct cudbg_init *pdbg_init,
2999 				   struct cudbg_buffer *dbg_buff,
3000 				   struct cudbg_error *cudbg_err)
3001 {
3002 	struct cudbg_buffer scratch_buff;
3003 	struct adapter *padap = pdbg_init->adap;
3004 	struct ireg_buf *up_cim;
3005 	u32 size;
3006 	int i, rc, n;
3007 
3008 	n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
3009 	size = sizeof(struct ireg_buf) * n;
3010 	scratch_buff.size = size;
3011 
3012 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3013 	if (rc)
3014 		goto err;
3015 
3016 	up_cim = (struct ireg_buf *)scratch_buff.data;
3017 
3018 	for (i = 0; i < n; i++) {
3019 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
3020 		u32 *buff = up_cim->outbuf;
3021 
3022 		if (is_t5(padap)) {
3023 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
3024 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
3025 			up_cim_reg->ireg_local_offset =
3026 						t5_up_cim_reg_array[i][2];
3027 			up_cim_reg->ireg_offset_range =
3028 						t5_up_cim_reg_array[i][3];
3029 		} else if (is_t6(padap)) {
3030 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
3031 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
3032 			up_cim_reg->ireg_local_offset =
3033 						t6_up_cim_reg_array[i][2];
3034 			up_cim_reg->ireg_offset_range =
3035 						t6_up_cim_reg_array[i][3];
3036 		}
3037 
3038 		rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
3039 
3040 		up_cim++;
3041 	}
3042 
3043 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3044 	if (rc)
3045 		goto err1;
3046 
3047 	rc = compress_buff(&scratch_buff, dbg_buff);
3048 
3049 err1:
3050 	release_scratch_buff(&scratch_buff, dbg_buff);
3051 err:
3052 	return rc;
3053 }
3054 
3055 static int collect_mbox_log(struct cudbg_init *pdbg_init,
3056 			    struct cudbg_buffer *dbg_buff,
3057 			    struct cudbg_error *cudbg_err)
3058 {
3059 #ifdef notyet
3060 	struct cudbg_buffer scratch_buff;
3061 	struct cudbg_mbox_log *mboxlog = NULL;
3062 	struct mbox_cmd_log *log = NULL;
3063 	struct mbox_cmd *entry;
3064 	u64 flit;
3065 	u32 size;
3066 	unsigned int entry_idx;
3067 	int i, k, rc;
3068 	u16 mbox_cmds;
3069 
3070 	if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
3071 		log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3072 			mboxlog_param.log;
3073 		mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3074 				mboxlog_param.mbox_cmds;
3075 	} else {
3076 		if (pdbg_init->verbose)
3077 			pdbg_init->print("Mbox log is not requested\n");
3078 		return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
3079 	}
3080 
3081 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
3082 	scratch_buff.size = size;
3083 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3084 	if (rc)
3085 		goto err;
3086 
3087 	mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;
3088 
3089 	for (k = 0; k < mbox_cmds; k++) {
3090 		entry_idx = log->cursor + k;
3091 		if (entry_idx >= log->size)
3092 			entry_idx -= log->size;
3093 		entry = mbox_cmd_log_entry(log, entry_idx);
3094 
3095 		/* skip over unused entries */
3096 		if (entry->timestamp == 0)
3097 			continue;
3098 
3099 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
3100 
3101 		for (i = 0; i < MBOX_LEN / 8; i++) {
3102 			flit = entry->cmd[i];
3103 			mboxlog->hi[i] = (u32)(flit >> 32);
3104 			mboxlog->lo[i] = (u32)flit;
3105 		}
3106 
3107 		mboxlog++;
3108 	}
3109 
3110 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3111 	if (rc)
3112 		goto err1;
3113 
3114 	rc = compress_buff(&scratch_buff, dbg_buff);
3115 
3116 err1:
3117 	release_scratch_buff(&scratch_buff, dbg_buff);
3118 err:
3119 	return rc;
3120 #endif
3121 	return (EDOOFUS);
3122 }
3123 
3124 static int collect_pbt_tables(struct cudbg_init *pdbg_init,
3125 			      struct cudbg_buffer *dbg_buff,
3126 			      struct cudbg_error *cudbg_err)
3127 {
3128 	struct cudbg_buffer scratch_buff;
3129 	struct adapter *padap = pdbg_init->adap;
3130 	struct cudbg_pbt_tables *pbt = NULL;
3131 	u32 size;
3132 	u32 addr;
3133 	int i, rc;
3134 
3135 	size = sizeof(struct cudbg_pbt_tables);
3136 	scratch_buff.size = size;
3137 
3138 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3139 	if (rc)
3140 		goto err;
3141 
3142 	pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
3143 
3144 	/* PBT dynamic entries */
3145 	addr = CUDBG_CHAC_PBT_ADDR;
3146 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
3147 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
3148 		if (rc) {
3149 			if (pdbg_init->verbose)
3150 				pdbg_init->print("BUSY timeout reading "
3151 					 "CIM_HOST_ACC_CTRL\n");
3152 			goto err1;
3153 		}
3154 	}
3155 
3156 	/* PBT static entries */
3157 
3158 	/* static entries start when bit 6 is set */
3159 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
3160 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
3161 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
3162 		if (rc) {
3163 			if (pdbg_init->verbose)
3164 				pdbg_init->print("BUSY timeout reading "
3165 					 "CIM_HOST_ACC_CTRL\n");
3166 			goto err1;
3167 		}
3168 	}
3169 
3170 	/* LRF entries */
3171 	addr = CUDBG_CHAC_PBT_LRF;
3172 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
3173 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
3174 		if (rc) {
3175 			if (pdbg_init->verbose)
3176 				pdbg_init->print("BUSY timeout reading "
3177 					 "CIM_HOST_ACC_CTRL\n");
3178 			goto err1;
3179 		}
3180 	}
3181 
3182 	/* PBT data entries */
3183 	addr = CUDBG_CHAC_PBT_DATA;
3184 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
3185 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
3186 		if (rc) {
3187 			if (pdbg_init->verbose)
3188 				pdbg_init->print("BUSY timeout reading "
3189 					 "CIM_HOST_ACC_CTRL\n");
3190 			goto err1;
3191 		}
3192 	}
3193 
3194 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3195 	if (rc)
3196 		goto err1;
3197 
3198 	rc = compress_buff(&scratch_buff, dbg_buff);
3199 
3200 err1:
3201 	release_scratch_buff(&scratch_buff, dbg_buff);
3202 err:
3203 	return rc;
3204 }
3205 
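/*
 * Dump the PM RX and PM TX indirect registers.  Each entry of
 * t5_pm_rx_array/t5_pm_tx_array supplies the address/data register pair, the
 * local offset and the number of registers to read via t4_read_indirect().
 */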
3206 static int collect_pm_indirect(struct cudbg_init *pdbg_init,
3207 			       struct cudbg_buffer *dbg_buff,
3208 			       struct cudbg_error *cudbg_err)
3209 {
3210 	struct cudbg_buffer scratch_buff;
3211 	struct adapter *padap = pdbg_init->adap;
3212 	struct ireg_buf *ch_pm;
3213 	u32 size;
3214 	int i, rc, n;
3215 
3216 	n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
3217 	size = sizeof(struct ireg_buf) * n * 2;
3218 	scratch_buff.size = size;
3219 
3220 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3221 	if (rc)
3222 		goto err;
3223 
3224 	ch_pm = (struct ireg_buf *)scratch_buff.data;
3225 
3226 	/*PM_RX*/
3227 	for (i = 0; i < n; i++) {
3228 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3229 		u32 *buff = ch_pm->outbuf;
3230 
3231 		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
3232 		pm_pio->ireg_data = t5_pm_rx_array[i][1];
3233 		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
3234 		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
3235 
3236 		t4_read_indirect(padap,
3237 				pm_pio->ireg_addr,
3238 				pm_pio->ireg_data,
3239 				buff,
3240 				pm_pio->ireg_offset_range,
3241 				pm_pio->ireg_local_offset);
3242 
3243 		ch_pm++;
3244 	}
3245 
3246 	/*PM_Tx*/
3247 	n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
3248 	for (i = 0; i < n; i++) {
3249 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3250 		u32 *buff = ch_pm->outbuf;
3251 
3252 		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
3253 		pm_pio->ireg_data = t5_pm_tx_array[i][1];
3254 		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
3255 		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
3256 
3257 		t4_read_indirect(padap,
3258 				pm_pio->ireg_addr,
3259 				pm_pio->ireg_data,
3260 				buff,
3261 				pm_pio->ireg_offset_range,
3262 				pm_pio->ireg_local_offset);
3263 
3264 		ch_pm++;
3265 	}
3266 
3267 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3268 	if (rc)
3269 		goto err1;
3270 
3271 	rc = compress_buff(&scratch_buff, dbg_buff);
3272 
3273 err1:
3274 	release_scratch_buff(&scratch_buff, dbg_buff);
3275 err:
3276 	return rc;
3277 
3278 }
3279 
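/*
 * Collect the TID range information.  Most of the boundaries (filter,
 * server, active filter, ETHOFLD, etc.) are obtained from the firmware with
 * t4_query_params(); if the default mailbox is not permitted, mailbox/PF 4
 * is tried before giving up.  A few values come straight from LE_DB
 * registers.
 */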
3280 static int collect_tid(struct cudbg_init *pdbg_init,
3281 		       struct cudbg_buffer *dbg_buff,
3282 		       struct cudbg_error *cudbg_err)
3283 {
3284 
3285 	struct cudbg_buffer scratch_buff;
3286 	struct adapter *padap = pdbg_init->adap;
3287 	struct tid_info_region *tid;
3288 	struct tid_info_region_rev1 *tid1;
3289 	u32 para[7], val[7];
3290 	u32 mbox, pf;
3291 	int rc;
3292 
3293 	scratch_buff.size = sizeof(struct tid_info_region_rev1);
3294 
3295 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3296 	if (rc)
3297 		goto err;
3298 
3299 #define FW_PARAM_DEV_A(param) \
3300 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3301 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3302 #define FW_PARAM_PFVF_A(param) \
3303 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3304 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
3305 	 V_FW_PARAMS_PARAM_Y(0) | \
3306 	 V_FW_PARAMS_PARAM_Z(0))
3307 #define MAX_ATIDS_A 8192U
3308 
3309 	tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
3310 	tid = &(tid1->tid);
3311 	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
3312 	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
3313 	tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
3314 			     sizeof(struct cudbg_ver_hdr);
3315 
3316 	if (is_t5(padap)) {
3317 		tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3318 		tid1->tid_start = 0;
3319 	} else if (is_t6(padap)) {
3320 		tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
3321 		tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
3322 	}
3323 
3324 	tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
3325 
3326 	para[0] = FW_PARAM_PFVF_A(FILTER_START);
3327 	para[1] = FW_PARAM_PFVF_A(FILTER_END);
3328 	para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
3329 	para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
3330 	para[4] = FW_PARAM_DEV_A(NTID);
3331 	para[5] = FW_PARAM_PFVF_A(SERVER_START);
3332 	para[6] = FW_PARAM_PFVF_A(SERVER_END);
3333 
3334 	mbox = padap->mbox;
3335 	pf = padap->pf;
3336 	rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3337 	if (rc <  0) {
3338 		if (rc == -FW_EPERM) {
3339 			/* It looks like we don't have permission to use
3340 			 * padap->mbox.
3341 			 *
3342 			 * Try mbox 4.  If it works, we'll continue to
3343 			 * collect the rest of tid info from mbox 4.
3344 			 * Else, quit trying to collect tid info.
3345 			 */
3346 			mbox = 4;
3347 			pf = 4;
3348 			rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3349 			if (rc < 0) {
3350 				cudbg_err->sys_err = rc;
3351 				goto err1;
3352 			}
3353 		} else {
3354 			cudbg_err->sys_err = rc;
3355 			goto err1;
3356 		}
3357 	}
3358 
3359 	tid->ftid_base = val[0];
3360 	tid->nftids = val[1] - val[0] + 1;
3361 	/*active filter region*/
3362 	if (val[2] != val[3]) {
3363 #ifdef notyet
3364 		tid->flags |= FW_OFLD_CONN;
3365 #endif
3366 		tid->aftid_base = val[2];
3367 		tid->aftid_end = val[3];
3368 	}
3369 	tid->ntids = val[4];
3370 	tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
3371 	tid->stid_base = val[5];
3372 	tid->nstids = val[6] - val[5] + 1;
3373 
3374 	if (chip_id(padap) >= CHELSIO_T6) {
3375 		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
3376 		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
3377 		rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3378 		if (rc < 0) {
3379 			cudbg_err->sys_err = rc;
3380 			goto err1;
3381 		}
3382 
3383 		tid->hpftid_base = val[0];
3384 		tid->nhpftids = val[1] - val[0] + 1;
3385 	}
3386 
3387 	if (chip_id(padap) <= CHELSIO_T5) {
3388 		tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
3389 		tid->hash_base /= 4;
3390 	} else
3391 		tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
3392 
3393 	/*UO context range*/
3394 	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
3395 	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
3396 
3397 	rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3398 	if (rc <  0) {
3399 		cudbg_err->sys_err = rc;
3400 		goto err1;
3401 	}
3402 
3403 	if (val[0] != val[1]) {
3404 		tid->uotid_base = val[0];
3405 		tid->nuotids = val[1] - val[0] + 1;
3406 	}
3407 	tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
3408 	tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
3409 
3410 #undef FW_PARAM_PFVF_A
3411 #undef FW_PARAM_DEV_A
3412 #undef MAX_ATIDS_A
3413 
3414 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3415 	if (rc)
3416 		goto err1;
3417 	rc = compress_buff(&scratch_buff, dbg_buff);
3418 
3419 err1:
3420 	release_scratch_buff(&scratch_buff, dbg_buff);
3421 err:
3422 	return rc;
3423 }
3424 
3425 static int collect_tx_rate(struct cudbg_init *pdbg_init,
3426 			   struct cudbg_buffer *dbg_buff,
3427 			   struct cudbg_error *cudbg_err)
3428 {
3429 	struct cudbg_buffer scratch_buff;
3430 	struct adapter *padap = pdbg_init->adap;
3431 	struct tx_rate *tx_rate;
3432 	u32 size;
3433 	int rc;
3434 
3435 	size = sizeof(struct tx_rate);
3436 	scratch_buff.size = size;
3437 
3438 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3439 	if (rc)
3440 		goto err;
3441 
3442 	tx_rate = (struct tx_rate *)scratch_buff.data;
3443 	t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
3444 	tx_rate->nchan = padap->chip_params->nchan;
3445 
3446 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3447 	if (rc)
3448 		goto err1;
3449 
3450 	rc = compress_buff(&scratch_buff, dbg_buff);
3451 
3452 err1:
3453 	release_scratch_buff(&scratch_buff, dbg_buff);
3454 err:
3455 	return rc;
3456 }
3457 
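/*
 * Convert a TCAM (x, y) pair into an Ethernet address and mask.  The mask is
 * simply x | y; the address is taken from the low 48 bits of the
 * byte-swapped y value.
 */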
3458 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
3459 {
3460 	*mask = x | y;
3461 	y = (__force u64)cpu_to_be64(y);
3462 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
3463 }
3464 
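/*
 * Fallback used when the FW_LDST_CMD mailbox read of an MPS replication map
 * fails: read the replication vector directly from the MPS_VF_RPLCT_MAP
 * registers instead.
 */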
3465 static void mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
3466 {
3467 	if (is_t5(padap)) {
3468 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3469 							  A_MPS_VF_RPLCT_MAP3));
3470 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3471 							  A_MPS_VF_RPLCT_MAP2));
3472 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3473 							  A_MPS_VF_RPLCT_MAP1));
3474 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3475 							  A_MPS_VF_RPLCT_MAP0));
3476 	} else {
3477 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3478 							  A_MPS_VF_RPLCT_MAP7));
3479 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3480 							  A_MPS_VF_RPLCT_MAP6));
3481 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3482 							  A_MPS_VF_RPLCT_MAP5));
3483 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3484 							  A_MPS_VF_RPLCT_MAP4));
3485 	}
3486 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
3487 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
3488 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
3489 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
3490 }
3491 
3492 static int collect_mps_tcam(struct cudbg_init *pdbg_init,
3493 			    struct cudbg_buffer *dbg_buff,
3494 			    struct cudbg_error *cudbg_err)
3495 {
3496 	struct cudbg_buffer scratch_buff;
3497 	struct adapter *padap = pdbg_init->adap;
3498 	struct cudbg_mps_tcam *tcam = NULL;
3499 	u32 size = 0, i, n, total_size = 0;
3500 	u32 ctl, data2;
3501 	u64 tcamy, tcamx, val;
3502 	int rc;
3503 
3504 	n = padap->chip_params->mps_tcam_size;
3505 	size = sizeof(struct cudbg_mps_tcam) * n;
3506 	scratch_buff.size = size;
3507 
3508 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3509 	if (rc)
3510 		goto err;
3511 	memset(scratch_buff.data, 0, size);
3512 
3513 	tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
3514 	for (i = 0; i < n; i++) {
3515 		if (chip_id(padap) >= CHELSIO_T6) {
3516 			/* CtlReqID   - 1: use Host Driver Requester ID
3517 			 * CtlCmdType - 0: Read, 1: Write
3518 			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
3519 			 * CtlXYBitSel- 0: Y bit, 1: X bit
3520 			 */
3521 
3522 			/* Read tcamy */
3523 			ctl = (V_CTLREQID(1) |
3524 			       V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
3525 			if (i < 256)
3526 				ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
3527 			else
3528 				ctl |= V_CTLTCAMINDEX(i - 256) |
3529 				       V_CTLTCAMSEL(1);
3530 
3531 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3532 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3533 			tcamy = G_DMACH(val) << 32;
3534 			tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3535 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3536 			tcam->lookup_type = G_DATALKPTYPE(data2);
3537 
3538 			/* 0 - Outer header, 1 - Inner header
3539 			 * [71:48] bit locations are overloaded for
3540 			 * outer vs. inner lookup types.
3541 			 */
3542 
3543 			if (tcam->lookup_type &&
3544 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3545 				/* Inner header VNI */
3546 				tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
3547 					     (G_DATAVIDH1(data2) << 16) |
3548 					     G_VIDL(val);
3549 				tcam->dip_hit = data2 & F_DATADIPHIT;
3550 			} else {
3551 				tcam->vlan_vld = data2 & F_DATAVIDH2;
3552 				tcam->ivlan = G_VIDL(val);
3553 			}
3554 
3555 			tcam->port_num = G_DATAPORTNUM(data2);
3556 
3557 			/* Read tcamx. Change the control param */
3558 			ctl |= V_CTLXYBITSEL(1);
3559 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3560 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3561 			tcamx = G_DMACH(val) << 32;
3562 			tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3563 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3564 			if (tcam->lookup_type &&
3565 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3566 				/* Inner header VNI mask */
3567 				tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
3568 					     (G_DATAVIDH1(data2) << 16) |
3569 					     G_VIDL(val);
3570 			}
3571 		} else {
3572 			tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
3573 			tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
3574 		}
3575 
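		/*
		 * Entries whose X and Y patterns overlap are presumed unused,
		 * so skip them; everything else is reported along with its
		 * SRAM classification words and, if replication is enabled,
		 * its replication map.
		 */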
3576 		if (tcamx & tcamy)
3577 			continue;
3578 
3579 		tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
3580 		tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));
3581 
3582 		if (is_t5(padap))
3583 			tcam->repli = (tcam->cls_lo & F_REPLICATE);
3584 		else if (is_t6(padap))
3585 			tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);
3586 
3587 		if (tcam->repli) {
3588 			struct fw_ldst_cmd ldst_cmd;
3589 			struct fw_ldst_mps_rplc mps_rplc;
3590 
3591 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
3592 			ldst_cmd.op_to_addrspace =
3593 				htonl(V_FW_CMD_OP(FW_LDST_CMD) |
3594 				      F_FW_CMD_REQUEST |
3595 				      F_FW_CMD_READ |
3596 				      V_FW_LDST_CMD_ADDRSPACE(
3597 					      FW_LDST_ADDRSPC_MPS));
3598 
3599 			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
3600 
3601 			ldst_cmd.u.mps.rplc.fid_idx =
3602 				htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
3603 				      V_FW_LDST_CMD_IDX(i));
3604 
3605 			rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
3606 					sizeof(ldst_cmd), &ldst_cmd);
3607 
3608 			if (rc)
3609 				mps_rpl_backdoor(padap, &mps_rplc);
3610 			else
3611 				mps_rplc = ldst_cmd.u.mps.rplc;
3612 
3613 			tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
3614 			tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
3615 			tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
3616 			tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
3617 			if (padap->chip_params->mps_rplc_size >
3618 					CUDBG_MAX_RPLC_SIZE) {
3619 				tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
3620 				tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
3621 				tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
3622 				tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
3623 			}
3624 		}
3625 		cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
3626 
3627 		tcam->idx = i;
3628 		tcam->rplc_size = padap->chip_params->mps_rplc_size;
3629 
3630 		total_size += sizeof(struct cudbg_mps_tcam);
3631 
3632 		tcam++;
3633 	}
3634 
3635 	if (total_size == 0) {
3636 		rc = CUDBG_SYSTEM_ERROR;
3637 		goto err1;
3638 	}
3639 
3640 	scratch_buff.size = total_size;
3641 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3642 	if (rc)
3643 		goto err1;
3644 
3645 	rc = compress_buff(&scratch_buff, dbg_buff);
3646 
3647 err1:
3648 	scratch_buff.size = size;
3649 	release_scratch_buff(&scratch_buff, dbg_buff);
3650 err:
3651 	return rc;
3652 }
3653 
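/*
 * Dump the PCIe configuration space.  t5_pcie_config_array holds
 * [start, end] register ranges; every 4-byte register in each range is read
 * with t4_hw_pci_read_cfg4().
 */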
3654 static int collect_pcie_config(struct cudbg_init *pdbg_init,
3655 			       struct cudbg_buffer *dbg_buff,
3656 			       struct cudbg_error *cudbg_err)
3657 {
3658 	struct cudbg_buffer scratch_buff;
3659 	struct adapter *padap = pdbg_init->adap;
3660 	u32 size, *value, j;
3661 	int i, rc, n;
3662 
3663 	size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
3664 	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
3665 	scratch_buff.size = size;
3666 
3667 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3668 	if (rc)
3669 		goto err;
3670 
3671 	value = (u32 *)scratch_buff.data;
3672 	for (i = 0; i < n; i++) {
3673 		for (j = t5_pcie_config_array[i][0];
3674 		     j <= t5_pcie_config_array[i][1]; j += 4) {
3675 			*value++ = t4_hw_pci_read_cfg4(padap, j);
3676 		}
3677 	}
3678 
3679 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3680 	if (rc)
3681 		goto err1;
3682 
3683 	rc = compress_buff(&scratch_buff, dbg_buff);
3684 
3685 err1:
3686 	release_scratch_buff(&scratch_buff, dbg_buff);
3687 err:
3688 	return rc;
3689 }
3690 
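/*
 * Read one LE TCAM entry through the DBGI interface: clear the request data
 * registers, issue the DBGI command for @tid, poll until DBGICMDBUSY clears
 * (bounded retries), check the response status and copy out the response
 * data words.
 */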
3691 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
3692 			  struct cudbg_tid_data *tid_data)
3693 {
3694 	int i, cmd_retry = 8;
3695 	struct adapter *padap = pdbg_init->adap;
3696 	u32 val;
3697 
3698 	/* Fill REQ_DATA regs with 0's */
3699 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3700 		t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);
3701 
3702 	/* Write DBIG command */
3703 	val = (0x4 << S_DBGICMD) | tid;
3704 	t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
3705 	tid_data->dbig_cmd = val;
3706 
3707 	val = 0;
3708 	val |= 1 << S_DBGICMDSTRT;
3709 	val |= 1;  /* LE mode */
3710 	t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
3711 	tid_data->dbig_conf = val;
3712 
3713 	/* Poll the DBGICMDBUSY bit */
3714 	val = 1;
3715 	while (val) {
3716 		val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
3717 		val = (val >> S_DBGICMDBUSY) & 1;
3718 		cmd_retry--;
3719 		if (!cmd_retry) {
3720 			if (pdbg_init->verbose)
3721 				pdbg_init->print("%s(): Timeout waiting for non-busy\n",
3722 					 __func__);
3723 			return CUDBG_SYSTEM_ERROR;
3724 		}
3725 	}
3726 
3727 	/* Check RESP status */
3728 	val = 0;
3729 	val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
3730 	tid_data->dbig_rsp_stat = val;
3731 	if (!(val & 1)) {
3732 		if (pdbg_init->verbose)
3733 			pdbg_init->print("%s(): DBGI command failed\n", __func__);
3734 		return CUDBG_SYSTEM_ERROR;
3735 	}
3736 
3737 	/* Read RESP data */
3738 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3739 		tid_data->data[i] = t4_read_reg(padap,
3740 						A_LE_DB_DBGI_RSP_DATA +
3741 						(i << 2));
3742 
3743 	tid_data->tid = tid;
3744 
3745 	return 0;
3746 }
3747 
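/*
 * Walk the LE (Lookup Engine) TCAM.  The region boundaries and the maximum
 * tid (which depends on whether hashing is enabled) are read from the LE_DB
 * registers first, then every tid is fetched with cudbg_read_tid() and the
 * results are compressed out in CUDBG_CHUNK_SIZE batches.
 */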
3748 static int collect_le_tcam(struct cudbg_init *pdbg_init,
3749 			   struct cudbg_buffer *dbg_buff,
3750 			   struct cudbg_error *cudbg_err)
3751 {
3752 	struct cudbg_buffer scratch_buff;
3753 	struct adapter *padap = pdbg_init->adap;
3754 	struct cudbg_tcam tcam_region = {0};
3755 	struct cudbg_tid_data *tid_data = NULL;
3756 	u32 value, bytes = 0, bytes_left  = 0;
3757 	u32 i;
3758 	int rc, size;
3759 
3760 	/* Get the LE regions */
3761 	/* Get hash base index */
3762 	value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3763 	tcam_region.tid_hash_base = value;
3764 
3765 	/* Get routing table index */
3766 	value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
3767 	tcam_region.routing_start = value;
3768 
3769 	/*Get clip table index */
3770 	value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
3771 	tcam_region.clip_start = value;
3772 
3773 	/* Get filter table index */
3774 	value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
3775 	tcam_region.filter_start = value;
3776 
3777 	/* Get server table index */
3778 	value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
3779 	tcam_region.server_start = value;
3780 
3781 	/* Check whether hash is enabled and calculate the max tids */
3782 	value = t4_read_reg(padap, A_LE_DB_CONFIG);
3783 	if ((value >> S_HASHEN) & 1) {
3784 		value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
3785 		if (chip_id(padap) > CHELSIO_T5)
3786 			tcam_region.max_tid = (value & 0xFFFFF) +
3787 					      tcam_region.tid_hash_base;
3788 		else {	    /* for T5 */
3789 			value = G_HASHTIDSIZE(value);
3790 			value = 1 << value;
3791 			tcam_region.max_tid = value +
3792 				tcam_region.tid_hash_base;
3793 		}
3794 	} else	 /* hash not enabled */
3795 		tcam_region.max_tid = CUDBG_MAX_TCAM_TID;
3796 
3797 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
3798 	size += sizeof(struct cudbg_tcam);
3799 	scratch_buff.size = size;
3800 
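	/*
	 * Write the compression header first; the TID data itself is
	 * collected and compressed in CUDBG_CHUNK_SIZE pieces below.
	 */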
3801 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3802 	if (rc)
3803 		goto err;
3804 
3805 	rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
3806 	if (rc)
3807 		goto err;
3808 
3809 	memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
3810 
3811 	tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
3812 					     scratch_buff.data) + 1);
3813 	bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
3814 	bytes = sizeof(struct cudbg_tcam);
3815 
3816 	/* Read all TIDs */
3817 	for (i = 0; i < tcam_region.max_tid; i++) {
3818 		if (bytes_left < sizeof(struct cudbg_tid_data)) {
3819 			scratch_buff.size = bytes;
3820 			rc = compress_buff(&scratch_buff, dbg_buff);
3821 			if (rc)
3822 				goto err1;
3823 			scratch_buff.size = CUDBG_CHUNK_SIZE;
3824 			release_scratch_buff(&scratch_buff, dbg_buff);
3825 
3826 			/* new alloc */
3827 			rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
3828 					      &scratch_buff);
3829 			if (rc)
3830 				goto err;
3831 
3832 			tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
3833 			bytes_left = CUDBG_CHUNK_SIZE;
3834 			bytes = 0;
3835 		}
3836 
3837 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
3838 
3839 		if (rc) {
3840 			cudbg_err->sys_err = rc;
3841 			goto err1;
3842 		}
3843 
3844 		tid_data++;
3845 		bytes_left -= sizeof(struct cudbg_tid_data);
3846 		bytes += sizeof(struct cudbg_tid_data);
3847 	}
3848 
3849 	if (bytes) {
3850 		scratch_buff.size = bytes;
3851 		rc = compress_buff(&scratch_buff, dbg_buff);
3852 	}
3853 
3854 err1:
3855 	scratch_buff.size = CUDBG_CHUNK_SIZE;
3856 	release_scratch_buff(&scratch_buff, dbg_buff);
3857 err:
3858 	return rc;
3859 }
3860 
3861 static int collect_ma_indirect(struct cudbg_init *pdbg_init,
3862 			       struct cudbg_buffer *dbg_buff,
3863 			       struct cudbg_error *cudbg_err)
3864 {
3865 	struct cudbg_buffer scratch_buff;
3866 	struct adapter *padap = pdbg_init->adap;
3867 	struct ireg_buf *ma_indr = NULL;
3868 	u32 size, j;
3869 	int i, rc, n;
3870 
3871 	if (chip_id(padap) < CHELSIO_T6) {
3872 		if (pdbg_init->verbose)
3873 			pdbg_init->print("MA indirect available only in T6\n");
3874 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3875 		goto err;
3876 	}
3877 
3878 	n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
3879 	size = sizeof(struct ireg_buf) * n * 2;
3880 	scratch_buff.size = size;
3881 
3882 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3883 	if (rc)
3884 		goto err;
3885 
3886 	ma_indr = (struct ireg_buf *)scratch_buff.data;
3887 
3888 	for (i = 0; i < n; i++) {
3889 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3890 		u32 *buff = ma_indr->outbuf;
3891 
3892 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
3893 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
3894 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
3895 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
3896 
3897 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
3898 				 buff, ma_fli->ireg_offset_range,
3899 				 ma_fli->ireg_local_offset);
3900 
3901 		ma_indr++;
3902 
3903 	}
3904 
3905 	n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));
3906 
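	/*
	 * These registers are read one word at a time; the indirect local
	 * offset advances by 0x20 between consecutive reads.
	 */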
3907 	for (i = 0; i < n; i++) {
3908 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3909 		u32 *buff = ma_indr->outbuf;
3910 
3911 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
3912 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
3913 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
3914 
3915 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
3916 			t4_read_indirect(padap, ma_fli->ireg_addr,
3917 					 ma_fli->ireg_data, buff, 1,
3918 					 ma_fli->ireg_local_offset);
3919 			buff++;
3920 			ma_fli->ireg_local_offset += 0x20;
3921 		}
3922 		ma_indr++;
3923 	}
3924 
3925 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3926 	if (rc)
3927 		goto err1;
3928 
3929 	rc = compress_buff(&scratch_buff, dbg_buff);
3930 
3931 err1:
3932 	release_scratch_buff(&scratch_buff, dbg_buff);
3933 err:
3934 	return rc;
3935 }
3936 
3937 static int collect_hma_indirect(struct cudbg_init *pdbg_init,
3938 			       struct cudbg_buffer *dbg_buff,
3939 			       struct cudbg_error *cudbg_err)
3940 {
3941 	struct cudbg_buffer scratch_buff;
3942 	struct adapter *padap = pdbg_init->adap;
3943 	struct ireg_buf *hma_indr = NULL;
3944 	u32 size;
3945 	int i, rc, n;
3946 
3947 	if (chip_id(padap) < CHELSIO_T6) {
3948 		if (pdbg_init->verbose)
3949 			pdbg_init->print("HMA indirect available only in T6\n");
3950 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3951 		goto err;
3952 	}
3953 
3954 	n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
3955 	size = sizeof(struct ireg_buf) * n;
3956 	scratch_buff.size = size;
3957 
3958 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3959 	if (rc)
3960 		goto err;
3961 
3962 	hma_indr = (struct ireg_buf *)scratch_buff.data;
3963 
3964 	for (i = 0; i < n; i++) {
3965 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
3966 		u32 *buff = hma_indr->outbuf;
3967 
3968 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
3969 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
3970 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
3971 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
3972 
3973 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
3974 				 buff, hma_fli->ireg_offset_range,
3975 				 hma_fli->ireg_local_offset);
3976 
3977 		hma_indr++;
3978 
3979 	}
3980 
3981 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3982 	if (rc)
3983 		goto err1;
3984 
3985 	rc = compress_buff(&scratch_buff, dbg_buff);
3986 
3987 err1:
3988 	release_scratch_buff(&scratch_buff, dbg_buff);
3989 err:
3990 	return rc;
3991 }
3992 
3993 static int collect_pcie_indirect(struct cudbg_init *pdbg_init,
3994 				 struct cudbg_buffer *dbg_buff,
3995 				 struct cudbg_error *cudbg_err)
3996 {
3997 	struct cudbg_buffer scratch_buff;
3998 	struct adapter *padap = pdbg_init->adap;
3999 	struct ireg_buf *ch_pcie;
4000 	u32 size;
4001 	int i, rc, n;
4002 
4003 	n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
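	/* Reserve room for both the PCIE_PDBG and PCIE_CDBG register groups. */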
4004 	size = sizeof(struct ireg_buf) * n * 2;
4005 	scratch_buff.size = size;
4006 
4007 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4008 	if (rc)
4009 		goto err;
4010 
4011 	ch_pcie = (struct ireg_buf *)scratch_buff.data;
4012 
4013 	/* PCIE_PDBG */
4014 	for (i = 0; i < n; i++) {
4015 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4016 		u32 *buff = ch_pcie->outbuf;
4017 
4018 		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
4019 		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
4020 		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
4021 		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
4022 
4023 		t4_read_indirect(padap,
4024 				pcie_pio->ireg_addr,
4025 				pcie_pio->ireg_data,
4026 				buff,
4027 				pcie_pio->ireg_offset_range,
4028 				pcie_pio->ireg_local_offset);
4029 
4030 		ch_pcie++;
4031 	}
4032 
4033 	/* PCIE_CDBG */
4034 	n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
4035 	for (i = 0; i < n; i++) {
4036 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4037 		u32 *buff = ch_pcie->outbuf;
4038 
4039 		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
4040 		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
4041 		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
4042 		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
4043 
4044 		t4_read_indirect(padap,
4045 				pcie_pio->ireg_addr,
4046 				pcie_pio->ireg_data,
4047 				buff,
4048 				pcie_pio->ireg_offset_range,
4049 				pcie_pio->ireg_local_offset);
4050 
4051 		ch_pcie++;
4052 	}
4053 
4054 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4055 	if (rc)
4056 		goto err1;
4057 
4058 	rc = compress_buff(&scratch_buff, dbg_buff);
4059 
4060 err1:
4061 	release_scratch_buff(&scratch_buff, dbg_buff);
4062 err:
4063 	return rc;
4064 
4065 }
4066 
4067 static int collect_tp_indirect(struct cudbg_init *pdbg_init,
4068 			       struct cudbg_buffer *dbg_buff,
4069 			       struct cudbg_error *cudbg_err)
4070 {
4071 	struct cudbg_buffer scratch_buff;
4072 	struct adapter *padap = pdbg_init->adap;
4073 	struct ireg_buf *ch_tp_pio;
4074 	u32 size;
4075 	int i, rc, n = 0;
4076 
4077 	if (is_t5(padap))
4078 		n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
4079 	else if (is_t6(padap))
4080 		n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));
4081 
4082 	size = sizeof(struct ireg_buf) * n * 3;
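	/* Reserve room for the TP_PIO, TP_TM_PIO and TP_MIB_INDEX groups. */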
4083 	scratch_buff.size = size;
4084 
4085 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4086 	if (rc)
4087 		goto err;
4088 
4089 	ch_tp_pio = (struct ireg_buf *)scratch_buff.data;
4090 
4091 	/* TP_PIO */
4092 	for (i = 0; i < n; i++) {
4093 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4094 		u32 *buff = ch_tp_pio->outbuf;
4095 
4096 		if (is_t5(padap)) {
4097 			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
4098 			tp_pio->ireg_data = t5_tp_pio_array[i][1];
4099 			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
4100 			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
4101 		} else if (is_t6(padap)) {
4102 			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
4103 			tp_pio->ireg_data = t6_tp_pio_array[i][1];
4104 			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
4105 			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
4106 		}
4107 
4108 		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
4109 			       tp_pio->ireg_local_offset, true);
4110 
4111 		ch_tp_pio++;
4112 	}
4113 
4114 	/* TP_TM_PIO */
4115 	if (is_t5(padap))
4116 		n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
4117 	else if (is_t6(padap))
4118 		n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));
4119 
4120 	for (i = 0; i < n; i++) {
4121 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4122 		u32 *buff = ch_tp_pio->outbuf;
4123 
4124 		if (is_t5(padap)) {
4125 			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
4126 			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
4127 			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
4128 			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
4129 		} else if (is_t6(padap)) {
4130 			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
4131 			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
4132 			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
4133 			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
4134 		}
4135 
4136 		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
4137 				  tp_pio->ireg_local_offset, true);
4138 
4139 		ch_tp_pio++;
4140 	}
4141 
4142 	/* TP_MIB_INDEX */
4143 	if (is_t5(padap))
4144 		n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
4145 	else if (is_t6(padap))
4146 		n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));
4147 
4148 	for (i = 0; i < n; i++) {
4149 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4150 		u32 *buff = ch_tp_pio->outbuf;
4151 
4152 		if (is_t5(padap)) {
4153 			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
4154 			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
4155 			tp_pio->ireg_local_offset =
4156 				t5_tp_mib_index_array[i][2];
4157 			tp_pio->ireg_offset_range =
4158 				t5_tp_mib_index_array[i][3];
4159 		} else if (is_t6(padap)) {
4160 			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
4161 			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
4162 			tp_pio->ireg_local_offset =
4163 				t6_tp_mib_index_array[i][2];
4164 			tp_pio->ireg_offset_range =
4165 				t6_tp_mib_index_array[i][3];
4166 		}
4167 
4168 		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
4169 			       tp_pio->ireg_local_offset, true);
4170 
4171 		ch_tp_pio++;
4172 	}
4173 
4174 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4175 	if (rc)
4176 		goto err1;
4177 
4178 	rc = compress_buff(&scratch_buff, dbg_buff);
4179 
4180 err1:
4181 	release_scratch_buff(&scratch_buff, dbg_buff);
4182 err:
4183 	return rc;
4184 }
4185 
4186 static int collect_sge_indirect(struct cudbg_init *pdbg_init,
4187 				struct cudbg_buffer *dbg_buff,
4188 				struct cudbg_error *cudbg_err)
4189 {
4190 	struct cudbg_buffer scratch_buff;
4191 	struct adapter *padap = pdbg_init->adap;
4192 	struct ireg_buf *ch_sge_dbg;
4193 	u32 size;
4194 	int i, rc;
4195 
4196 	size = sizeof(struct ireg_buf) * 2;
4197 	scratch_buff.size = size;
4198 
4199 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4200 	if (rc)
4201 		goto err;
4202 
4203 	ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;
4204 
4205 	for (i = 0; i < 2; i++) {
4206 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
4207 		u32 *buff = ch_sge_dbg->outbuf;
4208 
4209 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
4210 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
4211 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
4212 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
4213 
4214 		t4_read_indirect(padap,
4215 				sge_pio->ireg_addr,
4216 				sge_pio->ireg_data,
4217 				buff,
4218 				sge_pio->ireg_offset_range,
4219 				sge_pio->ireg_local_offset);
4220 
4221 		ch_sge_dbg++;
4222 	}
4223 
4224 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4225 	if (rc)
4226 		goto err1;
4227 
4228 	rc = compress_buff(&scratch_buff, dbg_buff);
4229 
4230 err1:
4231 	release_scratch_buff(&scratch_buff, dbg_buff);
4232 err:
4233 	return rc;
4234 }
4235 
4236 static int collect_full(struct cudbg_init *pdbg_init,
4237 			struct cudbg_buffer *dbg_buff,
4238 			struct cudbg_error *cudbg_err)
4239 {
4240 	struct cudbg_buffer scratch_buff;
4241 	struct adapter *padap = pdbg_init->adap;
4242 	u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
4243 	u32 *sp;
4244 	int rc;
4245 	int nreg = 0;
4246 
4247 	/* Collect the following registers:
4248 	 * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
4249 	 * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
4250 	 * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
4251 	 * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
4252 	 * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
4253 	 * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3) (T6 only),
4254 	 * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
4255 	 */
4256 
4257 	if (is_t5(padap))
4258 		nreg = 6;
4259 	else if (is_t6(padap))
4260 		nreg = 7;
4261 
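	/* One 32-bit word is captured for each register listed above. */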
4262 	scratch_buff.size = nreg * sizeof(u32);
4263 
4264 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4265 	if (rc)
4266 		goto err;
4267 
4268 	sp = (u32 *)scratch_buff.data;
4269 
4270 	/* TP_DBG_SCHED_TX */
4271 	reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
4272 	reg_offset_range = 1;
4273 
4274 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4275 
4276 	sp++;
4277 
4278 	/* TP_DBG_SCHED_RX */
4279 	reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
4280 	reg_offset_range = 1;
4281 
4282 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4283 
4284 	sp++;
4285 
4286 	/* TP_DBG_CSIDE_INT */
4287 	reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
4288 	reg_offset_range = 1;
4289 
4290 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4291 
4292 	sp++;
4293 
4294 	/* TP_DBG_ESIDE_INT */
4295 	reg_local_offset = t5_tp_pio_array[8][2] + 3;
4296 	reg_offset_range = 1;
4297 
4298 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4299 
4300 	sp++;
4301 
4302 	/* PCIE_CDEBUG_INDEX[AppData0] */
4303 	reg_addr = t5_pcie_cdbg_array[0][0];
4304 	reg_data = t5_pcie_cdbg_array[0][1];
4305 	reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
4306 	reg_offset_range = 1;
4307 
4308 	t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
4309 			 reg_local_offset);
4310 
4311 	sp++;
4312 
4313 	if (is_t6(padap)) {
4314 		/* PCIE_CDEBUG_INDEX[AppData1] */
4315 		reg_addr = t5_pcie_cdbg_array[0][0];
4316 		reg_data = t5_pcie_cdbg_array[0][1];
4317 		reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
4318 		reg_offset_range = 1;
4319 
4320 		t4_read_indirect(padap, reg_addr, reg_data, sp,
4321 				 reg_offset_range, reg_local_offset);
4322 
4323 		sp++;
4324 	}
4325 
4326 	/* SGE_DEBUG_DATA_HIGH_INDEX_10 */
4327 	*sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);
4328 
4329 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4330 	if (rc)
4331 		goto err1;
4332 
4333 	rc = compress_buff(&scratch_buff, dbg_buff);
4334 
4335 err1:
4336 	release_scratch_buff(&scratch_buff, dbg_buff);
4337 err:
4338 	return rc;
4339 }
4340 
4341 static int collect_vpd_data(struct cudbg_init *pdbg_init,
4342 			    struct cudbg_buffer *dbg_buff,
4343 			    struct cudbg_error *cudbg_err)
4344 {
4345 #ifdef notyet
4346 	struct cudbg_buffer scratch_buff;
4347 	struct adapter *padap = pdbg_init->adap;
4348 	struct struct_vpd_data *vpd_data;
4349 	char vpd_ver[4];
4350 	u32 fw_vers;
4351 	u32 size;
4352 	int rc;
4353 
4354 	size = sizeof(struct struct_vpd_data);
4355 	scratch_buff.size = size;
4356 
4357 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4358 	if (rc)
4359 		goto err;
4360 
4361 	vpd_data = (struct struct_vpd_data *)scratch_buff.data;
4362 
4363 	if (is_t5(padap)) {
4364 		read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
4365 		read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
4366 		read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
4367 		read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
4368 	} else if (is_t6(padap)) {
4369 		read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
4370 		read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
4371 		read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
4372 		read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
4373 	}
4374 
4375 	if (is_fw_attached(pdbg_init)) {
4376 		rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
4377 	} else {
4378 		rc = 1;
4379 	}
4380 
4381 	if (rc) {
4382 		/* Now try the backdoor mechanism */
4383 		rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
4384 				  (u8 *)&vpd_data->scfg_vers);
4385 		if (rc)
4386 			goto err1;
4387 	}
4388 
4389 	if (is_fw_attached(pdbg_init)) {
4390 		rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
4391 	} else {
4392 		rc = 1;
4393 	}
4394 
4395 	if (rc) {
4396 		/* Now try the backdoor mechanism */
4397 		rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
4398 				  (u8 *)vpd_ver);
4399 		if (rc)
4400 			goto err1;
4401 		/* read_vpd_reg returns the stored hex as a string;
4402 		 * convert the hex string to a character string.
4403 		 * The VPD version is only 2 bytes. */
4404 		sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]);
4405 		vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
4406 	}
4407 
4408 	/* Get FW version if it's not already filled in */
4409 	fw_vers = padap->params.fw_vers;
4410 	if (!fw_vers) {
4411 		rc = t4_get_fw_version(padap, &fw_vers);
4412 		if (rc)
4413 			goto err1;
4414 	}
4415 
4416 	vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
4417 	vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
4418 	vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
4419 	vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);
4420 
4421 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4422 	if (rc)
4423 		goto err1;
4424 
4425 	rc = compress_buff(&scratch_buff, dbg_buff);
4426 
4427 err1:
4428 	release_scratch_buff(&scratch_buff, dbg_buff);
4429 err:
4430 	return rc;
4431 #endif
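	/* VPD collection is compiled out above, so report a programming error. */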
4432 	return (EDOOFUS);
4433 }
4434