xref: /freebsd/sys/dev/cxgbe/cudbg/cudbg_lib.c (revision a16423b7a48c61370d4eeb542e3ca6c1b4c9579a)
1 /*-
2  * Copyright (c) 2017 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/param.h>
29 
30 #include "common/common.h"
31 #include "common/t4_regs.h"
32 #include "cudbg.h"
33 #include "cudbg_lib_common.h"
34 #include "cudbg_lib.h"
35 #include "cudbg_entity.h"
36 #define  BUFFER_WARN_LIMIT 10000000
37 
/*
 * Entities whose dumps are large (memory regions).  They are skipped on the
 * first pass when the remaining buffer is below BUFFER_WARN_LIMIT and
 * retried at the end of cudbg_collect() via skip_flag (see skip_entity()
 * and reset_skip_entity()).  Fields: {entity_code, skip_flag, priority}.
 */
struct large_entity large_entity_list[] = {
	{CUDBG_EDC0, 0, 0},
	{CUDBG_EDC1, 0 , 0},
	{CUDBG_MC0, 0, 0},
	{CUDBG_MC1, 0, 0}
};
44 
45 static int is_fw_attached(struct cudbg_init *pdbg_init)
46 {
47 
48 	return (pdbg_init->adap->flags & FW_OK);
49 }
50 
51 /* This function will add additional padding bytes into debug_buffer to make it
52  * 4 byte aligned.*/
53 static void align_debug_buffer(struct cudbg_buffer *dbg_buff,
54 			struct cudbg_entity_hdr *entity_hdr)
55 {
56 	u8 zero_buf[4] = {0};
57 	u8 padding, remain;
58 
59 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
60 	padding = 4 - remain;
61 	if (remain) {
62 		memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, &zero_buf,
63 		       padding);
64 		dbg_buff->offset += padding;
65 		entity_hdr->num_pad = padding;
66 	}
67 
68 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
69 }
70 
/*
 * Read one SGE context (cid of type ctype) into data[].
 *
 * Preferred path: a firmware mailbox read (t4_sge_ctxt_rd) under the
 * driver's synchronized-op lock.  If firmware is not attached, the lock
 * cannot be taken, or the mailbox read fails, rc stays non-zero and the
 * register-backdoor read (t4_sge_ctxt_rd_bd) is used instead.
 */
static void read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
			  enum ctxt_type ctype, u32 *data)
{
	struct adapter *padap = pdbg_init->adap;
	int rc = -1;	/* non-zero => fall back to the backdoor read */

	if (is_fw_attached(pdbg_init)) {
		rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
		    "t4cudf");
		if (rc != 0)
			goto out;
		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
				    data);
		end_synchronized_op(padap, 0);
	}

out:
	if (rc)
		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}
91 
/*
 * Locate the next free extended-entity header slot in outbuf.
 *
 * Extended entities are appended after the regular dump (starting at
 * cudbg_hdr->data_len) as a chain of {entity_hdr, data} records linked by
 * next_ext_offset.  This walks the chain to its first empty header,
 * accumulating the total chain size in *ext_size, and leaves
 * dbg_buff->offset pointing at that header.
 *
 * Returns 0 on success or CUDBG_STATUS_BUFFER_SHORT when there is no room
 * for another header.  NOTE(review): the space checks rely on
 * dbg_buff->offset <= dbg_buff->size; the subtraction is unsigned, so an
 * offset past size would wrap rather than fail — assumed not to happen,
 * verify against callers.
 */
static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_entity_hdr **entity_hdr)
{
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
	int rc = 0;
	u32 ext_offset = cudbg_hdr->data_len;
	*ext_size = 0;

	/* Must have room for at least one more header after the dump. */
	if (dbg_buff->size - dbg_buff->offset <=
		 sizeof(struct cudbg_entity_hdr)) {
		rc = CUDBG_STATUS_BUFFER_SHORT;
		goto err;
	}

	*entity_hdr = (struct cudbg_entity_hdr *)
		       ((char *)outbuf + cudbg_hdr->data_len);

	/* Find the last extended entity header */
	while ((*entity_hdr)->size) {

		ext_offset += sizeof(struct cudbg_entity_hdr) +
				     (*entity_hdr)->size;

		*ext_size += (*entity_hdr)->size +
			      sizeof(struct cudbg_entity_hdr);

		if (dbg_buff->size - dbg_buff->offset + *ext_size  <=
			sizeof(struct cudbg_entity_hdr)) {
			rc = CUDBG_STATUS_BUFFER_SHORT;
			goto err;
		}

		/*
		 * Chain break: the stored link does not match the computed
		 * position.  Step back to the last consistent header and
		 * reuse it.
		 */
		if (ext_offset != (*entity_hdr)->next_ext_offset) {
			ext_offset -= sizeof(struct cudbg_entity_hdr) +
				     (*entity_hdr)->size;
			break;
		}

		/* Re-link relative to the accumulated extended size. */
		(*entity_hdr)->next_ext_offset = *ext_size;

		*entity_hdr = (struct cudbg_entity_hdr *)
					   ((char *)outbuf +
					   ext_offset);
	}

	/* update the data offset */
	dbg_buff->offset = ext_offset;
err:
	return rc;
}
143 
/*
 * Write one collected entity to the adapter flash's cudbg region.
 *
 * The flash region holds one {flash_hdr + data_hdr} block per sector; the
 * entity's flash position is derived from its offset in the in-memory dump
 * minus the in-memory header area.  If the entity will not fit in the
 * remaining flash space it is skipped (its size is recorded via
 * update_skip_size() so readers can account for the gap) and rc stays -1.
 *
 * Returns 0 on a successful flash write, a CUDBG_STATUS_* error from
 * cudbg_write_flash(), or -1 when the entity was skipped.
 */
static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
		       u32 cur_entity_data_offset,
		       u32 cur_entity_size,
		       int entity_nu, u32 ext_size)
{
	struct cudbg_private *priv = handle;
	struct cudbg_init *cudbg_init = &priv->dbg_init;
	struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
	u64 timestamp;
	u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
	u32 remain_flash_size;
	u32 flash_data_offset;
	u32 data_hdr_size;
	int rc = -1;
	unsigned int cudbg_len;

	/* Size of the global header plus all per-entity headers. */
	data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
			sizeof(struct cudbg_hdr);
	t4_flash_loc_start(cudbg_init->adap, FLASH_LOC_CUDBG, &cudbg_len);

	/*
	 * Per-sector header overhead plus the entity's data offset relative
	 * to the start of entity data in the in-memory dump.
	 */
	flash_data_offset = ((cudbg_len / SF_SEC_SIZE) *
			     (sizeof(struct cudbg_flash_hdr) +
			      data_hdr_size)) +
			    (cur_entity_data_offset - data_hdr_size);

	if (flash_data_offset > cudbg_len) {
		update_skip_size(sec_info, cur_entity_size);
		if (cudbg_init->verbose)
			cudbg_init->print("Large entity skipping...\n");
		return rc;
	}

	remain_flash_size = cudbg_len - flash_data_offset;

	if (cur_entity_size > remain_flash_size) {
		update_skip_size(sec_info, cur_entity_size);
		if (cudbg_init->verbose)
			cudbg_init->print("Large entity skipping...\n");
	} else {
		/* NOTE(review): timestamp support appears disabled (always 0). */
		timestamp = 0;

		/* entity_nu is 1-based; locate this entity's header slot. */
		cur_entity_hdr_offset +=
			(sizeof(struct cudbg_entity_hdr) *
			(entity_nu - 1));

		rc = cudbg_write_flash(handle, timestamp, dbg_buff,
				       cur_entity_data_offset,
				       cur_entity_hdr_offset,
				       cur_entity_size,
				       ext_size);
		if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
			cudbg_init->print("\n\tFLASH is full... "
				"can not write in flash more\n\n");
	}

	return rc;
}
201 
/*
 * Main entry point: collect all debug entities selected in the caller's
 * dbg_bitmap into outbuf and optionally mirror each entity to flash.
 *
 * Layout of outbuf: a global cudbg_hdr, then CUDBG_MAX_ENTITY entity
 * headers, then the entity data.  If outbuf already contains a dump
 * (cudbg_hdr->data_len != 0) the collection runs in "extended" mode and
 * appends new entities after the existing data instead.
 *
 * Large entities (see large_entity_list) are deferred while the remaining
 * buffer is small and retried in a second pass at the end.
 *
 * On success returns 0 and sets *outbuf_size to the bytes used; on failure
 * returns a CUDBG_STATUS_* error.  Per-entity failures are recorded in the
 * entity header (hdr_flags/sys_err) and do not abort the whole collection.
 */
int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
{
	struct cudbg_entity_hdr *entity_hdr = NULL;
	struct cudbg_entity_hdr *ext_entity_hdr = NULL;
	struct cudbg_hdr *cudbg_hdr;
	struct cudbg_buffer dbg_buff;
	struct cudbg_error cudbg_err = {0};
	int large_entity_code;

	u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
	struct cudbg_init *cudbg_init =
		&(((struct cudbg_private *)handle)->dbg_init);
	struct adapter *padap = cudbg_init->adap;
	u32 total_size, remaining_buf_size;
	u32 ext_size = 0;
	int index, bit, i, rc = -1;
	int all;
	bool flag_ext = 0;

	/* Forget any skip decisions from a previous collection run. */
	reset_skip_entity();

	dbg_buff.data = outbuf;
	dbg_buff.size = *outbuf_size;
	dbg_buff.offset = 0;

	/* Fill in the global header at the front of the output buffer. */
	cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
	cudbg_hdr->signature = CUDBG_SIGNATURE;
	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
	cudbg_hdr->chip_ver = padap->params.chipid;

	/* A pre-existing dump in outbuf switches us to extended mode. */
	if (cudbg_hdr->data_len)
		flag_ext = 1;

	if (cudbg_init->use_flash) {
#ifndef notyet
		rc = t4_get_flash_params(padap);
		if (rc) {
			if (cudbg_init->verbose)
				cudbg_init->print("\nGet flash params failed.\n\n");
			cudbg_init->use_flash = 0;
		}
#endif

#ifdef notyet
		/* Timestamp is mandatory. If it is not passed then disable
		 * flash support
		 */
		if (!cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time) {
			if (cudbg_init->verbose)
				cudbg_init->print("\nTimestamp param missing,"
					  "so ignoring flash write request\n\n");
			cudbg_init->use_flash = 0;
		}
#endif
	}

	/* The buffer must at least hold all entity headers. */
	if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
	    dbg_buff.size) {
		rc = CUDBG_STATUS_SMALL_BUFF;
		total_size = cudbg_hdr->hdr_len;
		goto err;
	}

	/* If ext flag is set then move the offset to the end of the buf
	 * so that we can add ext entities
	 */
	if (flag_ext) {
		ext_entity_hdr = (struct cudbg_entity_hdr *)
			      ((char *)outbuf + cudbg_hdr->hdr_len +
			      (sizeof(struct cudbg_entity_hdr) *
			      (CUDBG_EXT_ENTITY - 1)));
		ext_entity_hdr->start_offset = cudbg_hdr->data_len;
		ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
		ext_entity_hdr->size = 0;
		dbg_buff.offset = cudbg_hdr->data_len;
	} else {
		dbg_buff.offset += cudbg_hdr->hdr_len; /* move 24 bytes*/
		dbg_buff.offset += CUDBG_MAX_ENTITY *
					sizeof(struct cudbg_entity_hdr);
	}

	total_size = dbg_buff.offset;
	all = dbg_bitmap[0] & (1 << CUDBG_ALL);

	/*sort(large_entity_list);*/

	/* First pass: collect every requested entity in order. */
	for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
		index = i / 8;
		bit = i % 8;

		/* The ext entity pseudo-slot is handled separately. */
		if (entity_list[i].bit == CUDBG_EXT_ENTITY)
			continue;

		if (all || (dbg_bitmap[index] & (1 << bit))) {

			if (!flag_ext) {
				rc = get_entity_hdr(outbuf, i, dbg_buff.size,
						    &entity_hdr);
				if (rc)
					cudbg_hdr->hdr_flags = rc;
			} else {
				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
							     &dbg_buff,
							     &entity_hdr);
				if (rc)
					goto err;

				/* move the offset after the ext header */
				dbg_buff.offset +=
					sizeof(struct cudbg_entity_hdr);
			}

			entity_hdr->entity_type = i;
			entity_hdr->start_offset = dbg_buff.offset;
			/* process each entity by calling process_entity fp */
			remaining_buf_size = dbg_buff.size - dbg_buff.offset;

			/* Defer large entities until the retry pass below. */
			if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
			    is_large_entity(i)) {
				if (cudbg_init->verbose)
					cudbg_init->print("Skipping %s\n",
					    entity_list[i].name);
				skip_entity(i);
				continue;
			} else {

				/* If fw_attach is 0, then skip entities which
				 * communicates with firmware
				 */

				if (!is_fw_attached(cudbg_init) &&
				    (entity_list[i].flag &
				    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
					if (cudbg_init->verbose)
						cudbg_init->print("Skipping %s entity,"\
							  "because fw_attach "\
							  "is 0\n",
							  entity_list[i].name);
					continue;
				}

				if (cudbg_init->verbose)
					cudbg_init->print("collecting debug entity: "\
						  "%s\n", entity_list[i].name);
				memset(&cudbg_err, 0,
				       sizeof(struct cudbg_error));
				/* process_entity[] is 0-based; i is 1-based. */
				rc = process_entity[i-1](cudbg_init, &dbg_buff,
							 &cudbg_err);
			}

			/* On failure, roll back the entity's data region. */
			if (rc) {
				entity_hdr->size = 0;
				dbg_buff.offset = entity_hdr->start_offset;
			} else
				align_debug_buffer(&dbg_buff, entity_hdr);

			if (cudbg_err.sys_err)
				rc = CUDBG_SYSTEM_ERROR;

			entity_hdr->hdr_flags =  rc;
			entity_hdr->sys_err = cudbg_err.sys_err;
			entity_hdr->sys_warn =	cudbg_err.sys_warn;

			/* We don't want to include ext entity size in global
			 * header
			 */
			if (!flag_ext)
				total_size += entity_hdr->size;

			cudbg_hdr->data_len = total_size;
			*outbuf_size = total_size;

			/* consider the size of the ext entity header and data
			 * also
			 */
			if (flag_ext) {
				ext_size += (sizeof(struct cudbg_entity_hdr) +
					     entity_hdr->size);
				entity_hdr->start_offset -= cudbg_hdr->data_len;
				ext_entity_hdr->size = ext_size;
				entity_hdr->next_ext_offset = ext_size;
				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
			}

			if (cudbg_init->use_flash) {
				if (flag_ext) {
					wr_entity_to_flash(handle,
							   &dbg_buff,
							   ext_entity_hdr->
							   start_offset,
							   entity_hdr->
							   size,
							   CUDBG_EXT_ENTITY,
							   ext_size);
				}
				else
					wr_entity_to_flash(handle,
							   &dbg_buff,
							   entity_hdr->\
							   start_offset,
							   entity_hdr->size,
							   i, ext_size);
			}
		}
	}

	/* Second pass: retry the large entities deferred above. */
	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
	     i++) {
		large_entity_code = large_entity_list[i].entity_code;
		if (large_entity_list[i].skip_flag) {
			if (!flag_ext) {
				rc = get_entity_hdr(outbuf, large_entity_code,
						    dbg_buff.size, &entity_hdr);
				if (rc)
					cudbg_hdr->hdr_flags = rc;
			} else {
				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
							     &dbg_buff,
							     &entity_hdr);
				if (rc)
					goto err;

				dbg_buff.offset +=
					sizeof(struct cudbg_entity_hdr);
			}

			/* If fw_attach is 0, then skip entities which
			 * communicates with firmware
			 */
			if (!is_fw_attached(cudbg_init) &&
			    (entity_list[large_entity_code].flag &
			    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
				if (cudbg_init->verbose)
					cudbg_init->print("Skipping %s entity,"\
						  "because fw_attach "\
						  "is 0\n",
						  entity_list[large_entity_code]
						  .name);
				continue;
			}

			entity_hdr->entity_type = large_entity_code;
			entity_hdr->start_offset = dbg_buff.offset;
			if (cudbg_init->verbose)
				cudbg_init->print("Re-trying debug entity: %s\n",
					  entity_list[large_entity_code].name);

			memset(&cudbg_err, 0, sizeof(struct cudbg_error));
			rc = process_entity[large_entity_code - 1](cudbg_init,
								   &dbg_buff,
								   &cudbg_err);
			if (rc) {
				entity_hdr->size = 0;
				dbg_buff.offset = entity_hdr->start_offset;
			} else
				align_debug_buffer(&dbg_buff, entity_hdr);

			if (cudbg_err.sys_err)
				rc = CUDBG_SYSTEM_ERROR;

			entity_hdr->hdr_flags = rc;
			entity_hdr->sys_err = cudbg_err.sys_err;
			entity_hdr->sys_warn =	cudbg_err.sys_warn;

			/* We don't want to include ext entity size in global
			 * header
			 */
			if (!flag_ext)
				total_size += entity_hdr->size;

			cudbg_hdr->data_len = total_size;
			*outbuf_size = total_size;

			/* consider the size of the ext entity header and
			 * data also
			 */
			if (flag_ext) {
				ext_size += (sizeof(struct cudbg_entity_hdr) +
						   entity_hdr->size);
				entity_hdr->start_offset -=
							cudbg_hdr->data_len;
				ext_entity_hdr->size = ext_size;
				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
			}

			if (cudbg_init->use_flash) {
				if (flag_ext)
					wr_entity_to_flash(handle,
							   &dbg_buff,
							   ext_entity_hdr->
							   start_offset,
							   entity_hdr->size,
							   CUDBG_EXT_ENTITY,
							   ext_size);
				else
					wr_entity_to_flash(handle,
							   &dbg_buff,
							   entity_hdr->
							   start_offset,
							   entity_hdr->
							   size,
							   large_entity_list[i].
							   entity_code,
							   ext_size);
			}
		}
	}

	cudbg_hdr->data_len = total_size;
	*outbuf_size = total_size;

	if (flag_ext)
		*outbuf_size += ext_size;

	return 0;
err:
	return rc;
}
523 
524 void reset_skip_entity(void)
525 {
526 	int i;
527 
528 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
529 		large_entity_list[i].skip_flag = 0;
530 }
531 
532 void skip_entity(int entity_code)
533 {
534 	int i;
535 	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
536 	     i++) {
537 		if (large_entity_list[i].entity_code == entity_code)
538 			large_entity_list[i].skip_flag = 1;
539 	}
540 }
541 
542 int is_large_entity(int entity_code)
543 {
544 	int i;
545 
546 	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
547 	     i++) {
548 		if (large_entity_list[i].entity_code == entity_code)
549 			return 1;
550 	}
551 	return 0;
552 }
553 
/*
 * Point *entity_hdr at the header slot for entity i (1-based) in outbuf.
 *
 * The slots start right after the global cudbg_hdr, so slot i lives at
 * hdr_len + (i-1) * sizeof(entity_hdr).  The bounds check uses i (not i-1)
 * so that the whole slot fits inside size.
 *
 * Returns 0 on success or CUDBG_STATUS_SMALL_BUFF if the slot is out of
 * bounds.
 */
int get_entity_hdr(void *outbuf, int i, u32 size,
		   struct cudbg_entity_hdr **entity_hdr)
{
	int rc = 0;
	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

	if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr)*i) > size)
		return CUDBG_STATUS_SMALL_BUFF;

	*entity_hdr = (struct cudbg_entity_hdr *)
		      ((char *)outbuf+cudbg_hdr->hdr_len +
		       (sizeof(struct cudbg_entity_hdr)*(i-1)));
	return rc;
}
568 
569 static int collect_rss(struct cudbg_init *pdbg_init,
570 		       struct cudbg_buffer *dbg_buff,
571 		       struct cudbg_error *cudbg_err)
572 {
573 	struct adapter *padap = pdbg_init->adap;
574 	struct cudbg_buffer scratch_buff;
575 	u32 size;
576 	int rc = 0;
577 
578 	size = padap->chip_params->rss_nentries * sizeof(u16);
579 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
580 	if (rc)
581 		goto err;
582 
583 	rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
584 	if (rc) {
585 		if (pdbg_init->verbose)
586 			pdbg_init->print("%s(), t4_read_rss failed!, rc: %d\n",
587 				 __func__, rc);
588 		cudbg_err->sys_err = rc;
589 		goto err1;
590 	}
591 
592 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
593 	if (rc)
594 		goto err1;
595 
596 	rc = compress_buff(&scratch_buff, dbg_buff);
597 
598 err1:
599 	release_scratch_buff(&scratch_buff, dbg_buff);
600 err:
601 	return rc;
602 }
603 
604 static int collect_sw_state(struct cudbg_init *pdbg_init,
605 			    struct cudbg_buffer *dbg_buff,
606 			    struct cudbg_error *cudbg_err)
607 {
608 	struct adapter *padap = pdbg_init->adap;
609 	struct cudbg_buffer scratch_buff;
610 	struct sw_state *swstate;
611 	u32 size;
612 	int rc = 0;
613 
614 	size = sizeof(struct sw_state);
615 
616 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
617 	if (rc)
618 		goto err;
619 
620 	swstate = (struct sw_state *) scratch_buff.data;
621 
622 	swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
623 	snprintf(swstate->caller_string, sizeof(swstate->caller_string), "%s",
624 	    "FreeBSD");
625 	swstate->os_type = 0;
626 
627 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
628 	if (rc)
629 		goto err1;
630 
631 	rc = compress_buff(&scratch_buff, dbg_buff);
632 
633 err1:
634 	release_scratch_buff(&scratch_buff, dbg_buff);
635 err:
636 	return rc;
637 }
638 
639 static int collect_ddp_stats(struct cudbg_init *pdbg_init,
640 			     struct cudbg_buffer *dbg_buff,
641 			     struct cudbg_error *cudbg_err)
642 {
643 	struct adapter *padap = pdbg_init->adap;
644 	struct cudbg_buffer scratch_buff;
645 	struct tp_usm_stats  *tp_usm_stats_buff;
646 	u32 size;
647 	int rc = 0;
648 
649 	size = sizeof(struct tp_usm_stats);
650 
651 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
652 	if (rc)
653 		goto err;
654 
655 	tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;
656 
657 	/* spin_lock(&padap->stats_lock);	TODO*/
658 	t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
659 	/* spin_unlock(&padap->stats_lock);	TODO*/
660 
661 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
662 	if (rc)
663 		goto err1;
664 
665 	rc = compress_buff(&scratch_buff, dbg_buff);
666 
667 err1:
668 	release_scratch_buff(&scratch_buff, dbg_buff);
669 err:
670 	return rc;
671 }
672 
/*
 * Dump the ULP TX logic analyzer: per-LA read/write pointers and data.
 */
static int collect_ulptx_la(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_ulptx_la *ulptx_la_buff;
	u32 size, i, j;
	int rc = 0;

	size = sizeof(struct struct_ulptx_la);

	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;

	/* Each LA instance's registers are 0x10 apart. */
	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      A_ULP_TX_LA_RDPTR_0 +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      A_ULP_TX_LA_WRPTR_0 +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       A_ULP_TX_LA_RDDATA_0 +
						       0x10 * i);
		/*
		 * The same RDDATA register is read repeatedly; presumably
		 * each read pops the next word from the LA — TODO confirm
		 * against the register spec.
		 */
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
		}
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;

}
720 
721 static int collect_ulprx_la(struct cudbg_init *pdbg_init,
722 			    struct cudbg_buffer *dbg_buff,
723 			    struct cudbg_error *cudbg_err)
724 {
725 	struct adapter *padap = pdbg_init->adap;
726 	struct cudbg_buffer scratch_buff;
727 	struct struct_ulprx_la *ulprx_la_buff;
728 	u32 size;
729 	int rc = 0;
730 
731 	size = sizeof(struct struct_ulprx_la);
732 
733 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
734 	if (rc)
735 		goto err;
736 
737 	ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
738 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
739 	ulprx_la_buff->size = ULPRX_LA_SIZE;
740 
741 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
742 	if (rc)
743 		goto err1;
744 
745 	rc = compress_buff(&scratch_buff, dbg_buff);
746 
747 err1:
748 	release_scratch_buff(&scratch_buff, dbg_buff);
749 err:
750 	return rc;
751 }
752 
753 static int collect_cpl_stats(struct cudbg_init *pdbg_init,
754 			     struct cudbg_buffer *dbg_buff,
755 			     struct cudbg_error *cudbg_err)
756 {
757 	struct adapter *padap = pdbg_init->adap;
758 	struct cudbg_buffer scratch_buff;
759 	struct struct_tp_cpl_stats *tp_cpl_stats_buff;
760 	u32 size;
761 	int rc = 0;
762 
763 	size = sizeof(struct struct_tp_cpl_stats);
764 
765 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
766 	if (rc)
767 		goto err;
768 
769 	tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
770 	tp_cpl_stats_buff->nchan = padap->chip_params->nchan;
771 
772 	/* spin_lock(&padap->stats_lock);	TODO*/
773 	t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
774 	/* spin_unlock(&padap->stats_lock);	TODO*/
775 
776 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
777 	if (rc)
778 		goto err1;
779 
780 	rc = compress_buff(&scratch_buff, dbg_buff);
781 
782 err1:
783 	release_scratch_buff(&scratch_buff, dbg_buff);
784 err:
785 	return rc;
786 }
787 
788 static int collect_wc_stats(struct cudbg_init *pdbg_init,
789 			    struct cudbg_buffer *dbg_buff,
790 			    struct cudbg_error *cudbg_err)
791 {
792 	struct adapter *padap = pdbg_init->adap;
793 	struct cudbg_buffer scratch_buff;
794 	struct struct_wc_stats *wc_stats_buff;
795 	u32 val1;
796 	u32 val2;
797 	u32 size;
798 
799 	int rc = 0;
800 
801 	size = sizeof(struct struct_wc_stats);
802 
803 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
804 	if (rc)
805 		goto err;
806 
807 	wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;
808 
809 	if (!is_t4(padap)) {
810 		val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
811 		val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
812 		wc_stats_buff->wr_cl_success = val1 - val2;
813 		wc_stats_buff->wr_cl_fail = val2;
814 	} else {
815 		wc_stats_buff->wr_cl_success = 0;
816 		wc_stats_buff->wr_cl_fail = 0;
817 	}
818 
819 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
820 	if (rc)
821 		goto err1;
822 
823 	rc = compress_buff(&scratch_buff, dbg_buff);
824 err1:
825 	release_scratch_buff(&scratch_buff, dbg_buff);
826 err:
827 	return rc;
828 }
829 
830 static int mem_desc_cmp(const void *a, const void *b)
831 {
832 	return ((const struct struct_mem_desc *)a)->base -
833 		((const struct struct_mem_desc *)b)->base;
834 }
835 
836 static int fill_meminfo(struct adapter *padap,
837 			struct struct_meminfo *meminfo_buff)
838 {
839 	struct struct_mem_desc *md;
840 	u32 size, lo, hi;
841 	u32 used, alloc;
842 	int n, i, rc = 0;
843 
844 	size = sizeof(struct struct_meminfo);
845 
846 	memset(meminfo_buff->avail, 0,
847 	       ARRAY_SIZE(meminfo_buff->avail) *
848 	       sizeof(struct struct_mem_desc));
849 	memset(meminfo_buff->mem, 0,
850 	       (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
851 	md  = meminfo_buff->mem;
852 
853 	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
854 		meminfo_buff->mem[i].limit = 0;
855 		meminfo_buff->mem[i].idx = i;
856 	}
857 
858 	i = 0;
859 
860 	lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
861 
862 	if (lo & F_EDRAM0_ENABLE) {
863 		hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
864 		meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
865 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
866 					       (G_EDRAM0_SIZE(hi) << 20);
867 		meminfo_buff->avail[i].idx = 0;
868 		i++;
869 	}
870 
871 	if (lo & F_EDRAM1_ENABLE) {
872 		hi =  t4_read_reg(padap, A_MA_EDRAM1_BAR);
873 		meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
874 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
875 					       (G_EDRAM1_SIZE(hi) << 20);
876 		meminfo_buff->avail[i].idx = 1;
877 		i++;
878 	}
879 
880 	if (is_t5(padap)) {
881 		if (lo & F_EXT_MEM0_ENABLE) {
882 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
883 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
884 			meminfo_buff->avail[i].limit =
885 				meminfo_buff->avail[i].base +
886 				(G_EXT_MEM_SIZE(hi) << 20);
887 			meminfo_buff->avail[i].idx = 3;
888 			i++;
889 		}
890 
891 		if (lo & F_EXT_MEM1_ENABLE) {
892 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
893 			meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
894 			meminfo_buff->avail[i].limit =
895 				meminfo_buff->avail[i].base +
896 				(G_EXT_MEM1_SIZE(hi) << 20);
897 			meminfo_buff->avail[i].idx = 4;
898 			i++;
899 		}
900 	} else if (is_t6(padap)) {
901 		if (lo & F_EXT_MEM_ENABLE) {
902 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
903 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
904 			meminfo_buff->avail[i].limit =
905 				meminfo_buff->avail[i].base +
906 				(G_EXT_MEM_SIZE(hi) << 20);
907 			meminfo_buff->avail[i].idx = 2;
908 			i++;
909 		}
910 	}
911 
912 	if (!i) {				   /* no memory available */
913 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
914 		goto err;
915 	}
916 
917 	meminfo_buff->avail_c = i;
918 	qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
919 	    mem_desc_cmp);
920 	(md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
921 	(md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
922 	(md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
923 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
924 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
925 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
926 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
927 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
928 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);
929 
930 	/* the next few have explicit upper bounds */
931 	md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
932 	md->limit = md->base - 1 +
933 		    t4_read_reg(padap,
934 				A_TP_PMM_TX_PAGE_SIZE) *
935 				G_PMTXMAXPAGE(t4_read_reg(padap,
936 							  A_TP_PMM_TX_MAX_PAGE)
937 					     );
938 	md++;
939 
940 	md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
941 	md->limit = md->base - 1 +
942 		    t4_read_reg(padap,
943 				A_TP_PMM_RX_PAGE_SIZE) *
944 				G_PMRXMAXPAGE(t4_read_reg(padap,
945 							  A_TP_PMM_RX_MAX_PAGE)
946 					      );
947 	md++;
948 	if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
949 		if (chip_id(padap) <= CHELSIO_T5) {
950 			hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
951 			md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
952 		} else {
953 			hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
954 			md->base = t4_read_reg(padap,
955 					       A_LE_DB_HASH_TBL_BASE_ADDR);
956 		}
957 		md->limit = 0;
958 	} else {
959 		md->base = 0;
960 		md->idx = ARRAY_SIZE(region);  /* hide it */
961 	}
962 	md++;
963 #define ulp_region(reg) \
964 	{\
965 		md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
966 		(md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
967 	}
968 
969 	ulp_region(RX_ISCSI);
970 	ulp_region(RX_TDDP);
971 	ulp_region(TX_TPT);
972 	ulp_region(RX_STAG);
973 	ulp_region(RX_RQ);
974 	ulp_region(RX_RQUDP);
975 	ulp_region(RX_PBL);
976 	ulp_region(TX_PBL);
977 #undef ulp_region
978 	md->base = 0;
979 	md->idx = ARRAY_SIZE(region);
980 	if (!is_t4(padap)) {
981 		u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
982 		u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);
983 		if (is_t5(padap)) {
984 			if (sge_ctrl & F_VFIFO_ENABLE)
985 				size = G_DBVFIFO_SIZE(fifo_size);
986 		} else
987 			size = G_T6_DBVFIFO_SIZE(fifo_size);
988 
989 		if (size) {
990 			md->base = G_BASEADDR(t4_read_reg(padap,
991 							  A_SGE_DBVFIFO_BADDR));
992 			md->limit = md->base + (size << 2) - 1;
993 		}
994 	}
995 
996 	md++;
997 
998 	md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
999 	md->limit = 0;
1000 	md++;
1001 	md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
1002 	md->limit = 0;
1003 	md++;
1004 #ifndef __NO_DRIVER_OCQ_SUPPORT__
1005 	/*md->base = padap->vres.ocq.start;*/
1006 	/*if (adap->vres.ocq.size)*/
1007 	/*	  md->limit = md->base + adap->vres.ocq.size - 1;*/
1008 	/*else*/
1009 	md->idx = ARRAY_SIZE(region);  /* hide it */
1010 	md++;
1011 #endif
1012 
1013 	/* add any address-space holes, there can be up to 3 */
1014 	for (n = 0; n < i - 1; n++)
1015 		if (meminfo_buff->avail[n].limit <
1016 		    meminfo_buff->avail[n + 1].base)
1017 			(md++)->base = meminfo_buff->avail[n].limit;
1018 
1019 	if (meminfo_buff->avail[n].limit)
1020 		(md++)->base = meminfo_buff->avail[n].limit;
1021 
1022 	n = (int) (md - meminfo_buff->mem);
1023 	meminfo_buff->mem_c = n;
1024 
1025 	qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
1026 	    mem_desc_cmp);
1027 
1028 	lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
1029 	hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
1030 	meminfo_buff->up_ram_lo = lo;
1031 	meminfo_buff->up_ram_hi = hi;
1032 
1033 	lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
1034 	hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
1035 	meminfo_buff->up_extmem2_lo = lo;
1036 	meminfo_buff->up_extmem2_hi = hi;
1037 
1038 	lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
1039 	meminfo_buff->rx_pages_data[0] =  G_PMRXMAXPAGE(lo);
1040 	meminfo_buff->rx_pages_data[1] =
1041 		t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
1042 	meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1 ;
1043 
1044 	lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
1045 	hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
1046 	meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
1047 	meminfo_buff->tx_pages_data[1] =
1048 		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
1049 	meminfo_buff->tx_pages_data[2] =
1050 		hi >= (1 << 20) ? 'M' : 'K';
1051 	meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);
1052 
1053 	for (i = 0; i < 4; i++) {
1054 		if (chip_id(padap) > CHELSIO_T5)
1055 			lo = t4_read_reg(padap,
1056 					 A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
1057 		else
1058 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
1059 		if (is_t5(padap)) {
1060 			used = G_T5_USED(lo);
1061 			alloc = G_T5_ALLOC(lo);
1062 		} else {
1063 			used = G_USED(lo);
1064 			alloc = G_ALLOC(lo);
1065 		}
1066 		meminfo_buff->port_used[i] = used;
1067 		meminfo_buff->port_alloc[i] = alloc;
1068 	}
1069 
1070 	for (i = 0; i < padap->chip_params->nchan; i++) {
1071 		if (chip_id(padap) > CHELSIO_T5)
1072 			lo = t4_read_reg(padap,
1073 					 A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
1074 		else
1075 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
1076 		if (is_t5(padap)) {
1077 			used = G_T5_USED(lo);
1078 			alloc = G_T5_ALLOC(lo);
1079 		} else {
1080 			used = G_USED(lo);
1081 			alloc = G_ALLOC(lo);
1082 		}
1083 		meminfo_buff->loopback_used[i] = used;
1084 		meminfo_buff->loopback_alloc[i] = alloc;
1085 	}
1086 err:
1087 	return rc;
1088 }
1089 
1090 static int collect_meminfo(struct cudbg_init *pdbg_init,
1091 			   struct cudbg_buffer *dbg_buff,
1092 			   struct cudbg_error *cudbg_err)
1093 {
1094 	struct adapter *padap = pdbg_init->adap;
1095 	struct cudbg_buffer scratch_buff;
1096 	struct struct_meminfo *meminfo_buff;
1097 	int rc = 0;
1098 	u32 size;
1099 
1100 	size = sizeof(struct struct_meminfo);
1101 
1102 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1103 	if (rc)
1104 		goto err;
1105 
1106 	meminfo_buff = (struct struct_meminfo *)scratch_buff.data;
1107 
1108 	rc = fill_meminfo(padap, meminfo_buff);
1109 	if (rc)
1110 		goto err;
1111 
1112 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1113 	if (rc)
1114 		goto err1;
1115 
1116 	rc = compress_buff(&scratch_buff, dbg_buff);
1117 err1:
1118 	release_scratch_buff(&scratch_buff, dbg_buff);
1119 err:
1120 	return rc;
1121 }
1122 
1123 static int collect_lb_stats(struct cudbg_init *pdbg_init,
1124 			    struct cudbg_buffer *dbg_buff,
1125 			    struct cudbg_error *cudbg_err)
1126 {
1127 	struct adapter *padap = pdbg_init->adap;
1128 	struct cudbg_buffer scratch_buff;
1129 	struct lb_port_stats *tmp_stats;
1130 	struct struct_lb_stats *lb_stats_buff;
1131 	u32 i, n, size;
1132 	int rc = 0;
1133 
1134 	rc = padap->params.nports;
1135 	if (rc < 0)
1136 		goto err;
1137 
1138 	n = rc;
1139 	size = sizeof(struct struct_lb_stats) +
1140 	       n * sizeof(struct lb_port_stats);
1141 
1142 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1143 	if (rc)
1144 		goto err;
1145 
1146 	lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;
1147 
1148 	lb_stats_buff->nchan = n;
1149 	tmp_stats = lb_stats_buff->s;
1150 
1151 	for (i = 0; i < n; i += 2, tmp_stats += 2) {
1152 		t4_get_lb_stats(padap, i, tmp_stats);
1153 		t4_get_lb_stats(padap, i + 1, tmp_stats+1);
1154 	}
1155 
1156 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1157 	if (rc)
1158 		goto err1;
1159 
1160 	rc = compress_buff(&scratch_buff, dbg_buff);
1161 err1:
1162 	release_scratch_buff(&scratch_buff, dbg_buff);
1163 err:
1164 	return rc;
1165 }
1166 
1167 static int collect_rdma_stats(struct cudbg_init *pdbg_init,
1168 			      struct cudbg_buffer *dbg_buff,
1169 			      struct cudbg_error *cudbg_er)
1170 {
1171 	struct adapter *padap = pdbg_init->adap;
1172 	struct cudbg_buffer scratch_buff;
1173 	struct tp_rdma_stats *rdma_stats_buff;
1174 	u32 size;
1175 	int rc = 0;
1176 
1177 	size = sizeof(struct tp_rdma_stats);
1178 
1179 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1180 	if (rc)
1181 		goto err;
1182 
1183 	rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;
1184 
1185 	/* spin_lock(&padap->stats_lock);	TODO*/
1186 	t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
1187 	/* spin_unlock(&padap->stats_lock);	TODO*/
1188 
1189 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1190 	if (rc)
1191 		goto err1;
1192 
1193 	rc = compress_buff(&scratch_buff, dbg_buff);
1194 err1:
1195 	release_scratch_buff(&scratch_buff, dbg_buff);
1196 err:
1197 	return rc;
1198 }
1199 
/*
 * Collect core-clock-derived timer information (struct struct_clk_info).
 *
 * All TP timers are expressed in units of the core clock; this routine
 * converts the raw register values into microsecond quantities using
 * the VPD core-clock frequency.  Fails with
 * CUDBG_STATUS_CCLK_NOT_DEFINED if the clock is unknown.
 */
static int collect_clk_info(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct struct_clk_info *clk_info_buff;
	u64 tp_tick_us;
	int size;
	int rc = 0;

	/* Cannot convert ticks to time without the core clock frequency. */
	if (!padap->params.vpd.cclk) {
		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
		goto err;
	}

	size = sizeof(struct struct_clk_info);
	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	clk_info_buff = (struct struct_clk_info *) scratch_buff.data;

	/* Core clock period in picoseconds (cclk is in kHz). */
	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk;
	clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
	clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
	clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
	/* One TP timer tick, in microseconds. */
	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
	/* Delayed-ACK timer in us: dack tick length times the register count. */
	clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
				      clk_info_buff->dack_re) / 1000000) *
				     t4_read_reg(padap, A_TP_DACK_TIMER);

	/* Remaining timers scale a raw tick-count register by tp_tick_us. */
	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);

	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);

	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);

	clk_info_buff->initial_srtt =
		tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;

}
1266 
/*
 * Collect per-port MAC statistics (versioned struct_mac_stats_rev1).
 * Writes a version header followed by one t4_get_port_stats() snapshot
 * per port, then emits the whole structure compressed.
 */
static int collect_macstats(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct struct_mac_stats_rev1 *mac_stats_buff;
	u32 i, n, size;
	int rc = 0;

	rc = padap->params.nports;
	if (rc < 0)
		goto err;

	n = rc;
	size = sizeof(struct struct_mac_stats_rev1);

	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;

	/* Version header lets the decoder distinguish stats revisions. */
	mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
	mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
	mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
				       sizeof(struct cudbg_ver_hdr);

	mac_stats_buff->port_count = n;
	for (i = 0; i <  mac_stats_buff->port_count; i++)
		/*
		 * NOTE(review): upstream comment says this is incorrect and
		 * should index by hport rather than i -- verify against the
		 * port-mapping used elsewhere in the driver.
		 */
		t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
1310 
/*
 * Collect the CIM PIF logic-analyzer capture.
 *
 * The buffer holds two captures of CIM_PIFLA_SIZE entries, 6 u32s per
 * entry (hence 2 * CIM_PIFLA_SIZE * 6 words after the header).  The
 * second half begins at data + 6 * CIM_PIFLA_SIZE and is passed as the
 * second output pointer to t4_cim_read_pif_la().
 */
static int collect_cim_pif_la(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	struct cim_pif_la *cim_pif_la_buff;
	u32 size;
	int rc = 0;

	size = sizeof(struct cim_pif_la) +
	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);

	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
	cim_pif_la_buff->size = CIM_PIFLA_SIZE;

	/* First half: PIF LA data; second half: second capture region. */
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
1345 
1346 static int collect_tp_la(struct cudbg_init *pdbg_init,
1347 			 struct cudbg_buffer *dbg_buff,
1348 			 struct cudbg_error *cudbg_err)
1349 {
1350 	struct adapter *padap = pdbg_init->adap;
1351 	struct cudbg_buffer scratch_buff;
1352 	struct struct_tp_la *tp_la_buff;
1353 	u32 size;
1354 	int rc = 0;
1355 
1356 	size = sizeof(struct struct_tp_la) + TPLA_SIZE *  sizeof(u64);
1357 
1358 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1359 	if (rc)
1360 		goto err;
1361 
1362 	tp_la_buff = (struct struct_tp_la *) scratch_buff.data;
1363 
1364 	tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
1365 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1366 
1367 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1368 	if (rc)
1369 		goto err1;
1370 
1371 	rc = compress_buff(&scratch_buff, dbg_buff);
1372 err1:
1373 	release_scratch_buff(&scratch_buff, dbg_buff);
1374 err:
1375 	return rc;
1376 }
1377 
1378 static int collect_fcoe_stats(struct cudbg_init *pdbg_init,
1379 			      struct cudbg_buffer *dbg_buff,
1380 			      struct cudbg_error *cudbg_err)
1381 {
1382 	struct adapter *padap = pdbg_init->adap;
1383 	struct cudbg_buffer scratch_buff;
1384 	struct struct_tp_fcoe_stats  *tp_fcoe_stats_buff;
1385 	u32 size;
1386 	int rc = 0;
1387 
1388 	size = sizeof(struct struct_tp_fcoe_stats);
1389 
1390 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1391 	if (rc)
1392 		goto err;
1393 
1394 	tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
1395 
1396 	/* spin_lock(&padap->stats_lock);	TODO*/
1397 	t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
1398 	t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
1399 	if (padap->chip_params->nchan == NCHAN) {
1400 		t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
1401 		t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
1402 	}
1403 	/* spin_unlock(&padap->stats_lock);	TODO*/
1404 
1405 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1406 	if (rc)
1407 		goto err1;
1408 
1409 	rc = compress_buff(&scratch_buff, dbg_buff);
1410 err1:
1411 	release_scratch_buff(&scratch_buff, dbg_buff);
1412 err:
1413 	return rc;
1414 }
1415 
1416 static int collect_tp_err_stats(struct cudbg_init *pdbg_init,
1417 				struct cudbg_buffer *dbg_buff,
1418 				struct cudbg_error *cudbg_err)
1419 {
1420 	struct adapter *padap = pdbg_init->adap;
1421 	struct cudbg_buffer scratch_buff;
1422 	struct struct_tp_err_stats *tp_err_stats_buff;
1423 	u32 size;
1424 	int rc = 0;
1425 
1426 	size = sizeof(struct struct_tp_err_stats);
1427 
1428 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1429 	if (rc)
1430 		goto err;
1431 
1432 	tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
1433 
1434 	/* spin_lock(&padap->stats_lock);	TODO*/
1435 	t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
1436 	/* spin_unlock(&padap->stats_lock);	TODO*/
1437 	tp_err_stats_buff->nchan = padap->chip_params->nchan;
1438 
1439 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1440 	if (rc)
1441 		goto err1;
1442 
1443 	rc = compress_buff(&scratch_buff, dbg_buff);
1444 err1:
1445 	release_scratch_buff(&scratch_buff, dbg_buff);
1446 err:
1447 	return rc;
1448 }
1449 
1450 static int collect_tcp_stats(struct cudbg_init *pdbg_init,
1451 			     struct cudbg_buffer *dbg_buff,
1452 			     struct cudbg_error *cudbg_err)
1453 {
1454 	struct adapter *padap = pdbg_init->adap;
1455 	struct cudbg_buffer scratch_buff;
1456 	struct struct_tcp_stats *tcp_stats_buff;
1457 	u32 size;
1458 	int rc = 0;
1459 
1460 	size = sizeof(struct struct_tcp_stats);
1461 
1462 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1463 	if (rc)
1464 		goto err;
1465 
1466 	tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
1467 
1468 	/* spin_lock(&padap->stats_lock);	TODO*/
1469 	t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
1470 	/* spin_unlock(&padap->stats_lock);	TODO*/
1471 
1472 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1473 	if (rc)
1474 		goto err1;
1475 
1476 	rc = compress_buff(&scratch_buff, dbg_buff);
1477 err1:
1478 	release_scratch_buff(&scratch_buff, dbg_buff);
1479 err:
1480 	return rc;
1481 }
1482 
1483 static int collect_hw_sched(struct cudbg_init *pdbg_init,
1484 			    struct cudbg_buffer *dbg_buff,
1485 			    struct cudbg_error *cudbg_err)
1486 {
1487 	struct adapter *padap = pdbg_init->adap;
1488 	struct cudbg_buffer scratch_buff;
1489 	struct struct_hw_sched *hw_sched_buff;
1490 	u32 size;
1491 	int i, rc = 0;
1492 
1493 	if (!padap->params.vpd.cclk) {
1494 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1495 		goto err;
1496 	}
1497 
1498 	size = sizeof(struct struct_hw_sched);
1499 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1500 	if (rc)
1501 		goto err;
1502 
1503 	hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
1504 
1505 	hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
1506 	hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
1507 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1508 
1509 	for (i = 0; i < NTX_SCHED; ++i) {
1510 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1511 		    &hw_sched_buff->ipg[i], 1);
1512 	}
1513 
1514 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1515 	if (rc)
1516 		goto err1;
1517 
1518 	rc = compress_buff(&scratch_buff, dbg_buff);
1519 err1:
1520 	release_scratch_buff(&scratch_buff, dbg_buff);
1521 err:
1522 	return rc;
1523 }
1524 
1525 static int collect_pm_stats(struct cudbg_init *pdbg_init,
1526 			    struct cudbg_buffer *dbg_buff,
1527 			    struct cudbg_error *cudbg_err)
1528 {
1529 	struct adapter *padap = pdbg_init->adap;
1530 	struct cudbg_buffer scratch_buff;
1531 	struct struct_pm_stats *pm_stats_buff;
1532 	u32 size;
1533 	int rc = 0;
1534 
1535 	size = sizeof(struct struct_pm_stats);
1536 
1537 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1538 	if (rc)
1539 		goto err;
1540 
1541 	pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
1542 
1543 	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1544 	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1545 
1546 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1547 	if (rc)
1548 		goto err1;
1549 
1550 	rc = compress_buff(&scratch_buff, dbg_buff);
1551 err1:
1552 	release_scratch_buff(&scratch_buff, dbg_buff);
1553 err:
1554 	return rc;
1555 }
1556 
1557 static int collect_path_mtu(struct cudbg_init *pdbg_init,
1558 			    struct cudbg_buffer *dbg_buff,
1559 			    struct cudbg_error *cudbg_err)
1560 {
1561 	struct adapter *padap = pdbg_init->adap;
1562 	struct cudbg_buffer scratch_buff;
1563 	u32 size;
1564 	int rc = 0;
1565 
1566 	size = NMTUS  * sizeof(u16);
1567 
1568 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1569 	if (rc)
1570 		goto err;
1571 
1572 	t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
1573 
1574 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1575 	if (rc)
1576 		goto err1;
1577 
1578 	rc = compress_buff(&scratch_buff, dbg_buff);
1579 err1:
1580 	release_scratch_buff(&scratch_buff, dbg_buff);
1581 err:
1582 	return rc;
1583 }
1584 
1585 static int collect_rss_key(struct cudbg_init *pdbg_init,
1586 			   struct cudbg_buffer *dbg_buff,
1587 			   struct cudbg_error *cudbg_err)
1588 {
1589 	struct adapter *padap = pdbg_init->adap;
1590 	struct cudbg_buffer scratch_buff;
1591 	u32 size;
1592 
1593 	int rc = 0;
1594 
1595 	size = 10  * sizeof(u32);
1596 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1597 	if (rc)
1598 		goto err;
1599 
1600 	t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
1601 
1602 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1603 	if (rc)
1604 		goto err1;
1605 
1606 	rc = compress_buff(&scratch_buff, dbg_buff);
1607 err1:
1608 	release_scratch_buff(&scratch_buff, dbg_buff);
1609 err:
1610 	return rc;
1611 }
1612 
1613 static int collect_rss_config(struct cudbg_init *pdbg_init,
1614 			      struct cudbg_buffer *dbg_buff,
1615 			      struct cudbg_error *cudbg_err)
1616 {
1617 	struct adapter *padap = pdbg_init->adap;
1618 	struct cudbg_buffer scratch_buff;
1619 	struct rss_config *rss_conf;
1620 	int rc;
1621 	u32 size;
1622 
1623 	size = sizeof(struct rss_config);
1624 
1625 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1626 	if (rc)
1627 		goto err;
1628 
1629 	rss_conf =  (struct rss_config *)scratch_buff.data;
1630 
1631 	rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
1632 	rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
1633 	rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
1634 	rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
1635 	rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
1636 	rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
1637 	rss_conf->chip = padap->params.chipid;
1638 
1639 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1640 	if (rc)
1641 		goto err1;
1642 
1643 	rc = compress_buff(&scratch_buff, dbg_buff);
1644 
1645 err1:
1646 	release_scratch_buff(&scratch_buff, dbg_buff);
1647 err:
1648 	return rc;
1649 }
1650 
1651 static int collect_rss_vf_config(struct cudbg_init *pdbg_init,
1652 				 struct cudbg_buffer *dbg_buff,
1653 				 struct cudbg_error *cudbg_err)
1654 {
1655 	struct adapter *padap = pdbg_init->adap;
1656 	struct cudbg_buffer scratch_buff;
1657 	struct rss_vf_conf *vfconf;
1658 	int vf, rc, vf_count;
1659 	u32 size;
1660 
1661 	vf_count = padap->chip_params->vfcount;
1662 	size = vf_count * sizeof(*vfconf);
1663 
1664 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1665 	if (rc)
1666 		goto err;
1667 
1668 	vfconf =  (struct rss_vf_conf *)scratch_buff.data;
1669 
1670 	for (vf = 0; vf < vf_count; vf++) {
1671 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1672 				      &vfconf[vf].rss_vf_vfh, 1);
1673 	}
1674 
1675 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1676 	if (rc)
1677 		goto err1;
1678 
1679 	rc = compress_buff(&scratch_buff, dbg_buff);
1680 
1681 err1:
1682 	release_scratch_buff(&scratch_buff, dbg_buff);
1683 err:
1684 	return rc;
1685 }
1686 
1687 static int collect_rss_pf_config(struct cudbg_init *pdbg_init,
1688 				 struct cudbg_buffer *dbg_buff,
1689 				 struct cudbg_error *cudbg_err)
1690 {
1691 	struct cudbg_buffer scratch_buff;
1692 	struct rss_pf_conf *pfconf;
1693 	struct adapter *padap = pdbg_init->adap;
1694 	u32 rss_pf_map, rss_pf_mask, size;
1695 	int pf, rc;
1696 
1697 	size = 8  * sizeof(*pfconf);
1698 
1699 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1700 	if (rc)
1701 		goto err;
1702 
1703 	pfconf =  (struct rss_pf_conf *)scratch_buff.data;
1704 
1705 	rss_pf_map = t4_read_rss_pf_map(padap, 1);
1706 	rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
1707 
1708 	for (pf = 0; pf < 8; pf++) {
1709 		pfconf[pf].rss_pf_map = rss_pf_map;
1710 		pfconf[pf].rss_pf_mask = rss_pf_mask;
1711 		/* no return val */
1712 		t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
1713 	}
1714 
1715 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1716 	if (rc)
1717 		goto err1;
1718 
1719 	rc = compress_buff(&scratch_buff, dbg_buff);
1720 err1:
1721 	release_scratch_buff(&scratch_buff, dbg_buff);
1722 err:
1723 	return rc;
1724 }
1725 
1726 static int check_valid(u32 *buf, int type)
1727 {
1728 	int index;
1729 	int bit;
1730 	int bit_pos = 0;
1731 
1732 	switch (type) {
1733 	case CTXT_EGRESS:
1734 		bit_pos = 176;
1735 		break;
1736 	case CTXT_INGRESS:
1737 		bit_pos = 141;
1738 		break;
1739 	case CTXT_FLM:
1740 		bit_pos = 89;
1741 		break;
1742 	}
1743 	index = bit_pos / 32;
1744 	bit =  bit_pos % 32;
1745 
1746 	return buf[index] & (1U << bit);
1747 }
1748 
1749 /**
1750  * Get EGRESS, INGRESS, FLM, and CNM max qid.
1751  *
1752  * For EGRESS and INGRESS, do the following calculation.
1753  * max_qid = (DBQ/IMSG context region size in bytes) /
1754  *	     (size of context in bytes).
1755  *
1756  * For FLM, do the following calculation.
1757  * max_qid = (FLM cache region size in bytes) /
1758  *	     ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
1759  *
1760  * There's a 1-to-1 mapping between FLM and CNM if there's no header splitting
1761  * enabled; i.e., max CNM qid is equal to max FLM qid. However, if header
1762  * splitting is enabled, then max CNM qid is half of max FLM qid.
1763  */
static int get_max_ctxt_qid(struct adapter *padap,
			    struct struct_meminfo *meminfo,
			    u32 *max_ctx_qid, u8 nelem)
{
	u32 i, idx, found = 0;

	/* Caller must provide room for all four queue types. */
	if (nelem != (CTXT_CNM + 1))
		return -EINVAL;

	for (i = 0; i < meminfo->mem_c; i++) {
		if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
			continue;                        /* skip holes */

		idx = meminfo->mem[i].idx;
		/* Get DBQ, IMSG, and FLM context region size */
		if (idx <= CTXT_FLM) {
			/*
			 * A zero limit means it was never filled in; derive
			 * it from the next region's base (or top of memory
			 * for the last entry).  Note: this mutates meminfo.
			 */
			if (!(meminfo->mem[i].limit))
				meminfo->mem[i].limit =
					i < meminfo->mem_c - 1 ?
					meminfo->mem[i + 1].base - 1 : ~0;

			if (idx < CTXT_FLM) {
				/* Get EGRESS and INGRESS max qid. */
				max_ctx_qid[idx] = (meminfo->mem[i].limit -
						    meminfo->mem[i].base + 1) /
						   CUDBG_CTXT_SIZE_BYTES;
				found++;
			} else {
				/* Get FLM and CNM max qid. */
				u32 value, edram_ptr_count;
				u8 bytes_per_ptr = 8;
				u8 nohdr;

				value = t4_read_reg(padap, A_SGE_FLM_CFG);

				/* Check if header splitting is enabled. */
				nohdr = (value >> S_NOHDR) & 1U;

				/* Get the number of pointers in EDRAM per
				 * qid in units of 32.
				 */
				edram_ptr_count = 32 *
						  (1U << G_EDRAMPTRCNT(value));

				/* EDRAMPTRCNT value of 3 is reserved.
				 * So don't exceed 128.
				 */
				if (edram_ptr_count > 128)
					edram_ptr_count = 128;

				max_ctx_qid[idx] = (meminfo->mem[i].limit -
						    meminfo->mem[i].base + 1) /
						   (edram_ptr_count *
						    bytes_per_ptr);
				found++;

				/* CNM has 1-to-1 mapping with FLM.
				 * However, if header splitting is enabled,
				 * then max CNM qid is half of max FLM qid.
				 */
				max_ctx_qid[CTXT_CNM] = nohdr ?
							max_ctx_qid[idx] :
							max_ctx_qid[idx] >> 1;

				/* One more increment for CNM */
				found++;
			}
		}
		/* Stop scanning once all four queue types are resolved. */
		if (found == nelem)
			break;
	}

	/* Sanity check. Ensure the values are within known max. */
	max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
					 M_CTXTQID);
	max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
					  CUDBG_MAX_INGRESS_QIDS);
	max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
				      CUDBG_MAX_FL_QIDS);
	max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
				      CUDBG_MAX_CNM_QIDS);
	return 0;
}
1847 
/*
 * Collect SGE queue contexts (EGRESS, INGRESS, FLM, and CNM).
 *
 * Sizes the scratch buffer for the max qid of each queue type; if that
 * does not fit, falls back to CUDBG_LOWMEM_MAX_CTXT_QIDS valid contexts
 * per type.  Only contexts whose valid bit is set (check_valid) are
 * stored.  The collected data is compressed out in CUDBG_CHUNK_SIZE
 * pieces.
 */
static int collect_dump_context(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct cudbg_buffer temp_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 size = 0, next_offset = 0, total_size = 0;
	struct cudbg_ch_cntxt *buff = NULL;
	struct struct_meminfo meminfo;
	int bytes = 0;
	int rc = 0;
	u32 i, j;
	u32 max_ctx_qid[CTXT_CNM + 1];
	bool limit_qid = false;
	u32 qid_count = 0;

	rc = fill_meminfo(padap, &meminfo);
	if (rc)
		goto err;

	/* Get max valid qid for each type of queue */
	rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
	if (rc)
		goto err;

	/* There are four types of queues. Collect context upto max
	 * qid of each type of queue.
	 */
	for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
		size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];

	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
		/* Not enough scratch Memory available.
		 * Collect context of at least CUDBG_LOWMEM_MAX_CTXT_QIDS
		 * for each queue type.
		 */
		size = 0;
		for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
			size += sizeof(struct cudbg_ch_cntxt) *
				CUDBG_LOWMEM_MAX_CTXT_QIDS;

		limit_qid = true;
		rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
		if (rc)
			goto err;
	}

	buff = (struct cudbg_ch_cntxt *)scratch_buff.data;

	/* Collect context data */
	for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
		qid_count = 0;
		for (j = 0; j < max_ctx_qid[i]; j++) {
			read_sge_ctxt(pdbg_init, j, i, buff->data);

			/* Only keep contexts whose valid bit is set. */
			rc = check_valid(buff->data, i);
			if (rc) {
				buff->cntxt_type = i;
				buff->cntxt_id = j;
				buff++;
				total_size += sizeof(struct cudbg_ch_cntxt);

				/*
				 * CNM contexts share qids with FLM, so a
				 * valid FLM context implies a CNM one too.
				 */
				if (i == CTXT_FLM) {
					read_sge_ctxt(pdbg_init, j, CTXT_CNM,
						      buff->data);
					buff->cntxt_type = CTXT_CNM;
					buff->cntxt_id = j;
					buff++;
					total_size +=
						sizeof(struct cudbg_ch_cntxt);
				}
				qid_count++;
			}

			/* If there's not enough space to collect more qids,
			 * then bail and move on to next queue type.
			 */
			if (limit_qid &&
			    qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
				break;
		}
	}

	/* Shrink the reported size to what was actually collected. */
	scratch_buff.size = total_size;
	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	/* Splitting buffer and writing in terms of CUDBG_CHUNK_SIZE */
	while (total_size > 0) {
		bytes = min_t(unsigned long, (unsigned long)total_size,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		temp_buff.size = bytes;
		temp_buff.data = (void *)((char *)scratch_buff.data +
					  next_offset);

		rc = compress_buff(&temp_buff, dbg_buff);
		if (rc)
			goto err1;

		total_size -= bytes;
		next_offset += bytes;
	}

err1:
	/* Restore the full allocation size so the release matches the get. */
	scratch_buff.size = size;
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
1960 
/*
 * Collect the firmware device log.
 *
 * NOTE: the entire implementation is compiled out behind "#ifdef
 * notyet"; as built, this entity always returns
 * CUDBG_STATUS_NOT_IMPLEMENTED.
 */
static int collect_fw_devlog(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
#ifdef notyet
	struct adapter *padap = pdbg_init->adap;
	struct devlog_params *dparams = &padap->params.devlog;
	struct cudbg_param *params = NULL;
	struct cudbg_buffer scratch_buff;
	u32 offset;
	int rc = 0, i;

	rc = t4_init_devlog_ncores_params(padap, 1);

	if (rc < 0) {
		pdbg_init->print("%s(), t4_init_devlog_params failed!, rc: "\
				 "%d\n", __func__, rc);
		/*
		 * Firmware query failed; fall back to devlog parameters
		 * supplied by the caller, if any were passed in.
		 */
		for (i = 0; i < pdbg_init->dbg_params_cnt; i++) {
			if (pdbg_init->dbg_params[i].param_type ==
			    CUDBG_DEVLOG_PARAM) {
				params = &pdbg_init->dbg_params[i];
				break;
			}
		}

		if (params) {
			dparams->memtype = params->u.devlog_param.memtype;
			dparams->start = params->u.devlog_param.start;
			dparams->size = params->u.devlog_param.size;
		} else {
			cudbg_err->sys_err = rc;
			goto err;
		}
	}

	rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);

	if (rc)
		goto err;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		offset = scratch_buff.offset;
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)((char *)scratch_buff.data +
					     offset), 1);

		if (rc) {
			pdbg_init->print("%s(), t4_memory_rw failed!, rc: "\
					 "%d\n", __func__, rc);
			cudbg_err->sys_err = rc;
			goto err1;
		}
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
#endif
	return (CUDBG_STATUS_NOT_IMPLEMENTED);
}
2032 /* CIM OBQ */
2033 
/* Collect CIM OBQ 0 (ULP0). */
static int collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{

	return (read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0));
}
2044 
/* Collect CIM OBQ 1 (ULP1). */
static int collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{

	return (read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1));
}
2055 
/* Collect CIM OBQ 2 (ULP2). */
static int collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{

	return (read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2));
}
2066 
/* Collect CIM OBQ 3 (ULP3). */
static int collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{

	return (read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3));
}
2077 
/* Collect CIM OBQ 4 (SGE). */
static int collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{

	return (read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4));
}
2088 
/* Collect CIM OBQ 5 (NC-SI). */
static int collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{

	return (read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5));
}
2099 
/* Collect CIM OBQ 6 (SGE RX queue 0). */
static int collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				 struct cudbg_buffer *dbg_buff,
				 struct cudbg_error *cudbg_err)
{

	return (read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6));
}
2110 
/* Collect CIM OBQ 7 (SGE RX queue 1). */
static int collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				 struct cudbg_buffer *dbg_buff,
				 struct cudbg_error *cudbg_err)
{

	return (read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7));
}
2121 
/*
 * Read one CIM outbound queue (OBQ) identified by qid and emit its
 * contents compressed.  The buffer is sized for the worst case; the
 * actual payload size is set from the word count t4_read_cim_obq()
 * returns.
 */
static int read_cim_obq(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err, int qid)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 qsize;
	int rc;
	int no_of_read_words;

	/* collect CIM OBQ */
	qsize =  6 * CIM_OBQ_SIZE * 4 *  sizeof(u32);
	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
	if (rc)
		goto err;

	/* t4_read_cim_obq will return no. of read words or error */
	/*
	 * NOTE(review): the offset is added to a u32 pointer, so it is
	 * scaled by sizeof(u32); collect_fw_devlog adds the same kind of
	 * offset to a char pointer instead.  Verify which scaling of
	 * scratch_buff.offset is intended here.
	 */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)((u32 *)scratch_buff.data +
					   scratch_buff.offset), qsize);

	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		/* Map "read nothing" to a generic system error. */
		if (no_of_read_words == 0)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_read_cim_obq failed (%d)\n",
				 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	/* Report only the bytes actually read (4 bytes per word). */
	scratch_buff.size = no_of_read_words * 4;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
2173 
2174 /* CIM IBQ */
2175 
static int collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* TP0 is CIM inbound queue 0. */
	return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}
2185 
static int collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* TP1 is CIM inbound queue 1. */
	return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}
2195 
static int collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* ULP is CIM inbound queue 2. */
	return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}
2205 
static int collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	/* SGE0 is CIM inbound queue 3. */
	return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}
2215 
static int collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	/* SGE1 is CIM inbound queue 4. */
	return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}
2225 
static int collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	/* NC-SI is CIM inbound queue 5. */
	return read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
2235 
/*
 * Dump the contents of CIM inbound queue "qid" into dbg_buff as one
 * compressed entity.
 *
 * @pdbg_init: dump context (adapter handle, verbose flag, print hook).
 * @dbg_buff:  destination debug buffer; scratch space is carved out of it.
 * @cudbg_err: per-entity error record; sys_err is set on hardware failure.
 * @qid:       IBQ index, passed straight through to t4_read_cim_ibq().
 *
 * Returns 0 on success, CUDBG_SYSTEM_ERROR if the queue read returned no
 * data, or a negative error propagated from the helpers.
 */
static int read_cim_ibq(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff,
			struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer scratch_buff;
	u32 qsize;
	int rc;
	int no_of_read_words;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 *  sizeof(u32);
	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);

	if (rc)
		goto err;

	/* t4_read_cim_ibq will return no. of read words or error */
	/*
	 * NOTE(review): scratch_buff.offset is added in u32 units here,
	 * while other collectors in this file add it to a char pointer
	 * (byte units) -- confirm which interpretation is intended.
	 */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)((u32 *)scratch_buff.data +
					   scratch_buff.offset), qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (no_of_read_words == 0)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_read_cim_ibq failed (%d)\n",
				 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	/* Success falls through: the scratch buffer is released either way. */
err1:
	release_scratch_buff(&scratch_buff, dbg_buff);

err:
	return rc;
}
2284 
2285 static int collect_cim_ma_la(struct cudbg_init *pdbg_init,
2286 			     struct cudbg_buffer *dbg_buff,
2287 			     struct cudbg_error *cudbg_err)
2288 {
2289 	struct cudbg_buffer scratch_buff;
2290 	struct adapter *padap = pdbg_init->adap;
2291 	u32 rc = 0;
2292 
2293 	/* collect CIM MA LA */
2294 	scratch_buff.size =  2 * CIM_MALA_SIZE * 5 * sizeof(u32);
2295 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2296 	if (rc)
2297 		goto err;
2298 
2299 	/* no return */
2300 	t4_cim_read_ma_la(padap,
2301 			  (u32 *) ((char *)scratch_buff.data +
2302 				   scratch_buff.offset),
2303 			  (u32 *) ((char *)scratch_buff.data +
2304 				   scratch_buff.offset + 5 * CIM_MALA_SIZE));
2305 
2306 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2307 	if (rc)
2308 		goto err1;
2309 
2310 	rc = compress_buff(&scratch_buff, dbg_buff);
2311 
2312 err1:
2313 	release_scratch_buff(&scratch_buff, dbg_buff);
2314 err:
2315 	return rc;
2316 }
2317 
/*
 * Collect the CIM logic-analyzer capture, prefixed by the current
 * UP_UP_DBG_LA_CFG register value so the decoder knows the LA mode.
 *
 * Returns 0 on success or a CUDBG/hardware error code; sys_err records
 * the failing hardware call.
 */
static int collect_cim_la(struct cudbg_init *pdbg_init,
			  struct cudbg_buffer *dbg_buff,
			  struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;

	int rc;
	u32 cfg = 0;	/* LA config word, prepended to the dump */
	int size;

	/* collect CIM LA */
	/*
	 * Size the capture from params.cim_la_size.  On T6 the math
	 * allows 11 output words per 10 LA entries (plus one spare
	 * group); earlier chips use 8 words per 8 entries.
	 */
	if (is_t6(padap)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 11 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	/* Room for the leading cfg word. */
	size += sizeof(cfg);

	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
	if (rc)
		goto err;

	rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);

	if (rc) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_cim_read failed (%d)\n",
				 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	/* cfg word first, LA data immediately after it. */
	memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
	       sizeof(cfg));

	rc = t4_cim_read_la(padap,
			    (u32 *) ((char *)scratch_buff.data +
				     scratch_buff.offset + sizeof(cfg)), NULL);
	if (rc < 0) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_cim_read_la failed (%d)\n",
				 __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
2381 
/*
 * Collect the CIM queue configuration: IBQ/OBQ pointer state read via
 * t4_cim_read(), plus the queue base/size/threshold tables from
 * t4_read_cimq_cfg(), packed into one struct struct_cim_qcfg.
 *
 * Returns 0 on success or a CUDBG/hardware error code; sys_err records
 * the failing hardware call.
 */
static int collect_cim_qcfg(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	u32 offset;
	int rc = 0;

	struct struct_cim_qcfg *cim_qcfg_data = NULL;

	rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
			      &scratch_buff);

	if (rc)
		goto err;

	offset = scratch_buff.offset;

	/* The output struct lives directly in the scratch buffer. */
	cim_qcfg_data =
		(struct struct_cim_qcfg *)((u8 *)((char *)scratch_buff.data +
					   offset));

	/* IBQ read/write pointer state. */
	rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);

	if (rc) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
			    __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	/* OBQ write pointers. */
	rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);

	if (rc) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
			    __func__, rc);
		cudbg_err->sys_err = rc;
		goto err1;
	}

	/* no return val */
	t4_read_cimq_cfg(padap,
			cim_qcfg_data->base,
			cim_qcfg_data->size,
			cim_qcfg_data->thres);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
2447 
2448 /**
2449  * Fetch the TX/RX payload regions start and end.
2450  *
2451  * @padap (IN): adapter handle.
2452  * @mem_type (IN): EDC0, EDC1, MC/MC0/MC1.
2453  * @mem_tot_len (IN): total length of @mem_type memory region to read.
2454  * @payload_type (IN): TX or RX Payload.
2455  * @reg_info (OUT): store the payload region info.
2456  *
2457  * Fetch the TX/RX payload region information from meminfo.
2458  * However, reading from the @mem_type region starts at 0 and not
2459  * from whatever base info is stored in meminfo.  Hence, if the
2460  * payload region exists, then calculate the payload region
2461  * start and end wrt 0 and @mem_tot_len, respectively, and set
2462  * @reg_info->exist to true. Otherwise, set @reg_info->exist to false.
2463  */
2464 #ifdef notyet
static int get_payload_range(struct adapter *padap, u8 mem_type,
			     unsigned long mem_tot_len, u8 payload_type,
			     struct struct_region_info *reg_info)
{
	struct struct_meminfo meminfo;
	struct struct_mem_desc mem_region;
	struct struct_mem_desc payload;
	u32 i, idx, found = 0;
	u8 mc_type;
	int rc;

	/* Get meminfo of all regions */
	rc = fill_meminfo(padap, &meminfo);
	if (rc)
		return rc;

	/* Extract the specified TX or RX Payload region range */
	memset(&payload, 0, sizeof(struct struct_mem_desc));
	for (i = 0; i < meminfo.mem_c; i++) {
		if (meminfo.mem[i].idx >= ARRAY_SIZE(region))
			continue;                        /* skip holes */

		idx = meminfo.mem[i].idx;
		/* Get TX or RX Payload region start and end */
		if (idx == payload_type) {
			/*
			 * A zero limit means "open-ended": extend to just
			 * before the next region's base, or to the top of
			 * memory if this is the last region.
			 */
			if (!(meminfo.mem[i].limit))
				meminfo.mem[i].limit =
					i < meminfo.mem_c - 1 ?
					meminfo.mem[i + 1].base - 1 : ~0;

			memcpy(&payload, &meminfo.mem[i], sizeof(payload));
			found = 1;
			break;
		}
	}

	/* If TX or RX Payload region is not found return error. */
	if (!found)
		return -EINVAL;

	if (mem_type < MEM_MC) {
		/* EDC0/EDC1: avail[] is indexed directly by mem_type. */
		memcpy(&mem_region, &meminfo.avail[mem_type],
		       sizeof(mem_region));
	} else {
		/* Check if both MC0 and MC1 exist by checking if a
		 * base address for the specified @mem_type exists.
		 * If a base address exists, then there is MC1 and
		 * hence use the base address stored at index 3.
		 * Otherwise, use the base address stored at index 2.
		 */
		mc_type = meminfo.avail[mem_type].base ?
			  mem_type : mem_type - 1;
		memcpy(&mem_region, &meminfo.avail[mc_type],
		       sizeof(mem_region));
	}

	/* Check if payload region exists in current memory */
	if (payload.base < mem_region.base && payload.limit < mem_region.base) {
		reg_info->exist = false;
		return 0;
	}

	/* Get Payload region start and end with respect to 0 and
	 * mem_tot_len, respectively.  This is because reading from the
	 * memory region starts at 0 and not at base info stored in meminfo.
	 */
	if (payload.base < mem_region.limit) {
		reg_info->exist = true;
		if (payload.base >= mem_region.base)
			reg_info->start = payload.base - mem_region.base;
		else
			reg_info->start = 0;

		/* Clamp the end to the readable window [0, mem_tot_len]. */
		if (payload.limit < mem_region.limit)
			reg_info->end = payload.limit - mem_region.base;
		else
			reg_info->end = mem_tot_len;
	}

	return 0;
}
2546 #endif
2547 
/*
 * Read @tot_len bytes of @mem_type (EDC/MC) memory into dbg_buff in
 * CUDBG_CHUNK_SIZE pieces, compressing each chunk.  Unless the caller
 * explicitly requested payload data, the TX/RX payload ranges are
 * zeroed out instead of being read.
 *
 * NOTE: the whole implementation is under "#ifdef notyet" and is
 * compiled out on FreeBSD; the function currently always returns
 * CUDBG_STATUS_NOT_IMPLEMENTED.
 */
static int read_fw_mem(struct cudbg_init *pdbg_init,
			struct cudbg_buffer *dbg_buff, u8 mem_type,
			unsigned long tot_len, struct cudbg_error *cudbg_err)
{
#ifdef notyet
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	unsigned long bytes_read = 0;
	unsigned long bytes_left;
	unsigned long bytes;
	int	      rc;
	struct struct_region_info payload[2]; /* TX and RX Payload Region */
	u16 get_payload_flag;
	u8 i;

	get_payload_flag =
		pdbg_init->dbg_params[CUDBG_GET_PAYLOAD_PARAM].param_type;

	/* If explicitly asked to get TX/RX Payload data,
	 * then don't zero out the payload data. Otherwise,
	 * zero out the payload data.
	 */
	if (!get_payload_flag) {
		u8 region_index[2];
		u8 j = 0;

		/* Find the index of TX and RX Payload regions in meminfo */
		for (i = 0; i < ARRAY_SIZE(region); i++) {
			if (!strcmp(region[i], "Tx payload:") ||
			    !strcmp(region[i], "Rx payload:")) {
				region_index[j] = i;
				j++;
				if (j == 2)
					break;
			}
		}

		/* Get TX/RX Payload region range if they exist */
		memset(payload, 0, ARRAY_SIZE(payload) * sizeof(payload[0]));
		for (i = 0; i < ARRAY_SIZE(payload); i++) {
			rc = get_payload_range(padap, mem_type, tot_len,
					       region_index[i],
					       &payload[i]);
			if (rc)
				goto err;

			if (payload[i].exist) {
				/* Align start and end to avoid wrap around */
				payload[i].start =
					roundup(payload[i].start,
					    CUDBG_CHUNK_SIZE);
				payload[i].end =
					rounddown(payload[i].end,
					    CUDBG_CHUNK_SIZE);
			}
		}
	}

	bytes_left = tot_len;
	scratch_buff.size = tot_len;
	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err;

	/* One scratch buffer per chunk; released at the bottom of the loop. */
	while (bytes_left > 0) {
		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
		rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);

		if (rc) {
			rc = CUDBG_STATUS_NO_SCRATCH_MEM;
			goto err;
		}

		/* Zero (rather than read) chunks inside a payload region. */
		if (!get_payload_flag) {
			for (i = 0; i < ARRAY_SIZE(payload); i++) {
				if (payload[i].exist &&
				    bytes_read >= payload[i].start &&
				    (bytes_read + bytes) <= payload[i].end) {
					memset(scratch_buff.data, 0, bytes);
					/* TX and RX Payload regions
					 * can't overlap.
					 */
					goto skip_read;
				}
			}
		}

		/* Read from file */
		/*fread(scratch_buff.data, 1, Bytes, in);*/
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
				  bytes, (__be32 *)(scratch_buff.data), 1);

		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("%s: t4_memory_rw failed (%d)",
				    __func__, rc);
			cudbg_err->sys_err = rc;
			goto err1;
		}

skip_read:
		rc = compress_buff(&scratch_buff, dbg_buff);
		if (rc)
			goto err1;

		bytes_left -= bytes;
		bytes_read += bytes;
		release_scratch_buff(&scratch_buff, dbg_buff);
	}

err1:
	/* Only release on failure: the success path released in the loop. */
	if (rc)
		release_scratch_buff(&scratch_buff, dbg_buff);

err:
	return rc;
#endif
	return (CUDBG_STATUS_NOT_IMPLEMENTED);
}
2667 
2668 static void collect_mem_info(struct cudbg_init *pdbg_init,
2669 			     struct card_mem *mem_info)
2670 {
2671 	struct adapter *padap = pdbg_init->adap;
2672 	u32 value;
2673 	int t4 = 0;
2674 
2675 	if (is_t4(padap))
2676 		t4 = 1;
2677 
2678 	if (t4) {
2679 		value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
2680 		value = G_EXT_MEM_SIZE(value);
2681 		mem_info->size_mc0 = (u16)value;  /* size in MB */
2682 
2683 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2684 		if (value & F_EXT_MEM_ENABLE)
2685 			mem_info->mem_flag |= (1 << MC0_FLAG); /* set mc0 flag
2686 								  bit */
2687 	} else {
2688 		value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
2689 		value = G_EXT_MEM0_SIZE(value);
2690 		mem_info->size_mc0 = (u16)value;
2691 
2692 		value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
2693 		value = G_EXT_MEM1_SIZE(value);
2694 		mem_info->size_mc1 = (u16)value;
2695 
2696 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2697 		if (value & F_EXT_MEM0_ENABLE)
2698 			mem_info->mem_flag |= (1 << MC0_FLAG);
2699 		if (value & F_EXT_MEM1_ENABLE)
2700 			mem_info->mem_flag |= (1 << MC1_FLAG);
2701 	}
2702 
2703 	value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
2704 	value = G_EDRAM0_SIZE(value);
2705 	mem_info->size_edc0 = (u16)value;
2706 
2707 	value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
2708 	value = G_EDRAM1_SIZE(value);
2709 	mem_info->size_edc1 = (u16)value;
2710 
2711 	value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2712 	if (value & F_EDRAM0_ENABLE)
2713 		mem_info->mem_flag |= (1 << EDC0_FLAG);
2714 	if (value & F_EDRAM1_ENABLE)
2715 		mem_info->mem_flag |= (1 << EDC1_FLAG);
2716 
2717 }
2718 
/*
 * Ask the firmware to flush the uP dcache before EDC/MC memory is read.
 * Only attempted when the firmware is attached.  A failure is recorded
 * as a warning (cudbg_err->sys_warn), not an error, so the dump can
 * still proceed.
 */
static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc;

	if (is_fw_attached(pdbg_init)) {

		/* Flush uP dcache before reading edcX/mcX  */
		rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
		    "t4cudl");
		if (rc == 0) {
			rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
			end_synchronized_op(padap, 0);
		}

		if (rc) {
			if (pdbg_init->verbose)
				pdbg_init->print("%s: t4_fwcache failed (%d)\n",
				 __func__, rc);
			cudbg_err->sys_warn = rc;
		}
	}
}
2743 
2744 static int collect_edc0_meminfo(struct cudbg_init *pdbg_init,
2745 				struct cudbg_buffer *dbg_buff,
2746 				struct cudbg_error *cudbg_err)
2747 {
2748 	struct card_mem mem_info = {0};
2749 	unsigned long edc0_size;
2750 	int rc;
2751 
2752 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2753 
2754 	collect_mem_info(pdbg_init, &mem_info);
2755 
2756 	if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
2757 		edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
2758 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
2759 				 edc0_size, cudbg_err);
2760 		if (rc)
2761 			goto err;
2762 
2763 	} else {
2764 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2765 		if (pdbg_init->verbose)
2766 			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2767 				 __func__, err_msg[-rc]);
2768 		goto err;
2769 
2770 	}
2771 err:
2772 	return rc;
2773 }
2774 
2775 static int collect_edc1_meminfo(struct cudbg_init *pdbg_init,
2776 				struct cudbg_buffer *dbg_buff,
2777 				struct cudbg_error *cudbg_err)
2778 {
2779 	struct card_mem mem_info = {0};
2780 	unsigned long edc1_size;
2781 	int rc;
2782 
2783 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2784 
2785 	collect_mem_info(pdbg_init, &mem_info);
2786 
2787 	if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
2788 		edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
2789 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
2790 				 edc1_size, cudbg_err);
2791 		if (rc)
2792 			goto err;
2793 	} else {
2794 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2795 		if (pdbg_init->verbose)
2796 			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2797 				 __func__, err_msg[-rc]);
2798 		goto err;
2799 	}
2800 
2801 err:
2802 
2803 	return rc;
2804 }
2805 
2806 static int collect_mc0_meminfo(struct cudbg_init *pdbg_init,
2807 			       struct cudbg_buffer *dbg_buff,
2808 			       struct cudbg_error *cudbg_err)
2809 {
2810 	struct card_mem mem_info = {0};
2811 	unsigned long mc0_size;
2812 	int rc;
2813 
2814 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2815 
2816 	collect_mem_info(pdbg_init, &mem_info);
2817 
2818 	if (mem_info.mem_flag & (1 << MC0_FLAG)) {
2819 		mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
2820 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
2821 				 mc0_size, cudbg_err);
2822 		if (rc)
2823 			goto err;
2824 	} else {
2825 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2826 		if (pdbg_init->verbose)
2827 			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2828 				 __func__, err_msg[-rc]);
2829 		goto err;
2830 	}
2831 
2832 err:
2833 	return rc;
2834 }
2835 
2836 static int collect_mc1_meminfo(struct cudbg_init *pdbg_init,
2837 			       struct cudbg_buffer *dbg_buff,
2838 			       struct cudbg_error *cudbg_err)
2839 {
2840 	struct card_mem mem_info = {0};
2841 	unsigned long mc1_size;
2842 	int rc;
2843 
2844 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2845 
2846 	collect_mem_info(pdbg_init, &mem_info);
2847 
2848 	if (mem_info.mem_flag & (1 << MC1_FLAG)) {
2849 		mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
2850 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
2851 				 mc1_size, cudbg_err);
2852 		if (rc)
2853 			goto err;
2854 	} else {
2855 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2856 
2857 		if (pdbg_init->verbose)
2858 			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2859 				 __func__, err_msg[-rc]);
2860 		goto err;
2861 	}
2862 err:
2863 	return rc;
2864 }
2865 
2866 static int collect_reg_dump(struct cudbg_init *pdbg_init,
2867 			    struct cudbg_buffer *dbg_buff,
2868 			    struct cudbg_error *cudbg_err)
2869 {
2870 	struct cudbg_buffer scratch_buff;
2871 	struct cudbg_buffer tmp_scratch_buff;
2872 	struct adapter *padap = pdbg_init->adap;
2873 	unsigned long	     bytes_read = 0;
2874 	unsigned long	     bytes_left;
2875 	u32		     buf_size = 0, bytes = 0;
2876 	int		     rc = 0;
2877 
2878 	if (is_t4(padap))
2879 		buf_size = T4_REGMAP_SIZE ;/*+ sizeof(unsigned int);*/
2880 	else if (is_t5(padap) || is_t6(padap))
2881 		buf_size = T5_REGMAP_SIZE;
2882 
2883 	scratch_buff.size = buf_size;
2884 
2885 	tmp_scratch_buff = scratch_buff;
2886 
2887 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2888 	if (rc)
2889 		goto err;
2890 
2891 	/* no return */
2892 	t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
2893 	bytes_left =   scratch_buff.size;
2894 
2895 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2896 	if (rc)
2897 		goto err1;
2898 
2899 	while (bytes_left > 0) {
2900 		tmp_scratch_buff.data =
2901 			((char *)scratch_buff.data) + bytes_read;
2902 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2903 		tmp_scratch_buff.size = bytes;
2904 		compress_buff(&tmp_scratch_buff, dbg_buff);
2905 		bytes_left -= bytes;
2906 		bytes_read += bytes;
2907 	}
2908 
2909 err1:
2910 	release_scratch_buff(&scratch_buff, dbg_buff);
2911 err:
2912 	return rc;
2913 }
2914 
2915 static int collect_cctrl(struct cudbg_init *pdbg_init,
2916 			 struct cudbg_buffer *dbg_buff,
2917 			 struct cudbg_error *cudbg_err)
2918 {
2919 	struct cudbg_buffer scratch_buff;
2920 	struct adapter *padap = pdbg_init->adap;
2921 	u32 size;
2922 	int rc;
2923 
2924 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2925 	scratch_buff.size = size;
2926 
2927 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2928 	if (rc)
2929 		goto err;
2930 
2931 	t4_read_cong_tbl(padap, (void *)scratch_buff.data);
2932 
2933 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2934 	if (rc)
2935 		goto err1;
2936 
2937 	rc = compress_buff(&scratch_buff, dbg_buff);
2938 
2939 err1:
2940 	release_scratch_buff(&scratch_buff, dbg_buff);
2941 err:
2942 	return rc;
2943 }
2944 
2945 static int check_busy_bit(struct adapter *padap)
2946 {
2947 	u32 val;
2948 	u32 busy = 1;
2949 	int i = 0;
2950 	int retry = 10;
2951 	int status = 0;
2952 
2953 	while (busy && i < retry) {
2954 		val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
2955 		busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
2956 		i++;
2957 	}
2958 
2959 	if (busy)
2960 		status = -1;
2961 
2962 	return status;
2963 }
2964 
2965 static int cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
2966 {
2967 	int rc = 0;
2968 
2969 	/* write register address into the A_CIM_HOST_ACC_CTRL */
2970 	t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
2971 
2972 	/* Poll HOSTBUSY */
2973 	rc = check_busy_bit(padap);
2974 	if (rc)
2975 		goto err;
2976 
2977 	/* Read value from A_CIM_HOST_ACC_DATA */
2978 	*val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
2979 
2980 err:
2981 	return rc;
2982 }
2983 
2984 static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
2985 		       struct ireg_field *up_cim_reg, u32 *buff)
2986 {
2987 	u32 i;
2988 	int rc = 0;
2989 
2990 	for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
2991 		rc = cim_ha_rreg(padap,
2992 				 up_cim_reg->ireg_local_offset + (i * 4),
2993 				buff);
2994 		if (rc) {
2995 			if (pdbg_init->verbose)
2996 				pdbg_init->print("BUSY timeout reading"
2997 					 "CIM_HOST_ACC_CTRL\n");
2998 			goto err;
2999 		}
3000 
3001 		buff++;
3002 	}
3003 
3004 err:
3005 	return rc;
3006 }
3007 
3008 static int collect_up_cim_indirect(struct cudbg_init *pdbg_init,
3009 				   struct cudbg_buffer *dbg_buff,
3010 				   struct cudbg_error *cudbg_err)
3011 {
3012 	struct cudbg_buffer scratch_buff;
3013 	struct adapter *padap = pdbg_init->adap;
3014 	struct ireg_buf *up_cim;
3015 	u32 size;
3016 	int i, rc, n;
3017 
3018 	n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
3019 	size = sizeof(struct ireg_buf) * n;
3020 	scratch_buff.size = size;
3021 
3022 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3023 	if (rc)
3024 		goto err;
3025 
3026 	up_cim = (struct ireg_buf *)scratch_buff.data;
3027 
3028 	for (i = 0; i < n; i++) {
3029 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
3030 		u32 *buff = up_cim->outbuf;
3031 
3032 		if (is_t5(padap)) {
3033 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
3034 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
3035 			up_cim_reg->ireg_local_offset =
3036 						t5_up_cim_reg_array[i][2];
3037 			up_cim_reg->ireg_offset_range =
3038 						t5_up_cim_reg_array[i][3];
3039 		} else if (is_t6(padap)) {
3040 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
3041 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
3042 			up_cim_reg->ireg_local_offset =
3043 						t6_up_cim_reg_array[i][2];
3044 			up_cim_reg->ireg_offset_range =
3045 						t6_up_cim_reg_array[i][3];
3046 		}
3047 
3048 		rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
3049 
3050 		up_cim++;
3051 	}
3052 
3053 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3054 	if (rc)
3055 		goto err1;
3056 
3057 	rc = compress_buff(&scratch_buff, dbg_buff);
3058 
3059 err1:
3060 	release_scratch_buff(&scratch_buff, dbg_buff);
3061 err:
3062 	return rc;
3063 }
3064 
/*
 * Collect the firmware mailbox command log: for each of the most recent
 * mbox_cmds entries in the caller-supplied circular log, copy the entry
 * and split each 64-bit command flit into hi/lo 32-bit halves.
 *
 * NOTE: the implementation is under "#ifdef notyet" and is compiled out
 * on FreeBSD; the function currently always returns
 * CUDBG_STATUS_NOT_IMPLEMENTED.
 */
static int collect_mbox_log(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
#ifdef notyet
	struct cudbg_buffer scratch_buff;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	u64 flit;
	u32 size;
	unsigned int entry_idx;
	int i, k, rc;
	u16 mbox_cmds;

	/* The log and entry count must be supplied via dbg_params. */
	if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
		log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
			mboxlog_param.log;
		mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
				mboxlog_param.mbox_cmds;
	} else {
		if (pdbg_init->verbose)
			pdbg_init->print("Mbox log is not requested\n");
		return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
	}

	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	scratch_buff.size = size;
	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;

	for (k = 0; k < mbox_cmds; k++) {
		/* The log is circular; wrap the cursor manually. */
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;
		entry = mbox_cmd_log_entry(log, entry_idx);

		/* skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));

		/* Split each 64-bit flit into 32-bit halves for the dump. */
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}

		mboxlog++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
#endif
	return (CUDBG_STATUS_NOT_IMPLEMENTED);
}
3133 
3134 static int collect_pbt_tables(struct cudbg_init *pdbg_init,
3135 			      struct cudbg_buffer *dbg_buff,
3136 			      struct cudbg_error *cudbg_err)
3137 {
3138 	struct cudbg_buffer scratch_buff;
3139 	struct adapter *padap = pdbg_init->adap;
3140 	struct cudbg_pbt_tables *pbt = NULL;
3141 	u32 size;
3142 	u32 addr;
3143 	int i, rc;
3144 
3145 	size = sizeof(struct cudbg_pbt_tables);
3146 	scratch_buff.size = size;
3147 
3148 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3149 	if (rc)
3150 		goto err;
3151 
3152 	pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
3153 
3154 	/* PBT dynamic entries */
3155 	addr = CUDBG_CHAC_PBT_ADDR;
3156 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
3157 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
3158 		if (rc) {
3159 			if (pdbg_init->verbose)
3160 				pdbg_init->print("BUSY timeout reading"
3161 					 "CIM_HOST_ACC_CTRL\n");
3162 			goto err1;
3163 		}
3164 	}
3165 
3166 	/* PBT static entries */
3167 
3168 	/* static entries start when bit 6 is set */
3169 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
3170 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
3171 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
3172 		if (rc) {
3173 			if (pdbg_init->verbose)
3174 				pdbg_init->print("BUSY timeout reading"
3175 					 "CIM_HOST_ACC_CTRL\n");
3176 			goto err1;
3177 		}
3178 	}
3179 
3180 	/* LRF entries */
3181 	addr = CUDBG_CHAC_PBT_LRF;
3182 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
3183 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
3184 		if (rc) {
3185 			if (pdbg_init->verbose)
3186 				pdbg_init->print("BUSY timeout reading"
3187 					 "CIM_HOST_ACC_CTRL\n");
3188 			goto err1;
3189 		}
3190 	}
3191 
3192 	/* PBT data entries */
3193 	addr = CUDBG_CHAC_PBT_DATA;
3194 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
3195 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
3196 		if (rc) {
3197 			if (pdbg_init->verbose)
3198 				pdbg_init->print("BUSY timeout reading"
3199 					 "CIM_HOST_ACC_CTRL\n");
3200 			goto err1;
3201 		}
3202 	}
3203 
3204 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3205 	if (rc)
3206 		goto err1;
3207 
3208 	rc = compress_buff(&scratch_buff, dbg_buff);
3209 
3210 err1:
3211 	release_scratch_buff(&scratch_buff, dbg_buff);
3212 err:
3213 	return rc;
3214 }
3215 
3216 static int collect_pm_indirect(struct cudbg_init *pdbg_init,
3217 			       struct cudbg_buffer *dbg_buff,
3218 			       struct cudbg_error *cudbg_err)
3219 {
3220 	struct cudbg_buffer scratch_buff;
3221 	struct adapter *padap = pdbg_init->adap;
3222 	struct ireg_buf *ch_pm;
3223 	u32 size;
3224 	int i, rc, n;
3225 
3226 	n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
3227 	size = sizeof(struct ireg_buf) * n * 2;
3228 	scratch_buff.size = size;
3229 
3230 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3231 	if (rc)
3232 		goto err;
3233 
3234 	ch_pm = (struct ireg_buf *)scratch_buff.data;
3235 
3236 	/*PM_RX*/
3237 	for (i = 0; i < n; i++) {
3238 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3239 		u32 *buff = ch_pm->outbuf;
3240 
3241 		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
3242 		pm_pio->ireg_data = t5_pm_rx_array[i][1];
3243 		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
3244 		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
3245 
3246 		t4_read_indirect(padap,
3247 				pm_pio->ireg_addr,
3248 				pm_pio->ireg_data,
3249 				buff,
3250 				pm_pio->ireg_offset_range,
3251 				pm_pio->ireg_local_offset);
3252 
3253 		ch_pm++;
3254 	}
3255 
3256 	/*PM_Tx*/
3257 	n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
3258 	for (i = 0; i < n; i++) {
3259 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3260 		u32 *buff = ch_pm->outbuf;
3261 
3262 		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
3263 		pm_pio->ireg_data = t5_pm_tx_array[i][1];
3264 		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
3265 		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
3266 
3267 		t4_read_indirect(padap,
3268 				pm_pio->ireg_addr,
3269 				pm_pio->ireg_data,
3270 				buff,
3271 				pm_pio->ireg_offset_range,
3272 				pm_pio->ireg_local_offset);
3273 
3274 		ch_pm++;
3275 	}
3276 
3277 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3278 	if (rc)
3279 		goto err1;
3280 
3281 	rc = compress_buff(&scratch_buff, dbg_buff);
3282 
3283 err1:
3284 	release_scratch_buff(&scratch_buff, dbg_buff);
3285 err:
3286 	return rc;
3287 
3288 }
3289 
3290 static int collect_tid(struct cudbg_init *pdbg_init,
3291 		       struct cudbg_buffer *dbg_buff,
3292 		       struct cudbg_error *cudbg_err)
3293 {
3294 
3295 	struct cudbg_buffer scratch_buff;
3296 	struct adapter *padap = pdbg_init->adap;
3297 	struct tid_info_region *tid;
3298 	struct tid_info_region_rev1 *tid1;
3299 	u32 para[7], val[7];
3300 	u32 mbox, pf;
3301 	int rc;
3302 
3303 	scratch_buff.size = sizeof(struct tid_info_region_rev1);
3304 
3305 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3306 	if (rc)
3307 		goto err;
3308 
3309 #define FW_PARAM_DEV_A(param) \
3310 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3311 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3312 #define FW_PARAM_PFVF_A(param) \
3313 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3314 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
3315 	 V_FW_PARAMS_PARAM_Y(0) | \
3316 	 V_FW_PARAMS_PARAM_Z(0))
3317 #define MAX_ATIDS_A 8192U
3318 
3319 	tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
3320 	tid = &(tid1->tid);
3321 	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
3322 	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
3323 	tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
3324 			     sizeof(struct cudbg_ver_hdr);
3325 
3326 	if (is_t5(padap)) {
3327 		tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3328 		tid1->tid_start = 0;
3329 	} else if (is_t6(padap)) {
3330 		tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
3331 		tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
3332 	}
3333 
3334 	tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
3335 
3336 	para[0] = FW_PARAM_PFVF_A(FILTER_START);
3337 	para[1] = FW_PARAM_PFVF_A(FILTER_END);
3338 	para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
3339 	para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
3340 	para[4] = FW_PARAM_DEV_A(NTID);
3341 	para[5] = FW_PARAM_PFVF_A(SERVER_START);
3342 	para[6] = FW_PARAM_PFVF_A(SERVER_END);
3343 
3344 	rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK, "t4cudq");
3345 	if (rc)
3346 		goto err;
3347 	mbox = padap->mbox;
3348 	pf = padap->pf;
3349 	rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3350 	if (rc <  0) {
3351 		if (rc == -FW_EPERM) {
3352 			/* It looks like we don't have permission to use
3353 			 * padap->mbox.
3354 			 *
3355 			 * Try mbox 4.  If it works, we'll continue to
3356 			 * collect the rest of tid info from mbox 4.
3357 			 * Else, quit trying to collect tid info.
3358 			 */
3359 			mbox = 4;
3360 			pf = 4;
3361 			rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3362 			if (rc < 0) {
3363 				cudbg_err->sys_err = rc;
3364 				goto err1;
3365 			}
3366 		} else {
3367 			cudbg_err->sys_err = rc;
3368 			goto err1;
3369 		}
3370 	}
3371 
3372 	tid->ftid_base = val[0];
3373 	tid->nftids = val[1] - val[0] + 1;
3374 	/*active filter region*/
3375 	if (val[2] != val[3]) {
3376 #ifdef notyet
3377 		tid->flags |= FW_OFLD_CONN;
3378 #endif
3379 		tid->aftid_base = val[2];
3380 		tid->aftid_end = val[3];
3381 	}
3382 	tid->ntids = val[4];
3383 	tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
3384 	tid->stid_base = val[5];
3385 	tid->nstids = val[6] - val[5] + 1;
3386 
3387 	if (chip_id(padap) >= CHELSIO_T6) {
3388 		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
3389 		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
3390 		rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3391 		if (rc < 0) {
3392 			cudbg_err->sys_err = rc;
3393 			goto err1;
3394 		}
3395 
3396 		tid->hpftid_base = val[0];
3397 		tid->nhpftids = val[1] - val[0] + 1;
3398 	}
3399 
3400 	if (chip_id(padap) <= CHELSIO_T5) {
3401 		tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
3402 		tid->hash_base /= 4;
3403 	} else
3404 		tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
3405 
3406 	/*UO context range*/
3407 	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
3408 	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
3409 
3410 	rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3411 	if (rc <  0) {
3412 		cudbg_err->sys_err = rc;
3413 		goto err1;
3414 	}
3415 
3416 	if (val[0] != val[1]) {
3417 		tid->uotid_base = val[0];
3418 		tid->nuotids = val[1] - val[0] + 1;
3419 	}
3420 	tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
3421 	tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
3422 
3423 #undef FW_PARAM_PFVF_A
3424 #undef FW_PARAM_DEV_A
3425 #undef MAX_ATIDS_A
3426 
3427 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3428 	if (rc)
3429 		goto err1;
3430 	rc = compress_buff(&scratch_buff, dbg_buff);
3431 
3432 err1:
3433 	end_synchronized_op(padap, 0);
3434 	release_scratch_buff(&scratch_buff, dbg_buff);
3435 err:
3436 	return rc;
3437 }
3438 
3439 static int collect_tx_rate(struct cudbg_init *pdbg_init,
3440 			   struct cudbg_buffer *dbg_buff,
3441 			   struct cudbg_error *cudbg_err)
3442 {
3443 	struct cudbg_buffer scratch_buff;
3444 	struct adapter *padap = pdbg_init->adap;
3445 	struct tx_rate *tx_rate;
3446 	u32 size;
3447 	int rc;
3448 
3449 	size = sizeof(struct tx_rate);
3450 	scratch_buff.size = size;
3451 
3452 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3453 	if (rc)
3454 		goto err;
3455 
3456 	tx_rate = (struct tx_rate *)scratch_buff.data;
3457 	t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
3458 	tx_rate->nchan = padap->chip_params->nchan;
3459 
3460 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3461 	if (rc)
3462 		goto err1;
3463 
3464 	rc = compress_buff(&scratch_buff, dbg_buff);
3465 
3466 err1:
3467 	release_scratch_buff(&scratch_buff, dbg_buff);
3468 err:
3469 	return rc;
3470 }
3471 
3472 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
3473 {
3474 	*mask = x | y;
3475 	y = (__force u64)cpu_to_be64(y);
3476 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
3477 }
3478 
3479 static void mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
3480 {
3481 	if (is_t5(padap)) {
3482 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3483 							  A_MPS_VF_RPLCT_MAP3));
3484 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3485 							  A_MPS_VF_RPLCT_MAP2));
3486 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3487 							  A_MPS_VF_RPLCT_MAP1));
3488 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3489 							  A_MPS_VF_RPLCT_MAP0));
3490 	} else {
3491 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3492 							  A_MPS_VF_RPLCT_MAP7));
3493 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3494 							  A_MPS_VF_RPLCT_MAP6));
3495 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3496 							  A_MPS_VF_RPLCT_MAP5));
3497 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3498 							  A_MPS_VF_RPLCT_MAP4));
3499 	}
3500 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
3501 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
3502 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
3503 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
3504 }
3505 
/*
 * Dump all MPS (Multi-Port Switch) TCAM entries.  For each index the
 * TCAM X/Y ternary pair, the SRAM classification words and, for entries
 * with replication enabled, the VF replication map are collected into an
 * array of struct cudbg_mps_tcam and emitted as a compressed entity.
 *
 * Entries with any bit set in both X and Y are invalid and skipped.
 * Returns 0 on success, CUDBG_SYSTEM_ERROR if no valid entry was found,
 * or another CUDBG error code.
 */
static int collect_mps_tcam(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mps_tcam *tcam = NULL;
	u32 size = 0, i, n, total_size = 0;
	u32 ctl, data2;
	u64 tcamy, tcamx, val;
	int rc;

	/* One record per TCAM entry supported by this chip. */
	n = padap->chip_params->mps_tcam_size;
	size = sizeof(struct cudbg_mps_tcam) * n;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;
	memset(scratch_buff.data, 0, size);

	tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
	for (i = 0; i < n; i++) {
		/* T6 reads go through the DATA2_CTL indirect interface;
		 * older chips expose the TCAM directly (else branch below).
		 */
		if (chip_id(padap) >= CHELSIO_T6) {
			/* CtlReqID   - 1: use Host Driver Requester ID
			 * CtlCmdType - 0: Read, 1: Write
			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
			 * CtlXYBitSel- 0: Y bit, 1: X bit
			 */

			/* Read tcamy */
			ctl = (V_CTLREQID(1) |
			       V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
			/* Indices 256 and above live in the second TCAM. */
			if (i < 256)
				ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
			else
				ctl |= V_CTLTCAMINDEX(i - 256) |
				       V_CTLTCAMSEL(1);

			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
			tcamy = G_DMACH(val) << 32;
			tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
			tcam->lookup_type = G_DATALKPTYPE(data2);

			/* 0 - Outer header, 1 - Inner header
			 * [71:48] bit locations are overloaded for
			 * outer vs. inner lookup types.
			 */

			if (tcam->lookup_type &&
			    (tcam->lookup_type != M_DATALKPTYPE)) {
				/* Inner header VNI */
				tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
					     (G_DATAVIDH1(data2) << 16) |
					     G_VIDL(val);
				tcam->dip_hit = data2 & F_DATADIPHIT;
			} else {
				tcam->vlan_vld = data2 & F_DATAVIDH2;
				tcam->ivlan = G_VIDL(val);
			}

			tcam->port_num = G_DATAPORTNUM(data2);

			/* Read tcamx. Change the control param */
			ctl |= V_CTLXYBITSEL(1);
			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
			tcamx = G_DMACH(val) << 32;
			tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
			if (tcam->lookup_type &&
			    (tcam->lookup_type != M_DATALKPTYPE)) {
				/* Inner header VNI mask */
				tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
					     (G_DATAVIDH1(data2) << 16) |
					     G_VIDL(val);
			}
		} else {
			tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
			tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
		}

		/* A bit set in both X and Y marks an invalid entry. */
		if (tcamx & tcamy)
			continue;

		tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
		tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));

		/* Replication flag lives in different cls_lo bits per chip. */
		if (is_t5(padap))
			tcam->repli = (tcam->cls_lo & F_REPLICATE);
		else if (is_t6(padap))
			tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);

		if (tcam->repli) {
			struct fw_ldst_cmd ldst_cmd;
			struct fw_ldst_mps_rplc mps_rplc;

			/* Ask firmware for the replication map; on failure
			 * fall back to reading the registers directly.
			 */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
				htonl(V_FW_CMD_OP(FW_LDST_CMD) |
				      F_FW_CMD_REQUEST |
				      F_FW_CMD_READ |
				      V_FW_LDST_CMD_ADDRSPACE(
					      FW_LDST_ADDRSPC_MPS));

			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));

			ldst_cmd.u.mps.rplc.fid_idx =
				htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				      V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(padap, NULL,
			    SLEEP_OK | INTR_OK, "t4cudm");
			if (rc == 0) {
				rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
						sizeof(ldst_cmd), &ldst_cmd);
				end_synchronized_op(padap, 0);
			}

			if (rc)
				mps_rpl_backdoor(padap, &mps_rplc);
			else
				mps_rplc = ldst_cmd.u.mps.rplc;

			tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
			tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
			tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
			tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
			/* Chips with a wide replication map carry 256 bits. */
			if (padap->chip_params->mps_rplc_size >
					CUDBG_MAX_RPLC_SIZE) {
				tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
				tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
				tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
				tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
			}
		}
		cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);

		tcam->idx = i;
		tcam->rplc_size = padap->chip_params->mps_rplc_size;

		total_size += sizeof(struct cudbg_mps_tcam);

		tcam++;
	}

	/* No valid entries at all is treated as a collection failure. */
	if (total_size == 0) {
		rc = CUDBG_SYSTEM_ERROR;
		goto err1;
	}

	/* Emit only the records actually filled in. */
	scratch_buff.size = total_size;
	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	/* Restore the allocation size so the full buffer is released. */
	scratch_buff.size = size;
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
3672 
3673 static int collect_pcie_config(struct cudbg_init *pdbg_init,
3674 			       struct cudbg_buffer *dbg_buff,
3675 			       struct cudbg_error *cudbg_err)
3676 {
3677 	struct cudbg_buffer scratch_buff;
3678 	struct adapter *padap = pdbg_init->adap;
3679 	u32 size, *value, j;
3680 	int i, rc, n;
3681 
3682 	size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
3683 	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
3684 	scratch_buff.size = size;
3685 
3686 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3687 	if (rc)
3688 		goto err;
3689 
3690 	value = (u32 *)scratch_buff.data;
3691 	for (i = 0; i < n; i++) {
3692 		for (j = t5_pcie_config_array[i][0];
3693 		     j <= t5_pcie_config_array[i][1]; j += 4) {
3694 			*value++ = t4_hw_pci_read_cfg4(padap, j);
3695 		}
3696 	}
3697 
3698 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3699 	if (rc)
3700 		goto err1;
3701 
3702 	rc = compress_buff(&scratch_buff, dbg_buff);
3703 
3704 err1:
3705 	release_scratch_buff(&scratch_buff, dbg_buff);
3706 err:
3707 	return rc;
3708 }
3709 
/*
 * Read one TID entry out of the LE (Lookup Engine) via the DBGI debug
 * interface: issue a read command for the given tid, poll for completion,
 * verify the response status, and copy the response words into *tid_data.
 *
 * Returns 0 on success or CUDBG_SYSTEM_ERROR on timeout/command failure.
 */
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
			  struct cudbg_tid_data *tid_data)
{
	int i, cmd_retry = 8;
	struct adapter *padap = pdbg_init->adap;
	u32 val;

	/* Fill REQ_DATA regs with 0's */
	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
		t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);

	/* Write DBIG command */
	val = (0x4 << S_DBGICMD) | tid;
	t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
	tid_data->dbig_cmd = val;	/* record command for the dump */

	val = 0;
	val |= 1 << S_DBGICMDSTRT;
	val |= 1;  /* LE mode */
	t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
	tid_data->dbig_conf = val;	/* record config for the dump */

	/* Poll the DBGICMDBUSY bit */
	val = 1;
	while (val) {
		val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
		val = (val >> S_DBGICMDBUSY) & 1;
		cmd_retry--;
		if (!cmd_retry) {
			/* Gave up after 8 polls without the busy bit
			 * clearing.
			 */
			if (pdbg_init->verbose)
				pdbg_init->print("%s(): Timeout waiting for non-busy\n",
					 __func__);
			return CUDBG_SYSTEM_ERROR;
		}
	}

	/* Check RESP status */
	val = 0;
	val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
	tid_data->dbig_rsp_stat = val;
	/* Bit 0 set means the command completed successfully. */
	if (!(val & 1)) {
		if (pdbg_init->verbose)
			pdbg_init->print("%s(): DBGI command failed\n", __func__);
		return CUDBG_SYSTEM_ERROR;
	}

	/* Read RESP data */
	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
		tid_data->data[i] = t4_read_reg(padap,
						A_LE_DB_DBGI_RSP_DATA +
						(i << 2));

	tid_data->tid = tid;

	return 0;
}
3766 
/*
 * Dump the LE (Lookup Engine) TCAM.  First records the region boundaries
 * (hash base, routing/clip/filter/server table start indices) in a
 * struct cudbg_tcam header, then reads every TID via cudbg_read_tid(),
 * streaming the results out in CUDBG_CHUNK_SIZE scratch-buffer chunks
 * because the full dump may exceed a single allocation.
 *
 * Returns 0 on success or a CUDBG error code; cudbg_read_tid() failures
 * are also recorded in cudbg_err->sys_err.
 */
static int collect_le_tcam(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_tcam tcam_region = {0};
	struct cudbg_tid_data *tid_data = NULL;
	u32 value, bytes = 0, bytes_left  = 0;
	u32 i;
	int rc, size;

	/* Get the LE regions */
	value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE); /* Get hash base
							     index */
	tcam_region.tid_hash_base = value;

	/* Get routing table index */
	value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
	tcam_region.routing_start = value;

	/*Get clip table index */
	value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
	tcam_region.clip_start = value;

	/* Get filter table index */
	value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
	tcam_region.filter_start = value;

	/* Get server table index */
	value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
	tcam_region.server_start = value;

	/* Check whether hash is enabled and calculate the max tids */
	value = t4_read_reg(padap, A_LE_DB_CONFIG);
	if ((value >> S_HASHEN) & 1) {
		value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
		if (chip_id(padap) > CHELSIO_T5)
			/* T6: hash table size is in the low 20 bits. */
			tcam_region.max_tid = (value & 0xFFFFF) +
					      tcam_region.tid_hash_base;
		else {	    /* for T5 */
			value = G_HASHTIDSIZE(value);
			value = 1 << value;
			tcam_region.max_tid = value +
				tcam_region.tid_hash_base;
		}
	} else	 /* hash not enabled */
		tcam_region.max_tid = CUDBG_MAX_TCAM_TID;

	/* Total (uncompressed) payload: header plus one record per TID. */
	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
	size += sizeof(struct cudbg_tcam);
	scratch_buff.size = size;

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err;

	/* The dump is produced in fixed-size chunks, not one allocation. */
	rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
	if (rc)
		goto err;

	memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));

	/* TID records start immediately after the region header. */
	tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
					     scratch_buff.data) + 1);
	bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
	bytes = sizeof(struct cudbg_tcam);

	/* read all tid */
	for (i = 0; i < tcam_region.max_tid; i++) {
		if (bytes_left < sizeof(struct cudbg_tid_data)) {
			/* Chunk full: flush it and start a fresh one. */
			scratch_buff.size = bytes;
			rc = compress_buff(&scratch_buff, dbg_buff);
			if (rc)
				goto err1;
			scratch_buff.size = CUDBG_CHUNK_SIZE;
			release_scratch_buff(&scratch_buff, dbg_buff);

			/* new alloc */
			rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
					      &scratch_buff);
			if (rc)
				goto err;

			tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
			bytes_left = CUDBG_CHUNK_SIZE;
			bytes = 0;
		}

		rc = cudbg_read_tid(pdbg_init, i, tid_data);

		if (rc) {
			cudbg_err->sys_err = rc;
			goto err1;
		}

		tid_data++;
		bytes_left -= sizeof(struct cudbg_tid_data);
		bytes += sizeof(struct cudbg_tid_data);
	}

	/* Flush the final, possibly partial, chunk. */
	if (bytes) {
		scratch_buff.size = bytes;
		rc = compress_buff(&scratch_buff, dbg_buff);
	}

err1:
	scratch_buff.size = CUDBG_CHUNK_SIZE;
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
3879 
/*
 * Dump the MA (Memory Arbiter) indirect registers, available on T6 only.
 * Two tables are collected: t6_ma_ireg_array is read range-at-a-time,
 * while t6_ma_ireg_array2 entries are read one word at a time with the
 * local offset advanced by 0x20 between reads.
 *
 * Returns CUDBG_STATUS_ENTITY_NOT_FOUND on pre-T6 chips, 0 on success,
 * or another CUDBG error code.
 */
static int collect_ma_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct ireg_buf *ma_indr = NULL;
	u32 size, j;
	int i, rc, n;

	if (chip_id(padap) < CHELSIO_T6) {
		if (pdbg_init->verbose)
			pdbg_init->print("MA indirect available only in T6\n");
		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
		goto err;
	}

	/* Size for both tables; "* 2" assumes array2 is no larger. */
	n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	ma_indr = (struct ireg_buf *)scratch_buff.data;

	/* First table: plain ranged indirect reads. */
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];

		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);

		ma_indr++;

	}

	n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));

	/* Second table: word [3] is a repeat count; each read fetches one
	 * word and the local offset is stepped by 0x20.
	 */
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];

		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
3955 
3956 static int collect_hma_indirect(struct cudbg_init *pdbg_init,
3957 			       struct cudbg_buffer *dbg_buff,
3958 			       struct cudbg_error *cudbg_err)
3959 {
3960 	struct cudbg_buffer scratch_buff;
3961 	struct adapter *padap = pdbg_init->adap;
3962 	struct ireg_buf *hma_indr = NULL;
3963 	u32 size;
3964 	int i, rc, n;
3965 
3966 	if (chip_id(padap) < CHELSIO_T6) {
3967 		if (pdbg_init->verbose)
3968 			pdbg_init->print("HMA indirect available only in T6\n");
3969 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3970 		goto err;
3971 	}
3972 
3973 	n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
3974 	size = sizeof(struct ireg_buf) * n;
3975 	scratch_buff.size = size;
3976 
3977 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3978 	if (rc)
3979 		goto err;
3980 
3981 	hma_indr = (struct ireg_buf *)scratch_buff.data;
3982 
3983 	for (i = 0; i < n; i++) {
3984 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
3985 		u32 *buff = hma_indr->outbuf;
3986 
3987 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
3988 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
3989 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
3990 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
3991 
3992 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
3993 				 buff, hma_fli->ireg_offset_range,
3994 				 hma_fli->ireg_local_offset);
3995 
3996 		hma_indr++;
3997 
3998 	}
3999 
4000 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4001 	if (rc)
4002 		goto err1;
4003 
4004 	rc = compress_buff(&scratch_buff, dbg_buff);
4005 
4006 err1:
4007 	release_scratch_buff(&scratch_buff, dbg_buff);
4008 err:
4009 	return rc;
4010 }
4011 
4012 static int collect_pcie_indirect(struct cudbg_init *pdbg_init,
4013 				 struct cudbg_buffer *dbg_buff,
4014 				 struct cudbg_error *cudbg_err)
4015 {
4016 	struct cudbg_buffer scratch_buff;
4017 	struct adapter *padap = pdbg_init->adap;
4018 	struct ireg_buf *ch_pcie;
4019 	u32 size;
4020 	int i, rc, n;
4021 
4022 	n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
4023 	size = sizeof(struct ireg_buf) * n * 2;
4024 	scratch_buff.size = size;
4025 
4026 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4027 	if (rc)
4028 		goto err;
4029 
4030 	ch_pcie = (struct ireg_buf *)scratch_buff.data;
4031 
4032 	/*PCIE_PDBG*/
4033 	for (i = 0; i < n; i++) {
4034 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4035 		u32 *buff = ch_pcie->outbuf;
4036 
4037 		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
4038 		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
4039 		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
4040 		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
4041 
4042 		t4_read_indirect(padap,
4043 				pcie_pio->ireg_addr,
4044 				pcie_pio->ireg_data,
4045 				buff,
4046 				pcie_pio->ireg_offset_range,
4047 				pcie_pio->ireg_local_offset);
4048 
4049 		ch_pcie++;
4050 	}
4051 
4052 	/*PCIE_CDBG*/
4053 	n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
4054 	for (i = 0; i < n; i++) {
4055 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4056 		u32 *buff = ch_pcie->outbuf;
4057 
4058 		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
4059 		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
4060 		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
4061 		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
4062 
4063 		t4_read_indirect(padap,
4064 				pcie_pio->ireg_addr,
4065 				pcie_pio->ireg_data,
4066 				buff,
4067 				pcie_pio->ireg_offset_range,
4068 				pcie_pio->ireg_local_offset);
4069 
4070 		ch_pcie++;
4071 	}
4072 
4073 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4074 	if (rc)
4075 		goto err1;
4076 
4077 	rc = compress_buff(&scratch_buff, dbg_buff);
4078 
4079 err1:
4080 	release_scratch_buff(&scratch_buff, dbg_buff);
4081 err:
4082 	return rc;
4083 
4084 }
4085 
/*
 * Dump the TP indirect register sets: TP_PIO, TP_TM_PIO and TP_MIB_INDEX.
 * The register tables are chip-specific (t5_* vs. t6_*); all three sets
 * are written into a single scratch buffer, one struct ireg_buf per table
 * row, and emitted as a compressed entity.
 *
 * Returns 0 on success or a CUDBG error code.
 */
static int collect_tp_indirect(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct ireg_buf *ch_tp_pio;
	u32 size;
	int i, rc, n = 0;

	if (is_t5(padap))
		n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
	else if (is_t6(padap))
		n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));

	/* "* 3" covers all three register sets (assumed equal-size). */
	size = sizeof(struct ireg_buf) * n * 3;
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	ch_tp_pio = (struct ireg_buf *)scratch_buff.data;

	/* TP_PIO*/
	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}

		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);

		ch_tp_pio++;
	}

	/* TP_TM_PIO*/
	if (is_t5(padap))
		n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
	else if (is_t6(padap))
		n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}

		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);

		ch_tp_pio++;
	}

	/* TP_MIB_INDEX*/
	if (is_t5(padap))
		n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
	else if (is_t6(padap))
		n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}

		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);

		ch_tp_pio++;
	}

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}
4204 
4205 static int collect_sge_indirect(struct cudbg_init *pdbg_init,
4206 				struct cudbg_buffer *dbg_buff,
4207 				struct cudbg_error *cudbg_err)
4208 {
4209 	struct cudbg_buffer scratch_buff;
4210 	struct adapter *padap = pdbg_init->adap;
4211 	struct ireg_buf *ch_sge_dbg;
4212 	u32 size;
4213 	int i, rc;
4214 
4215 	size = sizeof(struct ireg_buf) * 2;
4216 	scratch_buff.size = size;
4217 
4218 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4219 	if (rc)
4220 		goto err;
4221 
4222 	ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;
4223 
4224 	for (i = 0; i < 2; i++) {
4225 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
4226 		u32 *buff = ch_sge_dbg->outbuf;
4227 
4228 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
4229 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
4230 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
4231 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
4232 
4233 		t4_read_indirect(padap,
4234 				sge_pio->ireg_addr,
4235 				sge_pio->ireg_data,
4236 				buff,
4237 				sge_pio->ireg_offset_range,
4238 				sge_pio->ireg_local_offset);
4239 
4240 		ch_sge_dbg++;
4241 	}
4242 
4243 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4244 	if (rc)
4245 		goto err1;
4246 
4247 	rc = compress_buff(&scratch_buff, dbg_buff);
4248 
4249 err1:
4250 	release_scratch_buff(&scratch_buff, dbg_buff);
4251 err:
4252 	return rc;
4253 }
4254 
4255 static int collect_full(struct cudbg_init *pdbg_init,
4256 			struct cudbg_buffer *dbg_buff,
4257 			struct cudbg_error *cudbg_err)
4258 {
4259 	struct cudbg_buffer scratch_buff;
4260 	struct adapter *padap = pdbg_init->adap;
4261 	u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
4262 	u32 *sp;
4263 	int rc;
4264 	int nreg = 0;
4265 
4266 	/* Collect Registers:
4267 	 * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
4268 	 * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
4269 	 * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
4270 	 * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
4271 	 * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
4272 	 * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3)  This is for T6
4273 	 * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
4274 	 **/
4275 
4276 	if (is_t5(padap))
4277 		nreg = 6;
4278 	else if (is_t6(padap))
4279 		nreg = 7;
4280 
4281 	scratch_buff.size = nreg * sizeof(u32);
4282 
4283 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4284 	if (rc)
4285 		goto err;
4286 
4287 	sp = (u32 *)scratch_buff.data;
4288 
4289 	/* TP_DBG_SCHED_TX */
4290 	reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
4291 	reg_offset_range = 1;
4292 
4293 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4294 
4295 	sp++;
4296 
4297 	/* TP_DBG_SCHED_RX */
4298 	reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
4299 	reg_offset_range = 1;
4300 
4301 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4302 
4303 	sp++;
4304 
4305 	/* TP_DBG_CSIDE_INT */
4306 	reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
4307 	reg_offset_range = 1;
4308 
4309 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4310 
4311 	sp++;
4312 
4313 	/* TP_DBG_ESIDE_INT */
4314 	reg_local_offset = t5_tp_pio_array[8][2] + 3;
4315 	reg_offset_range = 1;
4316 
4317 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4318 
4319 	sp++;
4320 
4321 	/* PCIE_CDEBUG_INDEX[AppData0] */
4322 	reg_addr = t5_pcie_cdbg_array[0][0];
4323 	reg_data = t5_pcie_cdbg_array[0][1];
4324 	reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
4325 	reg_offset_range = 1;
4326 
4327 	t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
4328 			 reg_local_offset);
4329 
4330 	sp++;
4331 
4332 	if (is_t6(padap)) {
4333 		/* PCIE_CDEBUG_INDEX[AppData1] */
4334 		reg_addr = t5_pcie_cdbg_array[0][0];
4335 		reg_data = t5_pcie_cdbg_array[0][1];
4336 		reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
4337 		reg_offset_range = 1;
4338 
4339 		t4_read_indirect(padap, reg_addr, reg_data, sp,
4340 				 reg_offset_range, reg_local_offset);
4341 
4342 		sp++;
4343 	}
4344 
4345 	/* SGE_DEBUG_DATA_HIGH_INDEX_10 */
4346 	*sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);
4347 
4348 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4349 	if (rc)
4350 		goto err1;
4351 
4352 	rc = compress_buff(&scratch_buff, dbg_buff);
4353 
4354 err1:
4355 	release_scratch_buff(&scratch_buff, dbg_buff);
4356 err:
4357 	return rc;
4358 }
4359 
/*
 * Collect adapter VPD (Vital Product Data): serial, part, MAC and model
 * numbers plus the SCFG/VPD/firmware version words.  The implementation
 * is compiled out ("#ifdef notyet") in this port, so the entity
 * currently always reports CUDBG_STATUS_NOT_IMPLEMENTED.
 */
static int collect_vpd_data(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
#ifdef notyet
	struct cudbg_buffer scratch_buff;
	struct adapter *padap = pdbg_init->adap;
	struct struct_vpd_data *vpd_data;
	char vpd_ver[4];
	u32 fw_vers;
	u32 size;
	int rc;

	size = sizeof(struct struct_vpd_data);
	scratch_buff.size = size;

	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
	if (rc)
		goto err;

	vpd_data = (struct struct_vpd_data *)scratch_buff.data;

	/* The VPD fields live at chip-specific EEPROM addresses. */
	if (is_t5(padap)) {
		read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
		read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
		read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
		read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
	} else if (is_t6(padap)) {
		read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
		read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
		read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
		read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
	}

	/* Prefer asking the firmware for the SCFG version; if that is not
	 * possible (or fails), fall back to reading it directly below. */
	if (is_fw_attached(pdbg_init)) {
	   rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
	} else {
		rc = 1;
	}

	if (rc) {
		/* Now trying with backdoor mechanism */
		rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
				  (u8 *)&vpd_data->scfg_vers);
		if (rc)
			goto err1;
	}

	/* Same firmware-first / backdoor-fallback scheme for the VPD
	 * version word. */
	if (is_fw_attached(pdbg_init)) {
		rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
	} else {
		rc = 1;
	}

	if (rc) {
		/* Now trying with backdoor mechanism */
		rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
				  (u8 *)vpd_ver);
		if (rc)
			goto err1;
		/* read_vpd_reg returns a string of stored hex digits;
		 * reformat the 2-byte VPD version as a char string and
		 * parse it back as a base-16 number. */
		sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]);
		vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
	}

	/* Get FW version if it's not already filled in */
	fw_vers = padap->params.fw_vers;
	if (!fw_vers) {
		rc = t4_get_fw_version(padap, &fw_vers);
		if (rc)
			goto err1;
	}

	/* Split the packed version word into its component fields. */
	vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
	vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
	vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
	vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);
	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
#endif
	return (CUDBG_STATUS_NOT_IMPLEMENTED);
}
4453