xref: /freebsd/sys/dev/cxgbe/cudbg/cudbg_lib.c (revision 6ef644f5889afbd0f681b08ed1a2f369524af83e)
1 /*-
2  * Copyright (c) 2017 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 #include <sys/types.h>
29 #include <sys/param.h>
30 
31 #include "common/common.h"
32 #include "common/t4_regs.h"
33 #include "cudbg.h"
34 #include "cudbg_lib_common.h"
35 #include "cudbg_lib.h"
36 #include "cudbg_entity.h"
37 #define  BUFFER_WARN_LIMIT 10000000
38 
39 struct large_entity large_entity_list[] = {
40 	{CUDBG_EDC0, 0, 0},
41 	{CUDBG_EDC1, 0, 0},
42 	{CUDBG_MC0, 0, 0},
43 	{CUDBG_MC1, 0, 0}
44 };
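
/*
 * Entities in large_entity_list are skipped on the first collection pass
 * when the remaining output buffer falls to BUFFER_WARN_LIMIT or below;
 * cudbg_collect() retries them in a second pass once the smaller entities
 * have been collected.
 */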
45 
46 static int is_fw_attached(struct cudbg_init *pdbg_init)
47 {
48 
49 	return (pdbg_init->adap->flags & FW_OK);
50 }
51 
52 /* Pad debug_buffer with zero bytes so that the entity data ends up
53  * 4-byte aligned. */
54 static void align_debug_buffer(struct cudbg_buffer *dbg_buff,
55 			struct cudbg_entity_hdr *entity_hdr)
56 {
57 	u8 zero_buf[4] = {0};
58 	u8 padding, remain;
59 
60 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
61 	padding = 4 - remain;
62 	if (remain) {
63 		memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, &zero_buf,
64 		       padding);
65 		dbg_buff->offset += padding;
66 		entity_hdr->num_pad = padding;
67 	}
68 
69 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
70 }
71 
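/*
 * Read one SGE context image.  When the firmware is attached, the context
 * is read through the mailbox (t4_sge_ctxt_rd); otherwise, or if the
 * mailbox read fails, it is read directly from hardware
 * (t4_sge_ctxt_rd_bd).
 */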
72 static void read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
73 			  enum ctxt_type ctype, u32 *data)
74 {
75 	struct adapter *padap = pdbg_init->adap;
76 	int rc = -1;
77 
78 	if (is_fw_attached(pdbg_init)) {
79 		rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
80 		    "t4cudf");
81 		if (rc != 0)
82 			goto out;
83 		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
84 				    data);
85 		end_synchronized_op(padap, 0);
86 	}
87 
88 out:
89 	if (rc)
90 		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
91 }
92 
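/*
 * Walk the chain of extended entity headers that live past data_len and
 * return the first unused one in *entity_hdr, setting dbg_buff->offset to
 * that header's position (the caller skips past the header before writing
 * its data).
 */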
93 static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
94 			    struct cudbg_buffer *dbg_buff,
95 			    struct cudbg_entity_hdr **entity_hdr)
96 {
97 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
98 	int rc = 0;
99 	u32 ext_offset = cudbg_hdr->data_len;
100 	*ext_size = 0;
101 
102 	if (dbg_buff->size - dbg_buff->offset <=
103 		 sizeof(struct cudbg_entity_hdr)) {
104 		rc = CUDBG_STATUS_BUFFER_SHORT;
105 		goto err;
106 	}
107 
108 	*entity_hdr = (struct cudbg_entity_hdr *)
109 		       ((char *)outbuf + cudbg_hdr->data_len);
110 
111 	/* Find the last extended entity header */
112 	while ((*entity_hdr)->size) {
113 
114 		ext_offset += sizeof(struct cudbg_entity_hdr) +
115 				     (*entity_hdr)->size;
116 
117 		*ext_size += (*entity_hdr)->size +
118 			      sizeof(struct cudbg_entity_hdr);
119 
120 		if (dbg_buff->size - dbg_buff->offset + *ext_size  <=
121 			sizeof(struct cudbg_entity_hdr)) {
122 			rc = CUDBG_STATUS_BUFFER_SHORT;
123 			goto err;
124 		}
125 
126 		if (ext_offset != (*entity_hdr)->next_ext_offset) {
127 			ext_offset -= sizeof(struct cudbg_entity_hdr) +
128 				     (*entity_hdr)->size;
129 			break;
130 		}
131 
132 		(*entity_hdr)->next_ext_offset = *ext_size;
133 
134 		*entity_hdr = (struct cudbg_entity_hdr *)
135 					   ((char *)outbuf +
136 					   ext_offset);
137 	}
138 
139 	/* update the data offset */
140 	dbg_buff->offset = ext_offset;
141 err:
142 	return rc;
143 }
144 
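/*
 * Write one collected entity to the flash debug region.  If the entity
 * does not fit within CUDBG_FLASH_SIZE, record its size with
 * update_skip_size() and skip it.
 */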
145 static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
146 		       u32 cur_entity_data_offset,
147 		       u32 cur_entity_size,
148 		       int entity_nu, u32 ext_size)
149 {
150 	struct cudbg_private *priv = handle;
151 	struct cudbg_init *cudbg_init = &priv->dbg_init;
152 	struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
153 	u64 timestamp;
154 	u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
155 	u32 remain_flash_size;
156 	u32 flash_data_offset;
157 	u32 data_hdr_size;
158 	int rc = -1;
159 
160 	data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
161 			sizeof(struct cudbg_hdr);
162 
163 	flash_data_offset = (FLASH_CUDBG_NSECS *
164 			     (sizeof(struct cudbg_flash_hdr) +
165 			      data_hdr_size)) +
166 			    (cur_entity_data_offset - data_hdr_size);
167 
168 	if (flash_data_offset > CUDBG_FLASH_SIZE) {
169 		update_skip_size(sec_info, cur_entity_size);
170 		if (cudbg_init->verbose)
171 			cudbg_init->print("Skipping large entity...\n");
172 		return rc;
173 	}
174 
175 	remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
176 
177 	if (cur_entity_size > remain_flash_size) {
178 		update_skip_size(sec_info, cur_entity_size);
179 		if (cudbg_init->verbose)
180 			cudbg_init->print("Skipping large entity...\n");
181 	} else {
182 		timestamp = 0;
183 
184 		cur_entity_hdr_offset +=
185 			(sizeof(struct cudbg_entity_hdr) *
186 			(entity_nu - 1));
187 
188 		rc = cudbg_write_flash(handle, timestamp, dbg_buff,
189 				       cur_entity_data_offset,
190 				       cur_entity_hdr_offset,
191 				       cur_entity_size,
192 				       ext_size);
193 		if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
194 			cudbg_init->print("\n\tFLASH is full... "
195 				"cannot write any more data to flash\n\n");
196 	}
197 
198 	return rc;
199 }
200 
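/*
 * Top-level collection routine.  The output buffer is laid out as a
 * cudbg_hdr, followed by CUDBG_MAX_ENTITY entity headers, followed by the
 * compressed data of each requested entity.  dbg_bitmap selects the
 * entities to collect; setting CUDBG_ALL collects everything.
 */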
201 int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
202 {
203 	struct cudbg_entity_hdr *entity_hdr = NULL;
204 	struct cudbg_entity_hdr *ext_entity_hdr = NULL;
205 	struct cudbg_hdr *cudbg_hdr;
206 	struct cudbg_buffer dbg_buff;
207 	struct cudbg_error cudbg_err = {0};
208 	int large_entity_code;
209 
210 	u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
211 	struct cudbg_init *cudbg_init =
212 		&(((struct cudbg_private *)handle)->dbg_init);
213 	struct adapter *padap = cudbg_init->adap;
214 	u32 total_size, remaining_buf_size;
215 	u32 ext_size = 0;
216 	int index, bit, i, rc = -1;
217 	int all;
218 	bool flag_ext = 0;
219 
220 	reset_skip_entity();
221 
222 	dbg_buff.data = outbuf;
223 	dbg_buff.size = *outbuf_size;
224 	dbg_buff.offset = 0;
225 
226 	cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
227 	cudbg_hdr->signature = CUDBG_SIGNATURE;
228 	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
229 	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
230 	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
231 	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
232 	cudbg_hdr->chip_ver = padap->params.chipid;
233 
234 	if (cudbg_hdr->data_len)
235 		flag_ext = 1;
236 
237 	if (cudbg_init->use_flash) {
238 #ifndef notyet
239 		rc = t4_get_flash_params(padap);
240 		if (rc) {
241 			if (cudbg_init->verbose)
242 				cudbg_init->print("\nGet flash params failed.\n\n");
243 			cudbg_init->use_flash = 0;
244 		}
245 #endif
246 
247 #ifdef notyet
248 		/* Timestamp is mandatory. If it is not passed then disable
249 		 * flash support
250 		 */
251 		if (!cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time) {
252 			if (cudbg_init->verbose)
253 				cudbg_init->print("\nTimestamp param missing, "
254 					  "so ignoring flash write request\n\n");
255 			cudbg_init->use_flash = 0;
256 		}
257 #endif
258 	}
259 
260 	if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
261 	    dbg_buff.size) {
262 		rc = CUDBG_STATUS_SMALL_BUFF;
263 		total_size = cudbg_hdr->hdr_len;
264 		goto err;
265 	}
266 
267 	/* If the ext flag is set, move the offset to the end of the buffer
268 	 * so that we can append ext entities.
269 	 */
270 	if (flag_ext) {
271 		ext_entity_hdr = (struct cudbg_entity_hdr *)
272 			      ((char *)outbuf + cudbg_hdr->hdr_len +
273 			      (sizeof(struct cudbg_entity_hdr) *
274 			      (CUDBG_EXT_ENTITY - 1)));
275 		ext_entity_hdr->start_offset = cudbg_hdr->data_len;
276 		ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
277 		ext_entity_hdr->size = 0;
278 		dbg_buff.offset = cudbg_hdr->data_len;
279 	} else {
280 		dbg_buff.offset += cudbg_hdr->hdr_len; /* move 24 bytes*/
281 		dbg_buff.offset += CUDBG_MAX_ENTITY *
282 					sizeof(struct cudbg_entity_hdr);
283 	}
284 
285 	total_size = dbg_buff.offset;
286 	all = dbg_bitmap[0] & (1 << CUDBG_ALL);
287 
288 	/*sort(large_entity_list);*/
289 
290 	for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
291 		index = i / 8;
292 		bit = i % 8;
293 
294 		if (entity_list[i].bit == CUDBG_EXT_ENTITY)
295 			continue;
296 
297 		if (all || (dbg_bitmap[index] & (1 << bit))) {
298 
299 			if (!flag_ext) {
300 				rc = get_entity_hdr(outbuf, i, dbg_buff.size,
301 						    &entity_hdr);
302 				if (rc)
303 					cudbg_hdr->hdr_flags = rc;
304 			} else {
305 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
306 							     &dbg_buff,
307 							     &entity_hdr);
308 				if (rc)
309 					goto err;
310 
311 				/* move the offset after the ext header */
312 				dbg_buff.offset +=
313 					sizeof(struct cudbg_entity_hdr);
314 			}
315 
316 			entity_hdr->entity_type = i;
317 			entity_hdr->start_offset = dbg_buff.offset;
318 			/* process each entity by calling process_entity fp */
319 			remaining_buf_size = dbg_buff.size - dbg_buff.offset;
320 
321 			if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
322 			    is_large_entity(i)) {
323 				if (cudbg_init->verbose)
324 					cudbg_init->print("Skipping %s\n",
325 					    entity_list[i].name);
326 				skip_entity(i);
327 				continue;
328 			} else {
329 
330 				/* If fw_attach is 0, skip entities that
331 				 * communicate with the firmware.
332 				 */
333 
334 				if (!is_fw_attached(cudbg_init) &&
335 				    (entity_list[i].flag &
336 				    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
337 					if (cudbg_init->verbose)
338 						cudbg_init->print("Skipping %s entity, "
339 							  "because fw_attach "
340 							  "is 0\n",
341 							  entity_list[i].name);
342 					continue;
343 				}
344 
345 				if (cudbg_init->verbose)
346 					cudbg_init->print("Collecting debug entity: "
347 						  "%s\n", entity_list[i].name);
348 				memset(&cudbg_err, 0,
349 				       sizeof(struct cudbg_error));
350 				rc = process_entity[i-1](cudbg_init, &dbg_buff,
351 							 &cudbg_err);
352 			}
353 
354 			if (rc) {
355 				entity_hdr->size = 0;
356 				dbg_buff.offset = entity_hdr->start_offset;
357 			} else
358 				align_debug_buffer(&dbg_buff, entity_hdr);
359 
360 			if (cudbg_err.sys_err)
361 				rc = CUDBG_SYSTEM_ERROR;
362 
363 			entity_hdr->hdr_flags = rc;
364 			entity_hdr->sys_err = cudbg_err.sys_err;
365 			entity_hdr->sys_warn = cudbg_err.sys_warn;
366 
367 			/* We don't want to include ext entity size in global
368 			 * header
369 			 */
370 			if (!flag_ext)
371 				total_size += entity_hdr->size;
372 
373 			cudbg_hdr->data_len = total_size;
374 			*outbuf_size = total_size;
375 
376 			/* consider the size of the ext entity header and data
377 			 * also
378 			 */
379 			if (flag_ext) {
380 				ext_size += (sizeof(struct cudbg_entity_hdr) +
381 					     entity_hdr->size);
382 				entity_hdr->start_offset -= cudbg_hdr->data_len;
383 				ext_entity_hdr->size = ext_size;
384 				entity_hdr->next_ext_offset = ext_size;
385 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
386 			}
387 
388 			if (cudbg_init->use_flash) {
389 				if (flag_ext) {
390 					wr_entity_to_flash(handle, &dbg_buff,
391 							   ext_entity_hdr->start_offset,
392 							   entity_hdr->size,
393 							   CUDBG_EXT_ENTITY,
394 							   ext_size);
395 				} else
396 					wr_entity_to_flash(handle, &dbg_buff,
397 							   entity_hdr->start_offset,
398 							   entity_hdr->size,
399 							   i, ext_size);
406 			}
407 		}
408 	}
409 
410 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
412 		large_entity_code = large_entity_list[i].entity_code;
413 		if (large_entity_list[i].skip_flag) {
414 			if (!flag_ext) {
415 				rc = get_entity_hdr(outbuf, large_entity_code,
416 						    dbg_buff.size, &entity_hdr);
417 				if (rc)
418 					cudbg_hdr->hdr_flags = rc;
419 			} else {
420 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
421 							     &dbg_buff,
422 							     &entity_hdr);
423 				if (rc)
424 					goto err;
425 
426 				dbg_buff.offset +=
427 					sizeof(struct cudbg_entity_hdr);
428 			}
429 
430 			/* If fw_attach is 0, skip entities that
431 			 * communicate with the firmware.
432 			 */
433 			if (!is_fw_attached(cudbg_init) &&
434 			    (entity_list[large_entity_code].flag &
435 			    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
436 				if (cudbg_init->verbose)
437 					cudbg_init->print("Skipping %s entity, "
438 						  "because fw_attach "
439 						  "is 0\n",
440 						  entity_list[large_entity_code].name);
442 				continue;
443 			}
444 
445 			entity_hdr->entity_type = large_entity_code;
446 			entity_hdr->start_offset = dbg_buff.offset;
447 			if (cudbg_init->verbose)
448 				cudbg_init->print("Retrying debug entity: %s\n",
449 					  entity_list[large_entity_code].name);
450 
451 			memset(&cudbg_err, 0, sizeof(struct cudbg_error));
452 			rc = process_entity[large_entity_code - 1](cudbg_init,
453 								   &dbg_buff,
454 								   &cudbg_err);
455 			if (rc) {
456 				entity_hdr->size = 0;
457 				dbg_buff.offset = entity_hdr->start_offset;
458 			} else
459 				align_debug_buffer(&dbg_buff, entity_hdr);
460 
461 			if (cudbg_err.sys_err)
462 				rc = CUDBG_SYSTEM_ERROR;
463 
464 			entity_hdr->hdr_flags = rc;
465 			entity_hdr->sys_err = cudbg_err.sys_err;
466 			entity_hdr->sys_warn = cudbg_err.sys_warn;
467 
468 			/* We don't want to include ext entity size in global
469 			 * header
470 			 */
471 			if (!flag_ext)
472 				total_size += entity_hdr->size;
473 
474 			cudbg_hdr->data_len = total_size;
475 			*outbuf_size = total_size;
476 
477 			/* consider the size of the ext entity header and
478 			 * data also
479 			 */
480 			if (flag_ext) {
481 				ext_size += (sizeof(struct cudbg_entity_hdr) +
482 						   entity_hdr->size);
483 				entity_hdr->start_offset -=
484 							cudbg_hdr->data_len;
485 				ext_entity_hdr->size = ext_size;
486 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
487 			}
488 
489 			if (cudbg_init->use_flash) {
490 				if (flag_ext)
491 					wr_entity_to_flash(handle, &dbg_buff,
492 							   ext_entity_hdr->start_offset,
493 							   entity_hdr->size,
494 							   CUDBG_EXT_ENTITY,
495 							   ext_size);
496 				else
497 					wr_entity_to_flash(handle, &dbg_buff,
498 							   entity_hdr->start_offset,
499 							   entity_hdr->size,
500 							   large_entity_list[i].entity_code,
501 							   ext_size);
508 			}
509 		}
510 	}
511 
512 	cudbg_hdr->data_len = total_size;
513 	*outbuf_size = total_size;
514 
515 	if (flag_ext)
516 		*outbuf_size += ext_size;
517 
518 	return 0;
519 err:
520 	return rc;
521 }
522 
523 void reset_skip_entity(void)
524 {
525 	int i;
526 
527 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
528 		large_entity_list[i].skip_flag = 0;
529 }
530 
531 void skip_entity(int entity_code)
532 {
533 	int i;
534 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
536 		if (large_entity_list[i].entity_code == entity_code)
537 			large_entity_list[i].skip_flag = 1;
538 	}
539 }
540 
541 int is_large_entity(int entity_code)
542 {
543 	int i;
544 
545 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++) {
547 		if (large_entity_list[i].entity_code == entity_code)
548 			return 1;
549 	}
550 	return 0;
551 }
552 
553 int get_entity_hdr(void *outbuf, int i, u32 size,
554 		   struct cudbg_entity_hdr **entity_hdr)
555 {
556 	int rc = 0;
557 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
558 
559 	if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr)*i) > size)
560 		return CUDBG_STATUS_SMALL_BUFF;
561 
562 	*entity_hdr = (struct cudbg_entity_hdr *)
563 		      ((char *)outbuf+cudbg_hdr->hdr_len +
564 		       (sizeof(struct cudbg_entity_hdr)*(i-1)));
565 	return rc;
566 }
567 
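/*
 * The collect_*() handlers below follow a common pattern: carve a scratch
 * buffer out of the unused part of the output buffer, read the relevant
 * hardware state into it, write a compression header, and compress the
 * scratch data into the output buffer.
 */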
568 static int collect_rss(struct cudbg_init *pdbg_init,
569 		       struct cudbg_buffer *dbg_buff,
570 		       struct cudbg_error *cudbg_err)
571 {
572 	struct adapter *padap = pdbg_init->adap;
573 	struct cudbg_buffer scratch_buff;
574 	u32 size;
575 	int rc = 0;
576 
577 	size = padap->chip_params->rss_nentries * sizeof(u16);
578 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
579 	if (rc)
580 		goto err;
581 
582 	rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
583 	if (rc) {
584 		if (pdbg_init->verbose)
585 			pdbg_init->print("%s(), t4_read_rss failed, rc: %d\n",
586 				 __func__, rc);
587 		cudbg_err->sys_err = rc;
588 		goto err1;
589 	}
590 
591 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
592 	if (rc)
593 		goto err1;
594 
595 	rc = compress_buff(&scratch_buff, dbg_buff);
596 
597 err1:
598 	release_scratch_buff(&scratch_buff, dbg_buff);
599 err:
600 	return rc;
601 }
602 
603 static int collect_sw_state(struct cudbg_init *pdbg_init,
604 			    struct cudbg_buffer *dbg_buff,
605 			    struct cudbg_error *cudbg_err)
606 {
607 	struct adapter *padap = pdbg_init->adap;
608 	struct cudbg_buffer scratch_buff;
609 	struct sw_state *swstate;
610 	u32 size;
611 	int rc = 0;
612 
613 	size = sizeof(struct sw_state);
614 
615 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
616 	if (rc)
617 		goto err;
618 
619 	swstate = (struct sw_state *) scratch_buff.data;
620 
621 	swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
622 	snprintf(swstate->caller_string, sizeof(swstate->caller_string), "%s",
623 	    "FreeBSD");
624 	swstate->os_type = 0;
625 
626 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
627 	if (rc)
628 		goto err1;
629 
630 	rc = compress_buff(&scratch_buff, dbg_buff);
631 
632 err1:
633 	release_scratch_buff(&scratch_buff, dbg_buff);
634 err:
635 	return rc;
636 }
637 
638 static int collect_ddp_stats(struct cudbg_init *pdbg_init,
639 			     struct cudbg_buffer *dbg_buff,
640 			     struct cudbg_error *cudbg_err)
641 {
642 	struct adapter *padap = pdbg_init->adap;
643 	struct cudbg_buffer scratch_buff;
644 	struct tp_usm_stats  *tp_usm_stats_buff;
645 	u32 size;
646 	int rc = 0;
647 
648 	size = sizeof(struct tp_usm_stats);
649 
650 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
651 	if (rc)
652 		goto err;
653 
654 	tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;
655 
656 	/* spin_lock(&padap->stats_lock);	TODO*/
657 	t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
658 	/* spin_unlock(&padap->stats_lock);	TODO*/
659 
660 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
661 	if (rc)
662 		goto err1;
663 
664 	rc = compress_buff(&scratch_buff, dbg_buff);
665 
666 err1:
667 	release_scratch_buff(&scratch_buff, dbg_buff);
668 err:
669 	return rc;
670 }
671 
672 static int collect_ulptx_la(struct cudbg_init *pdbg_init,
673 			    struct cudbg_buffer *dbg_buff,
674 			    struct cudbg_error *cudbg_err)
675 {
676 	struct adapter *padap = pdbg_init->adap;
677 	struct cudbg_buffer scratch_buff;
678 	struct struct_ulptx_la *ulptx_la_buff;
679 	u32 size, i, j;
680 	int rc = 0;
681 
682 	size = sizeof(struct struct_ulptx_la);
683 
684 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
685 	if (rc)
686 		goto err;
687 
688 	ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;
689 
690 	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
691 		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
692 						      A_ULP_TX_LA_RDPTR_0 +
693 						      0x10 * i);
694 		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
695 						      A_ULP_TX_LA_WRPTR_0 +
696 						      0x10 * i);
697 		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
698 						       A_ULP_TX_LA_RDDATA_0 +
699 						       0x10 * i);
700 		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
701 			ulptx_la_buff->rd_data[i][j] =
702 				t4_read_reg(padap,
703 					    A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
704 		}
705 	}
706 
707 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
708 	if (rc)
709 		goto err1;
710 
711 	rc = compress_buff(&scratch_buff, dbg_buff);
712 
713 err1:
714 	release_scratch_buff(&scratch_buff, dbg_buff);
715 err:
716 	return rc;
717 
718 }
719 
720 static int collect_ulprx_la(struct cudbg_init *pdbg_init,
721 			    struct cudbg_buffer *dbg_buff,
722 			    struct cudbg_error *cudbg_err)
723 {
724 	struct adapter *padap = pdbg_init->adap;
725 	struct cudbg_buffer scratch_buff;
726 	struct struct_ulprx_la *ulprx_la_buff;
727 	u32 size;
728 	int rc = 0;
729 
730 	size = sizeof(struct struct_ulprx_la);
731 
732 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
733 	if (rc)
734 		goto err;
735 
736 	ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
737 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
738 	ulprx_la_buff->size = ULPRX_LA_SIZE;
739 
740 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
741 	if (rc)
742 		goto err1;
743 
744 	rc = compress_buff(&scratch_buff, dbg_buff);
745 
746 err1:
747 	release_scratch_buff(&scratch_buff, dbg_buff);
748 err:
749 	return rc;
750 }
751 
752 static int collect_cpl_stats(struct cudbg_init *pdbg_init,
753 			     struct cudbg_buffer *dbg_buff,
754 			     struct cudbg_error *cudbg_err)
755 {
756 	struct adapter *padap = pdbg_init->adap;
757 	struct cudbg_buffer scratch_buff;
758 	struct struct_tp_cpl_stats *tp_cpl_stats_buff;
759 	u32 size;
760 	int rc = 0;
761 
762 	size = sizeof(struct struct_tp_cpl_stats);
763 
764 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
765 	if (rc)
766 		goto err;
767 
768 	tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
769 	tp_cpl_stats_buff->nchan = padap->chip_params->nchan;
770 
771 	/* spin_lock(&padap->stats_lock);	TODO*/
772 	t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
773 	/* spin_unlock(&padap->stats_lock);	TODO*/
774 
775 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
776 	if (rc)
777 		goto err1;
778 
779 	rc = compress_buff(&scratch_buff, dbg_buff);
780 
781 err1:
782 	release_scratch_buff(&scratch_buff, dbg_buff);
783 err:
784 	return rc;
785 }
786 
787 static int collect_wc_stats(struct cudbg_init *pdbg_init,
788 			    struct cudbg_buffer *dbg_buff,
789 			    struct cudbg_error *cudbg_err)
790 {
791 	struct adapter *padap = pdbg_init->adap;
792 	struct cudbg_buffer scratch_buff;
793 	struct struct_wc_stats *wc_stats_buff;
794 	u32 val1;
795 	u32 val2;
796 	u32 size;
797 
798 	int rc = 0;
799 
800 	size = sizeof(struct struct_wc_stats);
801 
802 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
803 	if (rc)
804 		goto err;
805 
806 	wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;
807 
808 	if (!is_t4(padap)) {
809 		val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
810 		val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
811 		wc_stats_buff->wr_cl_success = val1 - val2;
812 		wc_stats_buff->wr_cl_fail = val2;
813 	} else {
814 		wc_stats_buff->wr_cl_success = 0;
815 		wc_stats_buff->wr_cl_fail = 0;
816 	}
817 
818 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
819 	if (rc)
820 		goto err1;
821 
822 	rc = compress_buff(&scratch_buff, dbg_buff);
823 err1:
824 	release_scratch_buff(&scratch_buff, dbg_buff);
825 err:
826 	return rc;
827 }
828 
829 static int mem_desc_cmp(const void *a, const void *b)
830 {
831 	return ((const struct struct_mem_desc *)a)->base -
832 		((const struct struct_mem_desc *)b)->base;
833 }
834 
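/*
 * Build a sorted map of the adapter's memory: the available EDRAM/MC
 * regions and the hardware structures placed in them.  Used by the
 * meminfo entity and by collect_dump_context() to size the context reads.
 */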
835 static int fill_meminfo(struct adapter *padap,
836 			struct struct_meminfo *meminfo_buff)
837 {
838 	struct struct_mem_desc *md;
839 	u32 size, lo, hi;
840 	u32 used, alloc;
841 	int n, i, rc = 0;
842 
843 	size = sizeof(struct struct_meminfo);
844 
845 	memset(meminfo_buff->avail, 0,
846 	       ARRAY_SIZE(meminfo_buff->avail) *
847 	       sizeof(struct struct_mem_desc));
848 	memset(meminfo_buff->mem, 0,
849 	       (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
850 	md  = meminfo_buff->mem;
851 
852 	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
853 		meminfo_buff->mem[i].limit = 0;
854 		meminfo_buff->mem[i].idx = i;
855 	}
856 
857 	i = 0;
858 
859 	lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
860 
861 	if (lo & F_EDRAM0_ENABLE) {
862 		hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
863 		meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
864 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
865 					       (G_EDRAM0_SIZE(hi) << 20);
866 		meminfo_buff->avail[i].idx = 0;
867 		i++;
868 	}
869 
870 	if (lo & F_EDRAM1_ENABLE) {
871 		hi =  t4_read_reg(padap, A_MA_EDRAM1_BAR);
872 		meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
873 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
874 					       (G_EDRAM1_SIZE(hi) << 20);
875 		meminfo_buff->avail[i].idx = 1;
876 		i++;
877 	}
878 
879 	if (is_t5(padap)) {
880 		if (lo & F_EXT_MEM0_ENABLE) {
881 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
882 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
883 			meminfo_buff->avail[i].limit =
884 				meminfo_buff->avail[i].base +
885 				(G_EXT_MEM_SIZE(hi) << 20);
886 			meminfo_buff->avail[i].idx = 3;
887 			i++;
888 		}
889 
890 		if (lo & F_EXT_MEM1_ENABLE) {
891 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
892 			meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
893 			meminfo_buff->avail[i].limit =
894 				meminfo_buff->avail[i].base +
895 				(G_EXT_MEM1_SIZE(hi) << 20);
896 			meminfo_buff->avail[i].idx = 4;
897 			i++;
898 		}
899 	} else if (is_t6(padap)) {
900 		if (lo & F_EXT_MEM_ENABLE) {
901 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
902 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
903 			meminfo_buff->avail[i].limit =
904 				meminfo_buff->avail[i].base +
905 				(G_EXT_MEM_SIZE(hi) << 20);
906 			meminfo_buff->avail[i].idx = 2;
907 			i++;
908 		}
909 	}
910 
911 	if (!i) {				   /* no memory available */
912 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
913 		goto err;
914 	}
915 
916 	meminfo_buff->avail_c = i;
917 	qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
918 	    mem_desc_cmp);
919 	(md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
920 	(md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
921 	(md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
922 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
923 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
924 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
925 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
926 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
927 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);
928 
929 	/* the next few have explicit upper bounds */
930 	md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
931 	md->limit = md->base - 1 +
932 		    t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE) *
933 		    G_PMTXMAXPAGE(t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE));
937 	md++;
938 
939 	md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
940 	md->limit = md->base - 1 +
941 		    t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) *
942 		    G_PMRXMAXPAGE(t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE));
946 	md++;
947 	if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
948 		if (chip_id(padap) <= CHELSIO_T5) {
949 			hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
950 			md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
951 		} else {
952 			hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
953 			md->base = t4_read_reg(padap,
954 					       A_LE_DB_HASH_TBL_BASE_ADDR);
955 		}
956 		md->limit = 0;
957 	} else {
958 		md->base = 0;
959 		md->idx = ARRAY_SIZE(region);  /* hide it */
960 	}
961 	md++;
962 #define ulp_region(reg) \
963 	{\
964 		md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
965 		(md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
966 	}
967 
968 	ulp_region(RX_ISCSI);
969 	ulp_region(RX_TDDP);
970 	ulp_region(TX_TPT);
971 	ulp_region(RX_STAG);
972 	ulp_region(RX_RQ);
973 	ulp_region(RX_RQUDP);
974 	ulp_region(RX_PBL);
975 	ulp_region(TX_PBL);
976 #undef ulp_region
977 	md->base = 0;
978 	md->idx = ARRAY_SIZE(region);
979 	if (!is_t4(padap)) {
980 		u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
981 		u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);

		size = 0;	/* only report the DBVFIFO region if it is enabled */
982 		if (is_t5(padap)) {
983 			if (sge_ctrl & F_VFIFO_ENABLE)
984 				size = G_DBVFIFO_SIZE(fifo_size);
985 		} else
986 			size = G_T6_DBVFIFO_SIZE(fifo_size);
987 
988 		if (size) {
989 			md->base = G_BASEADDR(t4_read_reg(padap,
990 							  A_SGE_DBVFIFO_BADDR));
991 			md->limit = md->base + (size << 2) - 1;
992 		}
993 	}
994 
995 	md++;
996 
997 	md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
998 	md->limit = 0;
999 	md++;
1000 	md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
1001 	md->limit = 0;
1002 	md++;
1003 #ifndef __NO_DRIVER_OCQ_SUPPORT__
1004 	/*md->base = padap->vres.ocq.start;*/
1005 	/*if (adap->vres.ocq.size)*/
1006 	/*	  md->limit = md->base + adap->vres.ocq.size - 1;*/
1007 	/*else*/
1008 	md->idx = ARRAY_SIZE(region);  /* hide it */
1009 	md++;
1010 #endif
1011 
1012 	/* add any address-space holes, there can be up to 3 */
1013 	for (n = 0; n < i - 1; n++)
1014 		if (meminfo_buff->avail[n].limit <
1015 		    meminfo_buff->avail[n + 1].base)
1016 			(md++)->base = meminfo_buff->avail[n].limit;
1017 
1018 	if (meminfo_buff->avail[n].limit)
1019 		(md++)->base = meminfo_buff->avail[n].limit;
1020 
1021 	n = (int) (md - meminfo_buff->mem);
1022 	meminfo_buff->mem_c = n;
1023 
1024 	qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
1025 	    mem_desc_cmp);
1026 
1027 	lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
1028 	hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
1029 	meminfo_buff->up_ram_lo = lo;
1030 	meminfo_buff->up_ram_hi = hi;
1031 
1032 	lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
1033 	hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
1034 	meminfo_buff->up_extmem2_lo = lo;
1035 	meminfo_buff->up_extmem2_hi = hi;
1036 
1037 	lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
1038 	meminfo_buff->rx_pages_data[0] =  G_PMRXMAXPAGE(lo);
1039 	meminfo_buff->rx_pages_data[1] =
1040 		t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
1041 	meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1;
1042 
1043 	lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
1044 	hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
1045 	meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
1046 	meminfo_buff->tx_pages_data[1] =
1047 		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
1048 	meminfo_buff->tx_pages_data[2] =
1049 		hi >= (1 << 20) ? 'M' : 'K';
1050 	meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);
1051 
1052 	for (i = 0; i < 4; i++) {
1053 		if (chip_id(padap) > CHELSIO_T5)
1054 			lo = t4_read_reg(padap,
1055 					 A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
1056 		else
1057 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
1058 		if (is_t5(padap)) {
1059 			used = G_T5_USED(lo);
1060 			alloc = G_T5_ALLOC(lo);
1061 		} else {
1062 			used = G_USED(lo);
1063 			alloc = G_ALLOC(lo);
1064 		}
1065 		meminfo_buff->port_used[i] = used;
1066 		meminfo_buff->port_alloc[i] = alloc;
1067 	}
1068 
1069 	for (i = 0; i < padap->chip_params->nchan; i++) {
1070 		if (chip_id(padap) > CHELSIO_T5)
1071 			lo = t4_read_reg(padap,
1072 					 A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
1073 		else
1074 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
1075 		if (is_t5(padap)) {
1076 			used = G_T5_USED(lo);
1077 			alloc = G_T5_ALLOC(lo);
1078 		} else {
1079 			used = G_USED(lo);
1080 			alloc = G_ALLOC(lo);
1081 		}
1082 		meminfo_buff->loopback_used[i] = used;
1083 		meminfo_buff->loopback_alloc[i] = alloc;
1084 	}
1085 err:
1086 	return rc;
1087 }
1088 
1089 static int collect_meminfo(struct cudbg_init *pdbg_init,
1090 			   struct cudbg_buffer *dbg_buff,
1091 			   struct cudbg_error *cudbg_err)
1092 {
1093 	struct adapter *padap = pdbg_init->adap;
1094 	struct cudbg_buffer scratch_buff;
1095 	struct struct_meminfo *meminfo_buff;
1096 	int rc = 0;
1097 	u32 size;
1098 
1099 	size = sizeof(struct struct_meminfo);
1100 
1101 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1102 	if (rc)
1103 		goto err;
1104 
1105 	meminfo_buff = (struct struct_meminfo *)scratch_buff.data;
1106 
1107 	rc = fill_meminfo(padap, meminfo_buff);
1108 	if (rc)
1109 		goto err;
1110 
1111 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1112 	if (rc)
1113 		goto err1;
1114 
1115 	rc = compress_buff(&scratch_buff, dbg_buff);
1116 err1:
1117 	release_scratch_buff(&scratch_buff, dbg_buff);
1118 err:
1119 	return rc;
1120 }
1121 
1122 static int collect_lb_stats(struct cudbg_init *pdbg_init,
1123 			    struct cudbg_buffer *dbg_buff,
1124 			    struct cudbg_error *cudbg_err)
1125 {
1126 	struct adapter *padap = pdbg_init->adap;
1127 	struct cudbg_buffer scratch_buff;
1128 	struct lb_port_stats *tmp_stats;
1129 	struct struct_lb_stats *lb_stats_buff;
1130 	u32 i, n, size;
1131 	int rc = 0;
1132 
1133 	rc = padap->params.nports;
1134 	if (rc < 0)
1135 		goto err;
1136 
1137 	n = rc;
1138 	size = sizeof(struct struct_lb_stats) +
1139 	       n * sizeof(struct lb_port_stats);
1140 
1141 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1142 	if (rc)
1143 		goto err;
1144 
1145 	lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;
1146 
1147 	lb_stats_buff->nchan = n;
1148 	tmp_stats = lb_stats_buff->s;
1149 
1150 	for (i = 0; i < n; i += 2, tmp_stats += 2) {
1151 		t4_get_lb_stats(padap, i, tmp_stats);
1152 		t4_get_lb_stats(padap, i + 1, tmp_stats+1);
1153 	}
1154 
1155 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1156 	if (rc)
1157 		goto err1;
1158 
1159 	rc = compress_buff(&scratch_buff, dbg_buff);
1160 err1:
1161 	release_scratch_buff(&scratch_buff, dbg_buff);
1162 err:
1163 	return rc;
1164 }
1165 
1166 static int collect_rdma_stats(struct cudbg_init *pdbg_init,
1167 			      struct cudbg_buffer *dbg_buff,
1168 			      struct cudbg_error *cudbg_err)
1169 {
1170 	struct adapter *padap = pdbg_init->adap;
1171 	struct cudbg_buffer scratch_buff;
1172 	struct tp_rdma_stats *rdma_stats_buff;
1173 	u32 size;
1174 	int rc = 0;
1175 
1176 	size = sizeof(struct tp_rdma_stats);
1177 
1178 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1179 	if (rc)
1180 		goto err;
1181 
1182 	rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;
1183 
1184 	/* spin_lock(&padap->stats_lock);	TODO*/
1185 	t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
1186 	/* spin_unlock(&padap->stats_lock);	TODO*/
1187 
1188 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1189 	if (rc)
1190 		goto err1;
1191 
1192 	rc = compress_buff(&scratch_buff, dbg_buff);
1193 err1:
1194 	release_scratch_buff(&scratch_buff, dbg_buff);
1195 err:
1196 	return rc;
1197 }
1198 
1199 static int collect_clk_info(struct cudbg_init *pdbg_init,
1200 			    struct cudbg_buffer *dbg_buff,
1201 			    struct cudbg_error *cudbg_err)
1202 {
1203 	struct cudbg_buffer scratch_buff;
1204 	struct adapter *padap = pdbg_init->adap;
1205 	struct struct_clk_info *clk_info_buff;
1206 	u64 tp_tick_us;
1207 	int size;
1208 	int rc = 0;
1209 
1210 	if (!padap->params.vpd.cclk) {
1211 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1212 		goto err;
1213 	}
1214 
1215 	size = sizeof(struct struct_clk_info);
1216 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1217 	if (rc)
1218 		goto err;
1219 
1220 	clk_info_buff = (struct struct_clk_info *) scratch_buff.data;
1221 
1222 	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* in ps */
1224 	clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
1225 	clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
1226 	clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
1227 	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1228 	/* in us */
1229 	clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
1230 				      clk_info_buff->dack_re) / 1000000) *
1231 				     t4_read_reg(padap, A_TP_DACK_TIMER);
1232 
1233 	clk_info_buff->retransmit_min =
1234 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
1235 	clk_info_buff->retransmit_max =
1236 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);
1237 
1238 	clk_info_buff->persist_timer_min =
1239 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
1240 	clk_info_buff->persist_timer_max =
1241 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);
1242 
1243 	clk_info_buff->keepalive_idle_timer =
1244 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
1245 	clk_info_buff->keepalive_interval =
1246 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);
1247 
1248 	clk_info_buff->initial_srtt =
1249 		tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
1250 	clk_info_buff->finwait2_timer =
1251 		tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);
1252 
1253 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1254 
1255 	if (rc)
1256 		goto err1;
1257 
1258 	rc = compress_buff(&scratch_buff, dbg_buff);
1259 err1:
1260 	release_scratch_buff(&scratch_buff, dbg_buff);
1261 err:
1262 	return rc;
1263 
1264 }
1265 
1266 static int collect_macstats(struct cudbg_init *pdbg_init,
1267 			    struct cudbg_buffer *dbg_buff,
1268 			    struct cudbg_error *cudbg_err)
1269 {
1270 	struct adapter *padap = pdbg_init->adap;
1271 	struct cudbg_buffer scratch_buff;
1272 	struct struct_mac_stats_rev1 *mac_stats_buff;
1273 	u32 i, n, size;
1274 	int rc = 0;
1275 
1276 	rc = padap->params.nports;
1277 	if (rc < 0)
1278 		goto err;
1279 
1280 	n = rc;
1281 	size = sizeof(struct struct_mac_stats_rev1);
1282 
1283 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1284 	if (rc)
1285 		goto err;
1286 
1287 	mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;
1288 
1289 	mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1290 	mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
1291 	mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
1292 				       sizeof(struct cudbg_ver_hdr);
1293 
1294 	mac_stats_buff->port_count = n;
1295 	for (i = 0; i <  mac_stats_buff->port_count; i++)
1296 		t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
1297 
1298 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1299 	if (rc)
1300 		goto err1;
1301 
1302 	rc = compress_buff(&scratch_buff, dbg_buff);
1303 err1:
1304 	release_scratch_buff(&scratch_buff, dbg_buff);
1305 err:
1306 	return rc;
1307 }
1308 
1309 static int collect_cim_pif_la(struct cudbg_init *pdbg_init,
1310 			      struct cudbg_buffer *dbg_buff,
1311 			      struct cudbg_error *cudbg_err)
1312 {
1313 	struct adapter *padap = pdbg_init->adap;
1314 	struct cudbg_buffer scratch_buff;
1315 	struct cim_pif_la *cim_pif_la_buff;
1316 	u32 size;
1317 	int rc = 0;
1318 
1319 	size = sizeof(struct cim_pif_la) +
1320 	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1321 
1322 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1323 	if (rc)
1324 		goto err;
1325 
1326 	cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
1327 	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1328 
1329 	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1330 			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1331 			   NULL, NULL);
1332 
1333 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1334 	if (rc)
1335 		goto err1;
1336 
1337 	rc = compress_buff(&scratch_buff, dbg_buff);
1338 err1:
1339 	release_scratch_buff(&scratch_buff, dbg_buff);
1340 err:
1341 	return rc;
1342 }
1343 
1344 static int collect_tp_la(struct cudbg_init *pdbg_init,
1345 			 struct cudbg_buffer *dbg_buff,
1346 			 struct cudbg_error *cudbg_err)
1347 {
1348 	struct adapter *padap = pdbg_init->adap;
1349 	struct cudbg_buffer scratch_buff;
1350 	struct struct_tp_la *tp_la_buff;
1351 	u32 size;
1352 	int rc = 0;
1353 
1354 	size = sizeof(struct struct_tp_la) + TPLA_SIZE *  sizeof(u64);
1355 
1356 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1357 	if (rc)
1358 		goto err;
1359 
1360 	tp_la_buff = (struct struct_tp_la *) scratch_buff.data;
1361 
1362 	tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
1363 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1364 
1365 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1366 	if (rc)
1367 		goto err1;
1368 
1369 	rc = compress_buff(&scratch_buff, dbg_buff);
1370 err1:
1371 	release_scratch_buff(&scratch_buff, dbg_buff);
1372 err:
1373 	return rc;
1374 }
1375 
1376 static int collect_fcoe_stats(struct cudbg_init *pdbg_init,
1377 			      struct cudbg_buffer *dbg_buff,
1378 			      struct cudbg_error *cudbg_err)
1379 {
1380 	struct adapter *padap = pdbg_init->adap;
1381 	struct cudbg_buffer scratch_buff;
1382 	struct struct_tp_fcoe_stats  *tp_fcoe_stats_buff;
1383 	u32 size;
1384 	int rc = 0;
1385 
1386 	size = sizeof(struct struct_tp_fcoe_stats);
1387 
1388 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1389 	if (rc)
1390 		goto err;
1391 
1392 	tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
1393 
1394 	/* spin_lock(&padap->stats_lock);	TODO*/
1395 	t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
1396 	t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
1397 	if (padap->chip_params->nchan == NCHAN) {
1398 		t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
1399 		t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
1400 	}
1401 	/* spin_unlock(&padap->stats_lock);	TODO*/
1402 
1403 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1404 	if (rc)
1405 		goto err1;
1406 
1407 	rc = compress_buff(&scratch_buff, dbg_buff);
1408 err1:
1409 	release_scratch_buff(&scratch_buff, dbg_buff);
1410 err:
1411 	return rc;
1412 }
1413 
1414 static int collect_tp_err_stats(struct cudbg_init *pdbg_init,
1415 				struct cudbg_buffer *dbg_buff,
1416 				struct cudbg_error *cudbg_err)
1417 {
1418 	struct adapter *padap = pdbg_init->adap;
1419 	struct cudbg_buffer scratch_buff;
1420 	struct struct_tp_err_stats *tp_err_stats_buff;
1421 	u32 size;
1422 	int rc = 0;
1423 
1424 	size = sizeof(struct struct_tp_err_stats);
1425 
1426 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1427 	if (rc)
1428 		goto err;
1429 
1430 	tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
1431 
1432 	/* spin_lock(&padap->stats_lock);	TODO*/
1433 	t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
1434 	/* spin_unlock(&padap->stats_lock);	TODO*/
1435 	tp_err_stats_buff->nchan = padap->chip_params->nchan;
1436 
1437 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1438 	if (rc)
1439 		goto err1;
1440 
1441 	rc = compress_buff(&scratch_buff, dbg_buff);
1442 err1:
1443 	release_scratch_buff(&scratch_buff, dbg_buff);
1444 err:
1445 	return rc;
1446 }
1447 
1448 static int collect_tcp_stats(struct cudbg_init *pdbg_init,
1449 			     struct cudbg_buffer *dbg_buff,
1450 			     struct cudbg_error *cudbg_err)
1451 {
1452 	struct adapter *padap = pdbg_init->adap;
1453 	struct cudbg_buffer scratch_buff;
1454 	struct struct_tcp_stats *tcp_stats_buff;
1455 	u32 size;
1456 	int rc = 0;
1457 
1458 	size = sizeof(struct struct_tcp_stats);
1459 
1460 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1461 	if (rc)
1462 		goto err;
1463 
1464 	tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
1465 
1466 	/* spin_lock(&padap->stats_lock);	TODO*/
1467 	t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
1468 	/* spin_unlock(&padap->stats_lock);	TODO*/
1469 
1470 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1471 	if (rc)
1472 		goto err1;
1473 
1474 	rc = compress_buff(&scratch_buff, dbg_buff);
1475 err1:
1476 	release_scratch_buff(&scratch_buff, dbg_buff);
1477 err:
1478 	return rc;
1479 }
1480 
1481 static int collect_hw_sched(struct cudbg_init *pdbg_init,
1482 			    struct cudbg_buffer *dbg_buff,
1483 			    struct cudbg_error *cudbg_err)
1484 {
1485 	struct adapter *padap = pdbg_init->adap;
1486 	struct cudbg_buffer scratch_buff;
1487 	struct struct_hw_sched *hw_sched_buff;
1488 	u32 size;
1489 	int i, rc = 0;
1490 
1491 	if (!padap->params.vpd.cclk) {
1492 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1493 		goto err;
1494 	}
1495 
1496 	size = sizeof(struct struct_hw_sched);
1497 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1498 	if (rc)
1499 		goto err;
1500 
1501 	hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
1502 
1503 	hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
1504 	hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
1505 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1506 
1507 	for (i = 0; i < NTX_SCHED; ++i) {
1508 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1509 		    &hw_sched_buff->ipg[i], 1);
1510 	}
1511 
1512 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1513 	if (rc)
1514 		goto err1;
1515 
1516 	rc = compress_buff(&scratch_buff, dbg_buff);
1517 err1:
1518 	release_scratch_buff(&scratch_buff, dbg_buff);
1519 err:
1520 	return rc;
1521 }
1522 
1523 static int collect_pm_stats(struct cudbg_init *pdbg_init,
1524 			    struct cudbg_buffer *dbg_buff,
1525 			    struct cudbg_error *cudbg_err)
1526 {
1527 	struct adapter *padap = pdbg_init->adap;
1528 	struct cudbg_buffer scratch_buff;
1529 	struct struct_pm_stats *pm_stats_buff;
1530 	u32 size;
1531 	int rc = 0;
1532 
1533 	size = sizeof(struct struct_pm_stats);
1534 
1535 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1536 	if (rc)
1537 		goto err;
1538 
1539 	pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
1540 
1541 	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1542 	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1543 
1544 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1545 	if (rc)
1546 		goto err1;
1547 
1548 	rc = compress_buff(&scratch_buff, dbg_buff);
1549 err1:
1550 	release_scratch_buff(&scratch_buff, dbg_buff);
1551 err:
1552 	return rc;
1553 }
1554 
1555 static int collect_path_mtu(struct cudbg_init *pdbg_init,
1556 			    struct cudbg_buffer *dbg_buff,
1557 			    struct cudbg_error *cudbg_err)
1558 {
1559 	struct adapter *padap = pdbg_init->adap;
1560 	struct cudbg_buffer scratch_buff;
1561 	u32 size;
1562 	int rc = 0;
1563 
1564 	size = NMTUS  * sizeof(u16);
1565 
1566 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1567 	if (rc)
1568 		goto err;
1569 
1570 	t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
1571 
1572 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1573 	if (rc)
1574 		goto err1;
1575 
1576 	rc = compress_buff(&scratch_buff, dbg_buff);
1577 err1:
1578 	release_scratch_buff(&scratch_buff, dbg_buff);
1579 err:
1580 	return rc;
1581 }
1582 
1583 static int collect_rss_key(struct cudbg_init *pdbg_init,
1584 			   struct cudbg_buffer *dbg_buff,
1585 			   struct cudbg_error *cudbg_err)
1586 {
1587 	struct adapter *padap = pdbg_init->adap;
1588 	struct cudbg_buffer scratch_buff;
1589 	u32 size;
1590 
1591 	int rc = 0;
1592 
1593 	size = 10  * sizeof(u32);
1594 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1595 	if (rc)
1596 		goto err;
1597 
1598 	t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
1599 
1600 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1601 	if (rc)
1602 		goto err1;
1603 
1604 	rc = compress_buff(&scratch_buff, dbg_buff);
1605 err1:
1606 	release_scratch_buff(&scratch_buff, dbg_buff);
1607 err:
1608 	return rc;
1609 }
1610 
1611 static int collect_rss_config(struct cudbg_init *pdbg_init,
1612 			      struct cudbg_buffer *dbg_buff,
1613 			      struct cudbg_error *cudbg_err)
1614 {
1615 	struct adapter *padap = pdbg_init->adap;
1616 	struct cudbg_buffer scratch_buff;
1617 	struct rss_config *rss_conf;
1618 	int rc;
1619 	u32 size;
1620 
1621 	size = sizeof(struct rss_config);
1622 
1623 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1624 	if (rc)
1625 		goto err;
1626 
1627 	rss_conf =  (struct rss_config *)scratch_buff.data;
1628 
1629 	rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
1630 	rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
1631 	rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
1632 	rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
1633 	rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
1634 	rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
1635 	rss_conf->chip = padap->params.chipid;
1636 
1637 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1638 	if (rc)
1639 		goto err1;
1640 
1641 	rc = compress_buff(&scratch_buff, dbg_buff);
1642 
1643 err1:
1644 	release_scratch_buff(&scratch_buff, dbg_buff);
1645 err:
1646 	return rc;
1647 }
1648 
1649 static int collect_rss_vf_config(struct cudbg_init *pdbg_init,
1650 				 struct cudbg_buffer *dbg_buff,
1651 				 struct cudbg_error *cudbg_err)
1652 {
1653 	struct adapter *padap = pdbg_init->adap;
1654 	struct cudbg_buffer scratch_buff;
1655 	struct rss_vf_conf *vfconf;
1656 	int vf, rc, vf_count;
1657 	u32 size;
1658 
1659 	vf_count = padap->chip_params->vfcount;
1660 	size = vf_count * sizeof(*vfconf);
1661 
1662 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1663 	if (rc)
1664 		goto err;
1665 
1666 	vfconf =  (struct rss_vf_conf *)scratch_buff.data;
1667 
1668 	for (vf = 0; vf < vf_count; vf++) {
1669 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1670 				      &vfconf[vf].rss_vf_vfh, 1);
1671 	}
1672 
1673 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1674 	if (rc)
1675 		goto err1;
1676 
1677 	rc = compress_buff(&scratch_buff, dbg_buff);
1678 
1679 err1:
1680 	release_scratch_buff(&scratch_buff, dbg_buff);
1681 err:
1682 	return rc;
1683 }
1684 
1685 static int collect_rss_pf_config(struct cudbg_init *pdbg_init,
1686 				 struct cudbg_buffer *dbg_buff,
1687 				 struct cudbg_error *cudbg_err)
1688 {
1689 	struct cudbg_buffer scratch_buff;
1690 	struct rss_pf_conf *pfconf;
1691 	struct adapter *padap = pdbg_init->adap;
1692 	u32 rss_pf_map, rss_pf_mask, size;
1693 	int pf, rc;
1694 
1695 	size = 8  * sizeof(*pfconf);
1696 
1697 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1698 	if (rc)
1699 		goto err;
1700 
1701 	pfconf =  (struct rss_pf_conf *)scratch_buff.data;
1702 
1703 	rss_pf_map = t4_read_rss_pf_map(padap, 1);
1704 	rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
1705 
1706 	for (pf = 0; pf < 8; pf++) {
1707 		pfconf[pf].rss_pf_map = rss_pf_map;
1708 		pfconf[pf].rss_pf_mask = rss_pf_mask;
1709 		/* no return val */
1710 		t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
1711 	}
1712 
1713 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1714 	if (rc)
1715 		goto err1;
1716 
1717 	rc = compress_buff(&scratch_buff, dbg_buff);
1718 err1:
1719 	release_scratch_buff(&scratch_buff, dbg_buff);
1720 err:
1721 	return rc;
1722 }
1723 
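/*
 * Test the per-type valid bit (bit 176, 141, or 89 of the raw image for
 * EGRESS, INGRESS, and FLM contexts respectively) in a context read by
 * read_sge_ctxt(); only contexts that pass this check are copied into the
 * dump.
 */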
1724 static int check_valid(u32 *buf, int type)
1725 {
1726 	int index;
1727 	int bit;
1728 	int bit_pos = 0;
1729 
1730 	switch (type) {
1731 	case CTXT_EGRESS:
1732 		bit_pos = 176;
1733 		break;
1734 	case CTXT_INGRESS:
1735 		bit_pos = 141;
1736 		break;
1737 	case CTXT_FLM:
1738 		bit_pos = 89;
1739 		break;
1740 	}
1741 	index = bit_pos / 32;
1742 	bit =  bit_pos % 32;
1743 
1744 	return buf[index] & (1U << bit);
1745 }
1746 
1747 /**
1748  * Get EGRESS, INGRESS, FLM, and CNM max qid.
1749  *
1750  * For EGRESS and INGRESS, do the following calculation.
1751  * max_qid = (DBQ/IMSG context region size in bytes) /
1752  *	     (size of context in bytes).
1753  *
1754  * For FLM, do the following calculation.
1755  * max_qid = (FLM cache region size in bytes) /
1756  *	     ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
1757  *
1758  * There's a 1-to-1 mapping between FLM and CNM if there's no header splitting
1759  * enabled; i.e., max CNM qid is equal to max FLM qid. However, if header
1760  * splitting is enabled, then max CNM qid is half of max FLM qid.
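 *
 * Worked example (hypothetical numbers): a 1 MB FLM cache region with
 * EDRAMPTRCNT selecting 64 pointers per qid gives
 * max_qid = 1048576 / (64 * 8) = 2048; with header splitting enabled the
 * CNM max qid would then be 2048 / 2 = 1024.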
1761  */
1762 static int get_max_ctxt_qid(struct adapter *padap,
1763 			    struct struct_meminfo *meminfo,
1764 			    u32 *max_ctx_qid, u8 nelem)
1765 {
1766 	u32 i, idx, found = 0;
1767 
1768 	if (nelem != (CTXT_CNM + 1))
1769 		return -EINVAL;
1770 
1771 	for (i = 0; i < meminfo->mem_c; i++) {
1772 		if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
1773 			continue;                        /* skip holes */
1774 
1775 		idx = meminfo->mem[i].idx;
1776 		/* Get DBQ, IMSG, and FLM context region size */
1777 		if (idx <= CTXT_FLM) {
1778 			if (!(meminfo->mem[i].limit))
1779 				meminfo->mem[i].limit =
1780 					i < meminfo->mem_c - 1 ?
1781 					meminfo->mem[i + 1].base - 1 : ~0;
1782 
1783 			if (idx < CTXT_FLM) {
1784 				/* Get EGRESS and INGRESS max qid. */
1785 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1786 						    meminfo->mem[i].base + 1) /
1787 						   CUDBG_CTXT_SIZE_BYTES;
1788 				found++;
1789 			} else {
1790 				/* Get FLM and CNM max qid. */
1791 				u32 value, edram_ptr_count;
1792 				u8 bytes_per_ptr = 8;
1793 				u8 nohdr;
1794 
1795 				value = t4_read_reg(padap, A_SGE_FLM_CFG);
1796 
1797 				/* NOHDR set means header splitting is disabled. */
1798 				nohdr = (value >> S_NOHDR) & 1U;
1799 
1800 				/* Get the number of pointers in EDRAM per
1801 				 * qid in units of 32.
1802 				 */
1803 				edram_ptr_count = 32 *
1804 						  (1U << G_EDRAMPTRCNT(value));
1805 
1806 				/* EDRAMPTRCNT value of 3 is reserved.
1807 				 * So don't exceed 128.
1808 				 */
1809 				if (edram_ptr_count > 128)
1810 					edram_ptr_count = 128;
1811 
1812 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1813 						    meminfo->mem[i].base + 1) /
1814 						   (edram_ptr_count *
1815 						    bytes_per_ptr);
1816 				found++;
1817 
1818 				/* CNM has 1-to-1 mapping with FLM.
1819 				 * However, if header splitting is enabled,
1820 				 * then max CNM qid is half of max FLM qid.
1821 				 */
1822 				max_ctx_qid[CTXT_CNM] = nohdr ?
1823 							max_ctx_qid[idx] :
1824 							max_ctx_qid[idx] >> 1;
1825 
1826 				/* One more increment for CNM */
1827 				found++;
1828 			}
1829 		}
1830 		if (found == nelem)
1831 			break;
1832 	}
1833 
1834 	/* Sanity check. Ensure the values are within known max. */
1835 	max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
1836 					 M_CTXTQID);
1837 	max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
1838 					  CUDBG_MAX_INGRESS_QIDS);
1839 	max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
1840 				      CUDBG_MAX_FL_QIDS);
1841 	max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
1842 				      CUDBG_MAX_CNM_QIDS);
1843 	return 0;
1844 }
1845 
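/*
 * Dump SGE queue contexts.  For each queue type, contexts are read from
 * qid 0 up to the computed max qid; only valid ones are kept and emitted
 * as cudbg_ch_cntxt records (each valid FLM context is paired with a CNM
 * read of the same qid).
 */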
1846 static int collect_dump_context(struct cudbg_init *pdbg_init,
1847 				struct cudbg_buffer *dbg_buff,
1848 				struct cudbg_error *cudbg_err)
1849 {
1850 	struct cudbg_buffer scratch_buff;
1851 	struct cudbg_buffer temp_buff;
1852 	struct adapter *padap = pdbg_init->adap;
1853 	u32 size = 0, next_offset = 0, total_size = 0;
1854 	struct cudbg_ch_cntxt *buff = NULL;
1855 	struct struct_meminfo meminfo;
1856 	int bytes = 0;
1857 	int rc = 0;
1858 	u32 i, j;
1859 	u32 max_ctx_qid[CTXT_CNM + 1];
1860 	bool limit_qid = false;
1861 	u32 qid_count = 0;
1862 
1863 	rc = fill_meminfo(padap, &meminfo);
1864 	if (rc)
1865 		goto err;
1866 
1867 	/* Get max valid qid for each type of queue */
1868 	rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
1869 	if (rc)
1870 		goto err;
1871 
1872 	/* There are four types of queues. Collect context up to the max
1873 	 * qid of each type of queue.
1874 	 */
1875 	for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1876 		size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];
1877 
1878 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1879 	if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
1880 		/* Not enough scratch memory available.
1881 		 * Collect at most CUDBG_LOWMEM_MAX_CTXT_QIDS contexts
1882 		 * for each queue type.
1883 		 */
1884 		size = 0;
1885 		for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1886 			size += sizeof(struct cudbg_ch_cntxt) *
1887 				CUDBG_LOWMEM_MAX_CTXT_QIDS;
1888 
1889 		limit_qid = true;
1890 		rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1891 		if (rc)
1892 			goto err;
1893 	}
1894 
1895 	buff = (struct cudbg_ch_cntxt *)scratch_buff.data;
1896 
1897 	/* Collect context data */
1898 	for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
1899 		qid_count = 0;
1900 		for (j = 0; j < max_ctx_qid[i]; j++) {
1901 			read_sge_ctxt(pdbg_init, j, i, buff->data);
1902 
1903 			rc = check_valid(buff->data, i);
1904 			if (rc) {
1905 				buff->cntxt_type = i;
1906 				buff->cntxt_id = j;
1907 				buff++;
1908 				total_size += sizeof(struct cudbg_ch_cntxt);
1909 
1910 				if (i == CTXT_FLM) {
1911 					read_sge_ctxt(pdbg_init, j, CTXT_CNM,
1912 						      buff->data);
1913 					buff->cntxt_type = CTXT_CNM;
1914 					buff->cntxt_id = j;
1915 					buff++;
1916 					total_size +=
1917 						sizeof(struct cudbg_ch_cntxt);
1918 				}
1919 				qid_count++;
1920 			}
1921 
1922 			/* If there's not enough space to collect more qids,
1923 			 * then bail and move on to next queue type.
1924 			 */
1925 			if (limit_qid &&
1926 			    qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
1927 				break;
1928 		}
1929 	}
1930 
1931 	scratch_buff.size = total_size;
1932 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1933 	if (rc)
1934 		goto err1;
1935 
1936 	/* Splitting buffer and writing in terms of CUDBG_CHUNK_SIZE */
1937 	while (total_size > 0) {
1938 		bytes = min_t(unsigned long, (unsigned long)total_size,
1939 			      (unsigned long)CUDBG_CHUNK_SIZE);
1940 		temp_buff.size = bytes;
1941 		temp_buff.data = (void *)((char *)scratch_buff.data +
1942 					  next_offset);
1943 
1944 		rc = compress_buff(&temp_buff, dbg_buff);
1945 		if (rc)
1946 			goto err1;
1947 
1948 		total_size -= bytes;
1949 		next_offset += bytes;
1950 	}
1951 
1952 err1:
1953 	scratch_buff.size = size;
1954 	release_scratch_buff(&scratch_buff, dbg_buff);
1955 err:
1956 	return rc;
1957 }
1958 
1959 static int collect_fw_devlog(struct cudbg_init *pdbg_init,
1960 			     struct cudbg_buffer *dbg_buff,
1961 			     struct cudbg_error *cudbg_err)
1962 {
1963 #ifdef notyet
1964 	struct adapter *padap = pdbg_init->adap;
1965 	struct devlog_params *dparams = &padap->params.devlog;
1966 	struct cudbg_param *params = NULL;
1967 	struct cudbg_buffer scratch_buff;
1968 	u32 offset;
1969 	int rc = 0, i;
1970 
1971 	rc = t4_init_devlog_params(padap, 1);
1972 
1973 	if (rc < 0) {
1974 		pdbg_init->print("%s(), t4_init_devlog_params failed, rc: "
1975 				 "%d\n", __func__, rc);
1976 		for (i = 0; i < pdbg_init->dbg_params_cnt; i++) {
1977 			if (pdbg_init->dbg_params[i].param_type ==
1978 			    CUDBG_DEVLOG_PARAM) {
1979 				params = &pdbg_init->dbg_params[i];
1980 				break;
1981 			}
1982 		}
1983 
1984 		if (params) {
1985 			dparams->memtype = params->u.devlog_param.memtype;
1986 			dparams->start = params->u.devlog_param.start;
1987 			dparams->size = params->u.devlog_param.size;
1988 		} else {
1989 			cudbg_err->sys_err = rc;
1990 			goto err;
1991 		}
1992 	}
1993 
1994 	rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
1995 
1996 	if (rc)
1997 		goto err;
1998 
1999 	/* Collect FW devlog */
2000 	if (dparams->start != 0) {
2001 		offset = scratch_buff.offset;
2002 		rc = t4_memory_rw(padap, padap->params.drv_memwin,
2003 				  dparams->memtype, dparams->start,
2004 				  dparams->size,
2005 				  (__be32 *)((char *)scratch_buff.data +
2006 					     offset), 1);
2007 
2008 		if (rc) {
2009 			pdbg_init->print("%s(), t4_memory_rw failed, rc: "
2010 					 "%d\n", __func__, rc);
2011 			cudbg_err->sys_err = rc;
2012 			goto err1;
2013 		}
2014 	}
2015 
2016 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2017 
2018 	if (rc)
2019 		goto err1;
2020 
2021 	rc = compress_buff(&scratch_buff, dbg_buff);
2022 
2023 err1:
2024 	release_scratch_buff(&scratch_buff, dbg_buff);
2025 err:
2026 	return rc;
2027 #endif
2028 	return (CUDBG_STATUS_NOT_IMPLEMENTED);
2029 }
2030 /* CIM OBQ */
2031 
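/*
 * The collectors below are thin wrappers around read_cim_obq().  The qid
 * selects the outbound queue: 0-3 are ULP0-ULP3, 4 is SGE, 5 is NCSI and
 * 6-7 are the SGE RX queues 0/1.
 */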
2032 static int collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
2033 				struct cudbg_buffer *dbg_buff,
2034 				struct cudbg_error *cudbg_err)
2035 {
2036 	int rc = 0, qid = 0;
2037 
2038 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2039 
2040 	return rc;
2041 }
2042 
2043 static int collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
2044 				struct cudbg_buffer *dbg_buff,
2045 				struct cudbg_error *cudbg_err)
2046 {
2047 	int rc = 0, qid = 1;
2048 
2049 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2050 
2051 	return rc;
2052 }
2053 
2054 static int collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
2055 				struct cudbg_buffer *dbg_buff,
2056 				struct cudbg_error *cudbg_err)
2057 {
2058 	int rc = 0, qid = 2;
2059 
2060 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2061 
2062 	return rc;
2063 }
2064 
2065 static int collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
2066 				struct cudbg_buffer *dbg_buff,
2067 				struct cudbg_error *cudbg_err)
2068 {
2069 	int rc = 0, qid = 3;
2070 
2071 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2072 
2073 	return rc;
2074 }
2075 
2076 static int collect_cim_obq_sge(struct cudbg_init *pdbg_init,
2077 			       struct cudbg_buffer *dbg_buff,
2078 			       struct cudbg_error *cudbg_err)
2079 {
2080 	int rc = 0, qid = 4;
2081 
2082 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2083 
2084 	return rc;
2085 }
2086 
2087 static int collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
2088 				struct cudbg_buffer *dbg_buff,
2089 				struct cudbg_error *cudbg_err)
2090 {
2091 	int rc = 0, qid = 5;
2092 
2093 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2094 
2095 	return rc;
2096 }
2097 
2098 static int collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
2099 				 struct cudbg_buffer *dbg_buff,
2100 				 struct cudbg_error *cudbg_err)
2101 {
2102 	int rc = 0, qid = 6;
2103 
2104 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2105 
2106 	return rc;
2107 }
2108 
2109 static int collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
2110 				 struct cudbg_buffer *dbg_buff,
2111 				 struct cudbg_error *cudbg_err)
2112 {
2113 	int rc = 0, qid = 7;
2114 
2115 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2116 
2117 	return rc;
2118 }
2119 
2120 static int read_cim_obq(struct cudbg_init *pdbg_init,
2121 			struct cudbg_buffer *dbg_buff,
2122 			struct cudbg_error *cudbg_err, int qid)
2123 {
2124 	struct cudbg_buffer scratch_buff;
2125 	struct adapter *padap = pdbg_init->adap;
2126 	u32 qsize;
2127 	int rc;
2128 	int no_of_read_words;
2129 
2130 	/* collect CIM OBQ */
2131 	qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);
2132 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2133 	if (rc)
2134 		goto err;
2135 
2136 	/* t4_read_cim_obq will return no. of read words or error */
2137 	no_of_read_words = t4_read_cim_obq(padap, qid,
2138 					   (u32 *)((u32 *)scratch_buff.data +
2139 					   scratch_buff.offset), qsize);
2140 
2141 	/* no_of_read_words less than or equal to 0 indicates an error */
2142 	if (no_of_read_words <= 0) {
2143 		if (no_of_read_words == 0)
2144 			rc = CUDBG_SYSTEM_ERROR;
2145 		else
2146 			rc = no_of_read_words;
2147 		if (pdbg_init->verbose)
2148 			pdbg_init->print("%s: t4_read_cim_obq failed (%d)\n",
2149 				 __func__, rc);
2150 		cudbg_err->sys_err = rc;
2151 		goto err1;
2152 	}
2153 
2154 	scratch_buff.size = no_of_read_words * 4;
2155 
2156 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2157 
2158 	if (rc)
2159 		goto err1;
2160 
2161 	rc = compress_buff(&scratch_buff, dbg_buff);
2162 
2163 	if (rc)
2164 		goto err1;
2165 
2166 err1:
2167 	release_scratch_buff(&scratch_buff, dbg_buff);
2168 err:
2169 	return rc;
2170 }
2171 
2172 /* CIM IBQ */
2173 
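/*
 * Inbound queue collectors, analogous to the OBQ ones above.  The qid
 * selects the queue: 0-1 are TP0/TP1, 2 is ULP, 3-4 are SGE0/SGE1 and
 * 5 is NCSI.
 */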
2174 static int collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
2175 			       struct cudbg_buffer *dbg_buff,
2176 			       struct cudbg_error *cudbg_err)
2177 {
2178 	int rc = 0, qid = 0;
2179 
2180 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2181 	return rc;
2182 }
2183 
2184 static int collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
2185 			       struct cudbg_buffer *dbg_buff,
2186 			       struct cudbg_error *cudbg_err)
2187 {
2188 	int rc = 0, qid = 1;
2189 
2190 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2191 	return rc;
2192 }
2193 
2194 static int collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
2195 			       struct cudbg_buffer *dbg_buff,
2196 			       struct cudbg_error *cudbg_err)
2197 {
2198 	int rc = 0, qid = 2;
2199 
2200 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2201 	return rc;
2202 }
2203 
2204 static int collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
2205 				struct cudbg_buffer *dbg_buff,
2206 				struct cudbg_error *cudbg_err)
2207 {
2208 	int rc = 0, qid = 3;
2209 
2210 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2211 	return rc;
2212 }
2213 
2214 static int collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
2215 				struct cudbg_buffer *dbg_buff,
2216 				struct cudbg_error *cudbg_err)
2217 {
2218 	int rc = 0, qid = 4;
2219 
2220 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2221 	return rc;
2222 }
2223 
2224 static int collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
2225 				struct cudbg_buffer *dbg_buff,
2226 				struct cudbg_error *cudbg_err)
2227 {
2228 	int rc, qid = 5;
2229 
2230 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2231 	return rc;
2232 }
2233 
2234 static int read_cim_ibq(struct cudbg_init *pdbg_init,
2235 			struct cudbg_buffer *dbg_buff,
2236 			struct cudbg_error *cudbg_err, int qid)
2237 {
2238 	struct adapter *padap = pdbg_init->adap;
2239 	struct cudbg_buffer scratch_buff;
2240 	u32 qsize;
2241 	int rc;
2242 	int no_of_read_words;
2243 
2244 	/* collect CIM IBQ */
2245 	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
2246 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2247 
2248 	if (rc)
2249 		goto err;
2250 
2251 	/* t4_read_cim_ibq will return no. of read words or error */
2252 	no_of_read_words = t4_read_cim_ibq(padap, qid,
2253 					   (u32 *)((u32 *)scratch_buff.data +
2254 					   scratch_buff.offset), qsize);
2255 	/* no_of_read_words less than or equal to 0 indicates an error */
2256 	if (no_of_read_words <= 0) {
2257 		if (no_of_read_words == 0)
2258 			rc = CUDBG_SYSTEM_ERROR;
2259 		else
2260 			rc = no_of_read_words;
2261 		if (pdbg_init->verbose)
2262 			pdbg_init->print("%s: t4_read_cim_ibq failed (%d)\n",
2263 				 __func__, rc);
2264 		cudbg_err->sys_err = rc;
2265 		goto err1;
2266 	}
2267 
2268 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2269 	if (rc)
2270 		goto err1;
2271 
2272 	rc = compress_buff(&scratch_buff, dbg_buff);
2273 	if (rc)
2274 		goto err1;
2275 
2276 err1:
2277 	release_scratch_buff(&scratch_buff, dbg_buff);
2278 
2279 err:
2280 	return rc;
2281 }
2282 
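/*
 * Capture the CIM MA logic analyzer.  t4_cim_read_ma_la() fills two arrays
 * of MA LA data, stored back to back in the scratch buffer before
 * compression.
 */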
2283 static int collect_cim_ma_la(struct cudbg_init *pdbg_init,
2284 			     struct cudbg_buffer *dbg_buff,
2285 			     struct cudbg_error *cudbg_err)
2286 {
2287 	struct cudbg_buffer scratch_buff;
2288 	struct adapter *padap = pdbg_init->adap;
2289 	u32 rc = 0;
2290 
2291 	/* collect CIM MA LA */
2292 	scratch_buff.size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
2293 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2294 	if (rc)
2295 		goto err;
2296 
2297 	/* t4_cim_read_ma_la() returns void */
2298 	t4_cim_read_ma_la(padap,
2299 			  (u32 *) ((char *)scratch_buff.data +
2300 				   scratch_buff.offset),
2301 			  (u32 *) ((char *)scratch_buff.data +
2302 				   scratch_buff.offset + 5 * CIM_MALA_SIZE));
2303 
2304 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2305 	if (rc)
2306 		goto err1;
2307 
2308 	rc = compress_buff(&scratch_buff, dbg_buff);
2309 
2310 err1:
2311 	release_scratch_buff(&scratch_buff, dbg_buff);
2312 err:
2313 	return rc;
2314 }
2315 
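/*
 * Capture the CIM logic analyzer.  The buffer is sized per chip generation
 * (T6 stores each group of 10 LA entries in 11 words, earlier chips use
 * 8 words per entry) and the UP_DBG_LA_CFG register value is stored ahead
 * of the LA contents.
 */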
2316 static int collect_cim_la(struct cudbg_init *pdbg_init,
2317 			  struct cudbg_buffer *dbg_buff,
2318 			  struct cudbg_error *cudbg_err)
2319 {
2320 	struct cudbg_buffer scratch_buff;
2321 	struct adapter *padap = pdbg_init->adap;
2322 
2323 	int rc;
2324 	u32 cfg = 0;
2325 	int size;
2326 
2327 	/* collect CIM LA */
2328 	if (is_t6(padap)) {
2329 		size = padap->params.cim_la_size / 10 + 1;
2330 		size *= 11 * sizeof(u32);
2331 	} else {
2332 		size = padap->params.cim_la_size / 8;
2333 		size *= 8 * sizeof(u32);
2334 	}
2335 
2336 	size += sizeof(cfg);
2337 
2338 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
2339 	if (rc)
2340 		goto err;
2341 
2342 	rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
2343 
2344 	if (rc) {
2345 		if (pdbg_init->verbose)
2346 			pdbg_init->print("%s: t4_cim_read failed (%d)\n",
2347 				 __func__, rc);
2348 		cudbg_err->sys_err = rc;
2349 		goto err1;
2350 	}
2351 
2352 	memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
2353 	       sizeof(cfg));
2354 
2355 	rc = t4_cim_read_la(padap,
2356 			    (u32 *) ((char *)scratch_buff.data +
2357 				     scratch_buff.offset + sizeof(cfg)), NULL);
2358 	if (rc < 0) {
2359 		if (pdbg_init->verbose)
2360 			pdbg_init->print("%s: t4_cim_read_la failed (%d)\n",
2361 				 __func__, rc);
2362 		cudbg_err->sys_err = rc;
2363 		goto err1;
2364 	}
2365 
2366 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2367 	if (rc)
2368 		goto err1;
2369 
2370 	rc = compress_buff(&scratch_buff, dbg_buff);
2371 	if (rc)
2372 		goto err1;
2373 
2374 err1:
2375 	release_scratch_buff(&scratch_buff, dbg_buff);
2376 err:
2377 	return rc;
2378 }
2379 
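/*
 * Collect the CIM queue configuration: the IBQ/OBQ pointer state read via
 * t4_cim_read() and the per-queue base/size/threshold arrays filled in by
 * t4_read_cimq_cfg().
 */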
2380 static int collect_cim_qcfg(struct cudbg_init *pdbg_init,
2381 			    struct cudbg_buffer *dbg_buff,
2382 			    struct cudbg_error *cudbg_err)
2383 {
2384 	struct cudbg_buffer scratch_buff;
2385 	struct adapter *padap = pdbg_init->adap;
2386 	u32 offset;
2387 	int rc = 0;
2388 
2389 	struct struct_cim_qcfg *cim_qcfg_data = NULL;
2390 
2391 	rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
2392 			      &scratch_buff);
2393 
2394 	if (rc)
2395 		goto err;
2396 
2397 	offset = scratch_buff.offset;
2398 
2399 	cim_qcfg_data =
2400 		(struct struct_cim_qcfg *)((u8 *)((char *)scratch_buff.data +
2401 					   offset));
2402 
2403 	rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
2404 			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
2405 
2406 	if (rc) {
2407 		if (pdbg_init->verbose)
2408 			pdbg_init->print("%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
2409 			    __func__, rc);
2410 		cudbg_err->sys_err = rc;
2411 		goto err1;
2412 	}
2413 
2414 	rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
2415 			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
2416 			 cim_qcfg_data->obq_wr);
2417 
2418 	if (rc) {
2419 		if (pdbg_init->verbose)
2420 			pdbg_init->print("%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
2421 			    __func__, rc);
2422 		cudbg_err->sys_err = rc;
2423 		goto err1;
2424 	}
2425 
2426 	/* t4_read_cimq_cfg() returns void */
2427 	t4_read_cimq_cfg(padap,
2428 			cim_qcfg_data->base,
2429 			cim_qcfg_data->size,
2430 			cim_qcfg_data->thres);
2431 
2432 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2433 	if (rc)
2434 		goto err1;
2435 
2436 	rc = compress_buff(&scratch_buff, dbg_buff);
2437 	if (rc)
2438 		goto err1;
2439 
2440 err1:
2441 	release_scratch_buff(&scratch_buff, dbg_buff);
2442 err:
2443 	return rc;
2444 }
2445 
2446 /**
2447  * Fetch the TX/RX payload regions start and end.
2448  *
2449  * @padap (IN): adapter handle.
2450  * @mem_type (IN): EDC0, EDC1, MC/MC0/MC1.
2451  * @mem_tot_len (IN): total length of @mem_type memory region to read.
2452  * @payload_type (IN): TX or RX Payload.
2453  * @reg_info (OUT): store the payload region info.
2454  *
2455  * Fetch the TX/RX payload region information from meminfo.
2456  * However, reading from the @mem_type region starts at 0 and not
2457  * from whatever base info is stored in meminfo.  Hence, if the
2458  * payload region exists, then calculate the payload region
2459  * start and end relative to 0 and @mem_tot_len, respectively, and set
2460  * @reg_info->exist to true. Otherwise, set @reg_info->exist to false.
2461  */
2462 #ifdef notyet
2463 static int get_payload_range(struct adapter *padap, u8 mem_type,
2464 			     unsigned long mem_tot_len, u8 payload_type,
2465 			     struct struct_region_info *reg_info)
2466 {
2467 	struct struct_meminfo meminfo;
2468 	struct struct_mem_desc mem_region;
2469 	struct struct_mem_desc payload;
2470 	u32 i, idx, found = 0;
2471 	u8 mc_type;
2472 	int rc;
2473 
2474 	/* Get meminfo of all regions */
2475 	rc = fill_meminfo(padap, &meminfo);
2476 	if (rc)
2477 		return rc;
2478 
2479 	/* Extract the specified TX or RX Payload region range */
2480 	memset(&payload, 0, sizeof(struct struct_mem_desc));
2481 	for (i = 0; i < meminfo.mem_c; i++) {
2482 		if (meminfo.mem[i].idx >= ARRAY_SIZE(region))
2483 			continue;                        /* skip holes */
2484 
2485 		idx = meminfo.mem[i].idx;
2486 		/* Get TX or RX Payload region start and end */
2487 		if (idx == payload_type) {
2488 			if (!(meminfo.mem[i].limit))
2489 				meminfo.mem[i].limit =
2490 					i < meminfo.mem_c - 1 ?
2491 					meminfo.mem[i + 1].base - 1 : ~0;
2492 
2493 			memcpy(&payload, &meminfo.mem[i], sizeof(payload));
2494 			found = 1;
2495 			break;
2496 		}
2497 	}
2498 
2499 	/* If TX or RX Payload region is not found return error. */
2500 	if (!found)
2501 		return -EINVAL;
2502 
2503 	if (mem_type < MEM_MC) {
2504 		memcpy(&mem_region, &meminfo.avail[mem_type],
2505 		       sizeof(mem_region));
2506 	} else {
2507 		/* Check if both MC0 and MC1 exist by checking if a
2508 		 * base address for the specified @mem_type exists.
2509 		 * If a base address exists, then there is MC1 and
2510 		 * hence use the base address stored at index 3.
2511 		 * Otherwise, use the base address stored at index 2.
2512 		 */
2513 		mc_type = meminfo.avail[mem_type].base ?
2514 			  mem_type : mem_type - 1;
2515 		memcpy(&mem_region, &meminfo.avail[mc_type],
2516 		       sizeof(mem_region));
2517 	}
2518 
2519 	/* Check if payload region exists in current memory */
2520 	if (payload.base < mem_region.base && payload.limit < mem_region.base) {
2521 		reg_info->exist = false;
2522 		return 0;
2523 	}
2524 
2525 	/* Get Payload region start and end with respect to 0 and
2526 	 * mem_tot_len, respectively.  This is because reading from the
2527 	 * memory region starts at 0 and not at base info stored in meminfo.
2528 	 */
2529 	if (payload.base < mem_region.limit) {
2530 		reg_info->exist = true;
2531 		if (payload.base >= mem_region.base)
2532 			reg_info->start = payload.base - mem_region.base;
2533 		else
2534 			reg_info->start = 0;
2535 
2536 		if (payload.limit < mem_region.limit)
2537 			reg_info->end = payload.limit - mem_region.base;
2538 		else
2539 			reg_info->end = mem_tot_len;
2540 	}
2541 
2542 	return 0;
2543 }
2544 #endif
2545 
2546 static int read_fw_mem(struct cudbg_init *pdbg_init,
2547 			struct cudbg_buffer *dbg_buff, u8 mem_type,
2548 			unsigned long tot_len, struct cudbg_error *cudbg_err)
2549 {
2550 #ifdef notyet
2551 	struct cudbg_buffer scratch_buff;
2552 	struct adapter *padap = pdbg_init->adap;
2553 	unsigned long bytes_read = 0;
2554 	unsigned long bytes_left;
2555 	unsigned long bytes;
2556 	int	      rc;
2557 	struct struct_region_info payload[2]; /* TX and RX Payload Region */
2558 	u16 get_payload_flag;
2559 	u8 i;
2560 
2561 	get_payload_flag =
2562 		pdbg_init->dbg_params[CUDBG_GET_PAYLOAD_PARAM].param_type;
2563 
2564 	/* If explicitly asked to get TX/RX Payload data,
2565 	 * then don't zero out the payload data. Otherwise,
2566 	 * zero out the payload data.
2567 	 */
2568 	if (!get_payload_flag) {
2569 		u8 region_index[2];
2570 		u8 j = 0;
2571 
2572 		/* Find the index of TX and RX Payload regions in meminfo */
2573 		for (i = 0; i < ARRAY_SIZE(region); i++) {
2574 			if (!strcmp(region[i], "Tx payload:") ||
2575 			    !strcmp(region[i], "Rx payload:")) {
2576 				region_index[j] = i;
2577 				j++;
2578 				if (j == 2)
2579 					break;
2580 			}
2581 		}
2582 
2583 		/* Get TX/RX Payload region range if they exist */
2584 		memset(payload, 0, ARRAY_SIZE(payload) * sizeof(payload[0]));
2585 		for (i = 0; i < ARRAY_SIZE(payload); i++) {
2586 			rc = get_payload_range(padap, mem_type, tot_len,
2587 					       region_index[i],
2588 					       &payload[i]);
2589 			if (rc)
2590 				goto err;
2591 
2592 			if (payload[i].exist) {
2593 				/* Align start and end to avoid wrap around */
2594 				payload[i].start =
2595 					roundup(payload[i].start,
2596 					    CUDBG_CHUNK_SIZE);
2597 				payload[i].end =
2598 					rounddown(payload[i].end,
2599 					    CUDBG_CHUNK_SIZE);
2600 			}
2601 		}
2602 	}
2603 
2604 	bytes_left = tot_len;
2605 	scratch_buff.size = tot_len;
2606 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2607 	if (rc)
2608 		goto err;
2609 
2610 	while (bytes_left > 0) {
2611 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2612 		rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
2613 
2614 		if (rc) {
2615 			rc = CUDBG_STATUS_NO_SCRATCH_MEM;
2616 			goto err;
2617 		}
2618 
2619 		if (!get_payload_flag) {
2620 			for (i = 0; i < ARRAY_SIZE(payload); i++) {
2621 				if (payload[i].exist &&
2622 				    bytes_read >= payload[i].start &&
2623 				    (bytes_read + bytes) <= payload[i].end) {
2624 					memset(scratch_buff.data, 0, bytes);
2625 					/* TX and RX Payload regions
2626 					 * can't overlap.
2627 					 */
2628 					goto skip_read;
2629 				}
2630 			}
2631 		}
2632 
2633 		/* Read from file */
2634 		/*fread(scratch_buff.data, 1, Bytes, in);*/
2635 		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
2636 				  bytes, (__be32 *)(scratch_buff.data), 1);
2637 
2638 		if (rc) {
2639 			if (pdbg_init->verbose)
2640 				pdbg_init->print("%s: t4_memory_rw failed (%d)",
2641 				    __func__, rc);
2642 			cudbg_err->sys_err = rc;
2643 			goto err1;
2644 		}
2645 
2646 skip_read:
2647 		rc = compress_buff(&scratch_buff, dbg_buff);
2648 		if (rc)
2649 			goto err1;
2650 
2651 		bytes_left -= bytes;
2652 		bytes_read += bytes;
2653 		release_scratch_buff(&scratch_buff, dbg_buff);
2654 	}
2655 
2656 err1:
2657 	if (rc)
2658 		release_scratch_buff(&scratch_buff, dbg_buff);
2659 
2660 err:
2661 	return rc;
2662 #endif
2663 	return (CUDBG_STATUS_NOT_IMPLEMENTED);
2664 }
2665 
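/*
 * Determine which memories (EDC0/EDC1, MC0/MC1) are present and their sizes
 * in MB by reading the MA BAR and target-enable registers.  T4 has at most
 * one external memory; T5/T6 can have two.
 */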
2666 static void collect_mem_info(struct cudbg_init *pdbg_init,
2667 			     struct card_mem *mem_info)
2668 {
2669 	struct adapter *padap = pdbg_init->adap;
2670 	u32 value;
2671 	int t4 = 0;
2672 
2673 	if (is_t4(padap))
2674 		t4 = 1;
2675 
2676 	if (t4) {
2677 		value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
2678 		value = G_EXT_MEM_SIZE(value);
2679 		mem_info->size_mc0 = (u16)value;  /* size in MB */
2680 
2681 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2682 		if (value & F_EXT_MEM_ENABLE)
2683 			mem_info->mem_flag |= (1 << MC0_FLAG); /* set mc0 flag bit */
2685 	} else {
2686 		value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
2687 		value = G_EXT_MEM0_SIZE(value);
2688 		mem_info->size_mc0 = (u16)value;
2689 
2690 		value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
2691 		value = G_EXT_MEM1_SIZE(value);
2692 		mem_info->size_mc1 = (u16)value;
2693 
2694 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2695 		if (value & F_EXT_MEM0_ENABLE)
2696 			mem_info->mem_flag |= (1 << MC0_FLAG);
2697 		if (value & F_EXT_MEM1_ENABLE)
2698 			mem_info->mem_flag |= (1 << MC1_FLAG);
2699 	}
2700 
2701 	value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
2702 	value = G_EDRAM0_SIZE(value);
2703 	mem_info->size_edc0 = (u16)value;
2704 
2705 	value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
2706 	value = G_EDRAM1_SIZE(value);
2707 	mem_info->size_edc1 = (u16)value;
2708 
2709 	value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2710 	if (value & F_EDRAM0_ENABLE)
2711 		mem_info->mem_flag |= (1 << EDC0_FLAG);
2712 	if (value & F_EDRAM1_ENABLE)
2713 		mem_info->mem_flag |= (1 << EDC1_FLAG);
2714 
2715 }
2716 
2717 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
2718 				struct cudbg_error *cudbg_err)
2719 {
2720 	struct adapter *padap = pdbg_init->adap;
2721 	int rc;
2722 
2723 	if (is_fw_attached(pdbg_init)) {
2724 
2725 		/* Flush uP dcache before reading edcX/mcX  */
2726 		rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
2727 		    "t4cudl");
2728 		if (rc == 0) {
2729 			rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
2730 			end_synchronized_op(padap, 0);
2731 		}
2732 
2733 		if (rc) {
2734 			if (pdbg_init->verbose)
2735 				pdbg_init->print("%s: t4_fwcache failed (%d)\n",
2736 				 __func__, rc);
2737 			cudbg_err->sys_warn = rc;
2738 		}
2739 	}
2740 }
2741 
2742 static int collect_edc0_meminfo(struct cudbg_init *pdbg_init,
2743 				struct cudbg_buffer *dbg_buff,
2744 				struct cudbg_error *cudbg_err)
2745 {
2746 	struct card_mem mem_info = {0};
2747 	unsigned long edc0_size;
2748 	int rc;
2749 
2750 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2751 
2752 	collect_mem_info(pdbg_init, &mem_info);
2753 
2754 	if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
2755 		edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
2756 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
2757 				 edc0_size, cudbg_err);
2758 		if (rc)
2759 			goto err;
2760 
2761 	} else {
2762 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2763 		if (pdbg_init->verbose)
2764 			pdbg_init->print("%s(), collect_mem_info failed: %s\n",
2765 				 __func__, err_msg[-rc]);
2766 		goto err;
2767 
2768 	}
2769 err:
2770 	return rc;
2771 }
2772 
2773 static int collect_edc1_meminfo(struct cudbg_init *pdbg_init,
2774 				struct cudbg_buffer *dbg_buff,
2775 				struct cudbg_error *cudbg_err)
2776 {
2777 	struct card_mem mem_info = {0};
2778 	unsigned long edc1_size;
2779 	int rc;
2780 
2781 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2782 
2783 	collect_mem_info(pdbg_init, &mem_info);
2784 
2785 	if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
2786 		edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
2787 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
2788 				 edc1_size, cudbg_err);
2789 		if (rc)
2790 			goto err;
2791 	} else {
2792 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2793 		if (pdbg_init->verbose)
2794 			pdbg_init->print("%s(), collect_mem_info failed: %s\n",
2795 				 __func__, err_msg[-rc]);
2796 		goto err;
2797 	}
2798 
2799 err:
2800 
2801 	return rc;
2802 }
2803 
2804 static int collect_mc0_meminfo(struct cudbg_init *pdbg_init,
2805 			       struct cudbg_buffer *dbg_buff,
2806 			       struct cudbg_error *cudbg_err)
2807 {
2808 	struct card_mem mem_info = {0};
2809 	unsigned long mc0_size;
2810 	int rc;
2811 
2812 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2813 
2814 	collect_mem_info(pdbg_init, &mem_info);
2815 
2816 	if (mem_info.mem_flag & (1 << MC0_FLAG)) {
2817 		mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
2818 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
2819 				 mc0_size, cudbg_err);
2820 		if (rc)
2821 			goto err;
2822 	} else {
2823 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2824 		if (pdbg_init->verbose)
2825 			pdbg_init->print("%s(), collect_mem_info failed: %s\n",
2826 				 __func__, err_msg[-rc]);
2827 		goto err;
2828 	}
2829 
2830 err:
2831 	return rc;
2832 }
2833 
2834 static int collect_mc1_meminfo(struct cudbg_init *pdbg_init,
2835 			       struct cudbg_buffer *dbg_buff,
2836 			       struct cudbg_error *cudbg_err)
2837 {
2838 	struct card_mem mem_info = {0};
2839 	unsigned long mc1_size;
2840 	int rc;
2841 
2842 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2843 
2844 	collect_mem_info(pdbg_init, &mem_info);
2845 
2846 	if (mem_info.mem_flag & (1 << MC1_FLAG)) {
2847 		mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
2848 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
2849 				 mc1_size, cudbg_err);
2850 		if (rc)
2851 			goto err;
2852 	} else {
2853 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2854 
2855 		if (pdbg_init->verbose)
2856 			pdbg_init->print("%s(), collect_mem_info failed: %s\n",
2857 				 __func__, err_msg[-rc]);
2858 		goto err;
2859 	}
2860 err:
2861 	return rc;
2862 }
2863 
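/*
 * Snapshot the full register map (T4_REGMAP_SIZE or T5_REGMAP_SIZE bytes)
 * with t4_get_regs() and compress it in CUDBG_CHUNK_SIZE pieces.
 */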
2864 static int collect_reg_dump(struct cudbg_init *pdbg_init,
2865 			    struct cudbg_buffer *dbg_buff,
2866 			    struct cudbg_error *cudbg_err)
2867 {
2868 	struct cudbg_buffer scratch_buff;
2869 	struct cudbg_buffer tmp_scratch_buff;
2870 	struct adapter *padap = pdbg_init->adap;
2871 	unsigned long	     bytes_read = 0;
2872 	unsigned long	     bytes_left;
2873 	u32		     buf_size = 0, bytes = 0;
2874 	int		     rc = 0;
2875 
2876 	if (is_t4(padap))
2877 		buf_size = T4_REGMAP_SIZE; /*+ sizeof(unsigned int);*/
2878 	else if (is_t5(padap) || is_t6(padap))
2879 		buf_size = T5_REGMAP_SIZE;
2880 
2881 	scratch_buff.size = buf_size;
2882 
2883 	tmp_scratch_buff = scratch_buff;
2884 
2885 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2886 	if (rc)
2887 		goto err;
2888 
2889 	/* t4_get_regs() returns void */
2890 	t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
2891 	bytes_left = scratch_buff.size;
2892 
2893 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2894 	if (rc)
2895 		goto err1;
2896 
2897 	while (bytes_left > 0) {
2898 		tmp_scratch_buff.data =
2899 			((char *)scratch_buff.data) + bytes_read;
2900 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2901 		tmp_scratch_buff.size = bytes;
2902 		compress_buff(&tmp_scratch_buff, dbg_buff);
2903 		bytes_left -= bytes;
2904 		bytes_read += bytes;
2905 	}
2906 
2907 err1:
2908 	release_scratch_buff(&scratch_buff, dbg_buff);
2909 err:
2910 	return rc;
2911 }
2912 
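/*
 * Dump the congestion control table: NCCTRL_WIN windows of NMTUS 16-bit
 * entries read with t4_read_cong_tbl().
 */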
2913 static int collect_cctrl(struct cudbg_init *pdbg_init,
2914 			 struct cudbg_buffer *dbg_buff,
2915 			 struct cudbg_error *cudbg_err)
2916 {
2917 	struct cudbg_buffer scratch_buff;
2918 	struct adapter *padap = pdbg_init->adap;
2919 	u32 size;
2920 	int rc;
2921 
2922 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2923 	scratch_buff.size = size;
2924 
2925 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2926 	if (rc)
2927 		goto err;
2928 
2929 	t4_read_cong_tbl(padap, (void *)scratch_buff.data);
2930 
2931 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2932 	if (rc)
2933 		goto err1;
2934 
2935 	rc = compress_buff(&scratch_buff, dbg_buff);
2936 
2937 err1:
2938 	release_scratch_buff(&scratch_buff, dbg_buff);
2939 err:
2940 	return rc;
2941 }
2942 
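/*
 * Poll the HOSTBUSY bit in CIM_HOST_ACC_CTRL.  Gives up after 10 reads
 * (there is no delay between polls) and returns -1 on timeout.
 */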
2943 static int check_busy_bit(struct adapter *padap)
2944 {
2945 	u32 val;
2946 	u32 busy = 1;
2947 	int i = 0;
2948 	int retry = 10;
2949 	int status = 0;
2950 
2951 	while (busy && i < retry) {
2952 		val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
2953 		busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
2954 		i++;
2955 	}
2956 
2957 	if (busy)
2958 		status = -1;
2959 
2960 	return status;
2961 }
2962 
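/*
 * Indirect read through the CIM host access interface: write the target
 * address to CIM_HOST_ACC_CTRL, wait for HOSTBUSY to clear, then read the
 * value back from CIM_HOST_ACC_DATA.
 */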
2963 static int cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
2964 {
2965 	int rc = 0;
2966 
2967 	/* write register address into the A_CIM_HOST_ACC_CTRL */
2968 	t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
2969 
2970 	/* Poll HOSTBUSY */
2971 	rc = check_busy_bit(padap);
2972 	if (rc)
2973 		goto err;
2974 
2975 	/* Read value from A_CIM_HOST_ACC_DATA */
2976 	*val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
2977 
2978 err:
2979 	return rc;
2980 }
2981 
2982 static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
2983 		       struct ireg_field *up_cim_reg, u32 *buff)
2984 {
2985 	u32 i;
2986 	int rc = 0;
2987 
2988 	for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
2989 		rc = cim_ha_rreg(padap,
2990 				 up_cim_reg->ireg_local_offset + (i * 4),
2991 				buff);
2992 		if (rc) {
2993 			if (pdbg_init->verbose)
2994 				pdbg_init->print("BUSY timeout reading "
2995 					 "CIM_HOST_ACC_CTRL\n");
2996 			goto err;
2997 		}
2998 
2999 		buff++;
3000 	}
3001 
3002 err:
3003 	return rc;
3004 }
3005 
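/*
 * Dump the uP CIM indirect register ranges listed in t5_up_cim_reg_array or
 * t6_up_cim_reg_array, reading each word through cim_ha_rreg().
 */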
3006 static int collect_up_cim_indirect(struct cudbg_init *pdbg_init,
3007 				   struct cudbg_buffer *dbg_buff,
3008 				   struct cudbg_error *cudbg_err)
3009 {
3010 	struct cudbg_buffer scratch_buff;
3011 	struct adapter *padap = pdbg_init->adap;
3012 	struct ireg_buf *up_cim;
3013 	u32 size;
3014 	int i, rc, n;
3015 
3016 	n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
3017 	size = sizeof(struct ireg_buf) * n;
3018 	scratch_buff.size = size;
3019 
3020 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3021 	if (rc)
3022 		goto err;
3023 
3024 	up_cim = (struct ireg_buf *)scratch_buff.data;
3025 
3026 	for (i = 0; i < n; i++) {
3027 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
3028 		u32 *buff = up_cim->outbuf;
3029 
3030 		if (is_t5(padap)) {
3031 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
3032 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
3033 			up_cim_reg->ireg_local_offset =
3034 						t5_up_cim_reg_array[i][2];
3035 			up_cim_reg->ireg_offset_range =
3036 						t5_up_cim_reg_array[i][3];
3037 		} else if (is_t6(padap)) {
3038 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
3039 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
3040 			up_cim_reg->ireg_local_offset =
3041 						t6_up_cim_reg_array[i][2];
3042 			up_cim_reg->ireg_offset_range =
3043 						t6_up_cim_reg_array[i][3];
3044 		}
3045 
3046 		rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
3047 
3048 		up_cim++;
3049 	}
3050 
3051 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3052 	if (rc)
3053 		goto err1;
3054 
3055 	rc = compress_buff(&scratch_buff, dbg_buff);
3056 
3057 err1:
3058 	release_scratch_buff(&scratch_buff, dbg_buff);
3059 err:
3060 	return rc;
3061 }
3062 
3063 static int collect_mbox_log(struct cudbg_init *pdbg_init,
3064 			    struct cudbg_buffer *dbg_buff,
3065 			    struct cudbg_error *cudbg_err)
3066 {
3067 #ifdef notyet
3068 	struct cudbg_buffer scratch_buff;
3069 	struct cudbg_mbox_log *mboxlog = NULL;
3070 	struct mbox_cmd_log *log = NULL;
3071 	struct mbox_cmd *entry;
3072 	u64 flit;
3073 	u32 size;
3074 	unsigned int entry_idx;
3075 	int i, k, rc;
3076 	u16 mbox_cmds;
3077 
3078 	if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
3079 		log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3080 			mboxlog_param.log;
3081 		mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3082 				mboxlog_param.mbox_cmds;
3083 	} else {
3084 		if (pdbg_init->verbose)
3085 			pdbg_init->print("Mbox log is not requested\n");
3086 		return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
3087 	}
3088 
3089 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
3090 	scratch_buff.size = size;
3091 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3092 	if (rc)
3093 		goto err;
3094 
3095 	mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;
3096 
3097 	for (k = 0; k < mbox_cmds; k++) {
3098 		entry_idx = log->cursor + k;
3099 		if (entry_idx >= log->size)
3100 			entry_idx -= log->size;
3101 		entry = mbox_cmd_log_entry(log, entry_idx);
3102 
3103 		/* skip over unused entries */
3104 		if (entry->timestamp == 0)
3105 			continue;
3106 
3107 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
3108 
3109 		for (i = 0; i < MBOX_LEN / 8; i++) {
3110 			flit = entry->cmd[i];
3111 			mboxlog->hi[i] = (u32)(flit >> 32);
3112 			mboxlog->lo[i] = (u32)flit;
3113 		}
3114 
3115 		mboxlog++;
3116 	}
3117 
3118 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3119 	if (rc)
3120 		goto err1;
3121 
3122 	rc = compress_buff(&scratch_buff, dbg_buff);
3123 
3124 err1:
3125 	release_scratch_buff(&scratch_buff, dbg_buff);
3126 err:
3127 	return rc;
3128 #endif
3129 	return (CUDBG_STATUS_NOT_IMPLEMENTED);
3130 }
3131 
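/*
 * Dump the CHAC PBT tables (dynamic entries, static entries, LRF entries
 * and PBT data) using indirect reads through the CIM host access interface.
 */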
3132 static int collect_pbt_tables(struct cudbg_init *pdbg_init,
3133 			      struct cudbg_buffer *dbg_buff,
3134 			      struct cudbg_error *cudbg_err)
3135 {
3136 	struct cudbg_buffer scratch_buff;
3137 	struct adapter *padap = pdbg_init->adap;
3138 	struct cudbg_pbt_tables *pbt = NULL;
3139 	u32 size;
3140 	u32 addr;
3141 	int i, rc;
3142 
3143 	size = sizeof(struct cudbg_pbt_tables);
3144 	scratch_buff.size = size;
3145 
3146 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3147 	if (rc)
3148 		goto err;
3149 
3150 	pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
3151 
3152 	/* PBT dynamic entries */
3153 	addr = CUDBG_CHAC_PBT_ADDR;
3154 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
3155 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
3156 		if (rc) {
3157 			if (pdbg_init->verbose)
3158 				pdbg_init->print("BUSY timeout reading "
3159 					 "CIM_HOST_ACC_CTRL\n");
3160 			goto err1;
3161 		}
3162 	}
3163 
3164 	/* PBT static entries */
3165 
3166 	/* static entries start when bit 6 is set */
3167 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
3168 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
3169 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
3170 		if (rc) {
3171 			if (pdbg_init->verbose)
3172 				pdbg_init->print("BUSY timeout reading "
3173 					 "CIM_HOST_ACC_CTRL\n");
3174 			goto err1;
3175 		}
3176 	}
3177 
3178 	/* LRF entries */
3179 	addr = CUDBG_CHAC_PBT_LRF;
3180 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
3181 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
3182 		if (rc) {
3183 			if (pdbg_init->verbose)
3184 				pdbg_init->print("BUSY timeout reading "
3185 					 "CIM_HOST_ACC_CTRL\n");
3186 			goto err1;
3187 		}
3188 	}
3189 
3190 	/* PBT data entries */
3191 	addr = CUDBG_CHAC_PBT_DATA;
3192 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
3193 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
3194 		if (rc) {
3195 			if (pdbg_init->verbose)
3196 				pdbg_init->print("BUSY timeout reading "
3197 					 "CIM_HOST_ACC_CTRL\n");
3198 			goto err1;
3199 		}
3200 	}
3201 
3202 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3203 	if (rc)
3204 		goto err1;
3205 
3206 	rc = compress_buff(&scratch_buff, dbg_buff);
3207 
3208 err1:
3209 	release_scratch_buff(&scratch_buff, dbg_buff);
3210 err:
3211 	return rc;
3212 }
3213 
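/*
 * Dump the PM RX and PM TX indirect register ranges described by
 * t5_pm_rx_array and t5_pm_tx_array.
 */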
3214 static int collect_pm_indirect(struct cudbg_init *pdbg_init,
3215 			       struct cudbg_buffer *dbg_buff,
3216 			       struct cudbg_error *cudbg_err)
3217 {
3218 	struct cudbg_buffer scratch_buff;
3219 	struct adapter *padap = pdbg_init->adap;
3220 	struct ireg_buf *ch_pm;
3221 	u32 size;
3222 	int i, rc, n;
3223 
3224 	n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
3225 	size = sizeof(struct ireg_buf) * n * 2;
3226 	scratch_buff.size = size;
3227 
3228 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3229 	if (rc)
3230 		goto err;
3231 
3232 	ch_pm = (struct ireg_buf *)scratch_buff.data;
3233 
3234 	/* PM_RX */
3235 	for (i = 0; i < n; i++) {
3236 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3237 		u32 *buff = ch_pm->outbuf;
3238 
3239 		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
3240 		pm_pio->ireg_data = t5_pm_rx_array[i][1];
3241 		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
3242 		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
3243 
3244 		t4_read_indirect(padap,
3245 				pm_pio->ireg_addr,
3246 				pm_pio->ireg_data,
3247 				buff,
3248 				pm_pio->ireg_offset_range,
3249 				pm_pio->ireg_local_offset);
3250 
3251 		ch_pm++;
3252 	}
3253 
3254 	/* PM_TX */
3255 	n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
3256 	for (i = 0; i < n; i++) {
3257 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3258 		u32 *buff = ch_pm->outbuf;
3259 
3260 		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
3261 		pm_pio->ireg_data = t5_pm_tx_array[i][1];
3262 		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
3263 		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
3264 
3265 		t4_read_indirect(padap,
3266 				pm_pio->ireg_addr,
3267 				pm_pio->ireg_data,
3268 				buff,
3269 				pm_pio->ireg_offset_range,
3270 				pm_pio->ireg_local_offset);
3271 
3272 		ch_pm++;
3273 	}
3274 
3275 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3276 	if (rc)
3277 		goto err1;
3278 
3279 	rc = compress_buff(&scratch_buff, dbg_buff);
3280 
3281 err1:
3282 	release_scratch_buff(&scratch_buff, dbg_buff);
3283 err:
3284 	return rc;
3285 
3286 }
3287 
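/*
 * Gather the TID region layout (filter, active filter, server, hash,
 * HP filter and ETHOFLD ranges), mostly via FW_PARAMS queries.  If the PF's
 * own mailbox lacks permission, mailbox/PF 4 is tried as a fallback.
 */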
3288 static int collect_tid(struct cudbg_init *pdbg_init,
3289 		       struct cudbg_buffer *dbg_buff,
3290 		       struct cudbg_error *cudbg_err)
3291 {
3292 
3293 	struct cudbg_buffer scratch_buff;
3294 	struct adapter *padap = pdbg_init->adap;
3295 	struct tid_info_region *tid;
3296 	struct tid_info_region_rev1 *tid1;
3297 	u32 para[7], val[7];
3298 	u32 mbox, pf;
3299 	int rc;
3300 
3301 	scratch_buff.size = sizeof(struct tid_info_region_rev1);
3302 
3303 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3304 	if (rc)
3305 		goto err;
3306 
3307 #define FW_PARAM_DEV_A(param) \
3308 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3309 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3310 #define FW_PARAM_PFVF_A(param) \
3311 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3312 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
3313 	 V_FW_PARAMS_PARAM_Y(0) | \
3314 	 V_FW_PARAMS_PARAM_Z(0))
3315 #define MAX_ATIDS_A 8192U
3316 
3317 	tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
3318 	tid = &(tid1->tid);
3319 	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
3320 	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
3321 	tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
3322 			     sizeof(struct cudbg_ver_hdr);
3323 
3324 	if (is_t5(padap)) {
3325 		tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3326 		tid1->tid_start = 0;
3327 	} else if (is_t6(padap)) {
3328 		tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
3329 		tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
3330 	}
3331 
3332 	tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
3333 
3334 	para[0] = FW_PARAM_PFVF_A(FILTER_START);
3335 	para[1] = FW_PARAM_PFVF_A(FILTER_END);
3336 	para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
3337 	para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
3338 	para[4] = FW_PARAM_DEV_A(NTID);
3339 	para[5] = FW_PARAM_PFVF_A(SERVER_START);
3340 	para[6] = FW_PARAM_PFVF_A(SERVER_END);
3341 
3342 	rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK, "t4cudq");
3343 	if (rc)
3344 		goto err;
3345 	mbox = padap->mbox;
3346 	pf = padap->pf;
3347 	rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3348 	if (rc < 0) {
3349 		if (rc == -FW_EPERM) {
3350 			/* It looks like we don't have permission to use
3351 			 * padap->mbox.
3352 			 *
3353 			 * Try mbox 4.  If it works, we'll continue to
3354 			 * collect the rest of tid info from mbox 4.
3355 			 * Else, quit trying to collect tid info.
3356 			 */
3357 			mbox = 4;
3358 			pf = 4;
3359 			rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3360 			if (rc < 0) {
3361 				cudbg_err->sys_err = rc;
3362 				goto err1;
3363 			}
3364 		} else {
3365 			cudbg_err->sys_err = rc;
3366 			goto err1;
3367 		}
3368 	}
3369 
3370 	tid->ftid_base = val[0];
3371 	tid->nftids = val[1] - val[0] + 1;
3372 	/* active filter region */
3373 	if (val[2] != val[3]) {
3374 #ifdef notyet
3375 		tid->flags |= FW_OFLD_CONN;
3376 #endif
3377 		tid->aftid_base = val[2];
3378 		tid->aftid_end = val[3];
3379 	}
3380 	tid->ntids = val[4];
3381 	tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
3382 	tid->stid_base = val[5];
3383 	tid->nstids = val[6] - val[5] + 1;
3384 
3385 	if (chip_id(padap) >= CHELSIO_T6) {
3386 		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
3387 		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
3388 		rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3389 		if (rc < 0) {
3390 			cudbg_err->sys_err = rc;
3391 			goto err1;
3392 		}
3393 
3394 		tid->hpftid_base = val[0];
3395 		tid->nhpftids = val[1] - val[0] + 1;
3396 	}
3397 
3398 	if (chip_id(padap) <= CHELSIO_T5) {
3399 		tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
3400 		tid->hash_base /= 4;
3401 	} else
3402 		tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
3403 
3404 	/* UO context range */
3405 	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
3406 	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
3407 
3408 	rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3409 	if (rc < 0) {
3410 		cudbg_err->sys_err = rc;
3411 		goto err1;
3412 	}
3413 
3414 	if (val[0] != val[1]) {
3415 		tid->uotid_base = val[0];
3416 		tid->nuotids = val[1] - val[0] + 1;
3417 	}
3418 	tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
3419 	tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
3420 
3421 #undef FW_PARAM_PFVF_A
3422 #undef FW_PARAM_DEV_A
3423 #undef MAX_ATIDS_A
3424 
3425 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3426 	if (rc)
3427 		goto err1;
3428 	rc = compress_buff(&scratch_buff, dbg_buff);
3429 
3430 err1:
3431 	end_synchronized_op(padap, 0);
3432 	release_scratch_buff(&scratch_buff, dbg_buff);
3433 err:
3434 	return rc;
3435 }
3436 
3437 static int collect_tx_rate(struct cudbg_init *pdbg_init,
3438 			   struct cudbg_buffer *dbg_buff,
3439 			   struct cudbg_error *cudbg_err)
3440 {
3441 	struct cudbg_buffer scratch_buff;
3442 	struct adapter *padap = pdbg_init->adap;
3443 	struct tx_rate *tx_rate;
3444 	u32 size;
3445 	int rc;
3446 
3447 	size = sizeof(struct tx_rate);
3448 	scratch_buff.size = size;
3449 
3450 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3451 	if (rc)
3452 		goto err;
3453 
3454 	tx_rate = (struct tx_rate *)scratch_buff.data;
3455 	t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
3456 	tx_rate->nchan = padap->chip_params->nchan;
3457 
3458 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3459 	if (rc)
3460 		goto err1;
3461 
3462 	rc = compress_buff(&scratch_buff, dbg_buff);
3463 
3464 err1:
3465 	release_scratch_buff(&scratch_buff, dbg_buff);
3466 err:
3467 	return rc;
3468 }
3469 
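/*
 * Derive a mask (x | y) and the Ethernet address stored in the low 6 bytes
 * of y from an MPS TCAM entry's X/Y pair.
 */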
3470 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
3471 {
3472 	*mask = x | y;
3473 	y = (__force u64)cpu_to_be64(y);
3474 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
3475 }
3476 
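/*
 * Backdoor read of the VF replication map straight from the
 * MPS_VF_RPLCT_MAP registers.  Used when the FW_LDST mailbox command fails;
 * T5 and T6 map the upper 128 bits to different register numbers.
 */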
3477 static void mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
3478 {
3479 	if (is_t5(padap)) {
3480 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3481 							  A_MPS_VF_RPLCT_MAP3));
3482 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3483 							  A_MPS_VF_RPLCT_MAP2));
3484 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3485 							  A_MPS_VF_RPLCT_MAP1));
3486 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3487 							  A_MPS_VF_RPLCT_MAP0));
3488 	} else {
3489 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3490 							  A_MPS_VF_RPLCT_MAP7));
3491 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3492 							  A_MPS_VF_RPLCT_MAP6));
3493 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3494 							  A_MPS_VF_RPLCT_MAP5));
3495 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3496 							  A_MPS_VF_RPLCT_MAP4));
3497 	}
3498 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
3499 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
3500 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
3501 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
3502 }
3503 
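/*
 * Walk all MPS TCAM entries.  On T6 the X/Y values are read through the
 * CLS_TCAM_DATA2_CTL window; earlier chips use the CLS_TCAM_Y_L/X_L
 * registers.  For replicating entries the replication map is fetched via a
 * FW_LDST command, falling back to mps_rpl_backdoor() on failure.
 */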
3504 static int collect_mps_tcam(struct cudbg_init *pdbg_init,
3505 			    struct cudbg_buffer *dbg_buff,
3506 			    struct cudbg_error *cudbg_err)
3507 {
3508 	struct cudbg_buffer scratch_buff;
3509 	struct adapter *padap = pdbg_init->adap;
3510 	struct cudbg_mps_tcam *tcam = NULL;
3511 	u32 size = 0, i, n, total_size = 0;
3512 	u32 ctl, data2;
3513 	u64 tcamy, tcamx, val;
3514 	int rc;
3515 
3516 	n = padap->chip_params->mps_tcam_size;
3517 	size = sizeof(struct cudbg_mps_tcam) * n;
3518 	scratch_buff.size = size;
3519 
3520 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3521 	if (rc)
3522 		goto err;
3523 	memset(scratch_buff.data, 0, size);
3524 
3525 	tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
3526 	for (i = 0; i < n; i++) {
3527 		if (chip_id(padap) >= CHELSIO_T6) {
3528 			/* CtlReqID   - 1: use Host Driver Requester ID
3529 			 * CtlCmdType - 0: Read, 1: Write
3530 			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
3531 			 * CtlXYBitSel- 0: Y bit, 1: X bit
3532 			 */
3533 
3534 			/* Read tcamy */
3535 			ctl = (V_CTLREQID(1) |
3536 			       V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
3537 			if (i < 256)
3538 				ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
3539 			else
3540 				ctl |= V_CTLTCAMINDEX(i - 256) |
3541 				       V_CTLTCAMSEL(1);
3542 
3543 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3544 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3545 			tcamy = G_DMACH(val) << 32;
3546 			tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3547 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3548 			tcam->lookup_type = G_DATALKPTYPE(data2);
3549 
3550 			/* 0 - Outer header, 1 - Inner header
3551 			 * [71:48] bit locations are overloaded for
3552 			 * outer vs. inner lookup types.
3553 			 */
3554 
3555 			if (tcam->lookup_type &&
3556 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3557 				/* Inner header VNI */
3558 				tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
3559 					     (G_DATAVIDH1(data2) << 16) |
3560 					     G_VIDL(val);
3561 				tcam->dip_hit = data2 & F_DATADIPHIT;
3562 			} else {
3563 				tcam->vlan_vld = data2 & F_DATAVIDH2;
3564 				tcam->ivlan = G_VIDL(val);
3565 			}
3566 
3567 			tcam->port_num = G_DATAPORTNUM(data2);
3568 
3569 			/* Read tcamx. Change the control param */
3570 			ctl |= V_CTLXYBITSEL(1);
3571 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3572 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3573 			tcamx = G_DMACH(val) << 32;
3574 			tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3575 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3576 			if (tcam->lookup_type &&
3577 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3578 				/* Inner header VNI mask */
3579 				tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
3580 					     (G_DATAVIDH1(data2) << 16) |
3581 					     G_VIDL(val);
3582 			}
3583 		} else {
3584 			tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
3585 			tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
3586 		}
3587 
3588 		if (tcamx & tcamy)
3589 			continue;
3590 
3591 		tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
3592 		tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));
3593 
3594 		if (is_t5(padap))
3595 			tcam->repli = (tcam->cls_lo & F_REPLICATE);
3596 		else if (is_t6(padap))
3597 			tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);
3598 
3599 		if (tcam->repli) {
3600 			struct fw_ldst_cmd ldst_cmd;
3601 			struct fw_ldst_mps_rplc mps_rplc;
3602 
3603 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
3604 			ldst_cmd.op_to_addrspace =
3605 				htonl(V_FW_CMD_OP(FW_LDST_CMD) |
3606 				      F_FW_CMD_REQUEST |
3607 				      F_FW_CMD_READ |
3608 				      V_FW_LDST_CMD_ADDRSPACE(
3609 					      FW_LDST_ADDRSPC_MPS));
3610 
3611 			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
3612 
3613 			ldst_cmd.u.mps.rplc.fid_idx =
3614 				htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
3615 				      V_FW_LDST_CMD_IDX(i));
3616 
3617 			rc = begin_synchronized_op(padap, NULL,
3618 			    SLEEP_OK | INTR_OK, "t4cudm");
3619 			if (rc == 0) {
3620 				rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
3621 						sizeof(ldst_cmd), &ldst_cmd);
3622 				end_synchronized_op(padap, 0);
3623 			}
3624 
3625 			if (rc)
3626 				mps_rpl_backdoor(padap, &mps_rplc);
3627 			else
3628 				mps_rplc = ldst_cmd.u.mps.rplc;
3629 
3630 			tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
3631 			tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
3632 			tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
3633 			tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
3634 			if (padap->chip_params->mps_rplc_size >
3635 					CUDBG_MAX_RPLC_SIZE) {
3636 				tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
3637 				tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
3638 				tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
3639 				tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
3640 			}
3641 		}
3642 		cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
3643 
3644 		tcam->idx = i;
3645 		tcam->rplc_size = padap->chip_params->mps_rplc_size;
3646 
3647 		total_size += sizeof(struct cudbg_mps_tcam);
3648 
3649 		tcam++;
3650 	}
3651 
3652 	if (total_size == 0) {
3653 		rc = CUDBG_SYSTEM_ERROR;
3654 		goto err1;
3655 	}
3656 
3657 	scratch_buff.size = total_size;
3658 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3659 	if (rc)
3660 		goto err1;
3661 
3662 	rc = compress_buff(&scratch_buff, dbg_buff);
3663 
3664 err1:
3665 	scratch_buff.size = size;
3666 	release_scratch_buff(&scratch_buff, dbg_buff);
3667 err:
3668 	return rc;
3669 }
3670 
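/*
 * Dump PCIe config space.  Each entry of t5_pcie_config_array gives an
 * inclusive [start, end] register range that is read 4 bytes at a time.
 */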
3671 static int collect_pcie_config(struct cudbg_init *pdbg_init,
3672 			       struct cudbg_buffer *dbg_buff,
3673 			       struct cudbg_error *cudbg_err)
3674 {
3675 	struct cudbg_buffer scratch_buff;
3676 	struct adapter *padap = pdbg_init->adap;
3677 	u32 size, *value, j;
3678 	int i, rc, n;
3679 
3680 	size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
3681 	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
3682 	scratch_buff.size = size;
3683 
3684 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3685 	if (rc)
3686 		goto err;
3687 
3688 	value = (u32 *)scratch_buff.data;
3689 	for (i = 0; i < n; i++) {
3690 		for (j = t5_pcie_config_array[i][0];
3691 		     j <= t5_pcie_config_array[i][1]; j += 4) {
3692 			*value++ = t4_hw_pci_read_cfg4(padap, j);
3693 		}
3694 	}
3695 
3696 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3697 	if (rc)
3698 		goto err1;
3699 
3700 	rc = compress_buff(&scratch_buff, dbg_buff);
3701 
3702 err1:
3703 	release_scratch_buff(&scratch_buff, dbg_buff);
3704 err:
3705 	return rc;
3706 }
3707 
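/*
 * Read one TID's LE data through the DBGI interface: clear the request data
 * registers, issue the command, poll DBGICMDBUSY, verify the response status
 * and copy out the response words.
 */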
3708 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
3709 			  struct cudbg_tid_data *tid_data)
3710 {
3711 	int i, cmd_retry = 8;
3712 	struct adapter *padap = pdbg_init->adap;
3713 	u32 val;
3714 
3715 	/* Fill REQ_DATA regs with 0's */
3716 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3717 		t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);
3718 
3719 	/* Write DBIG command */
3720 	val = (0x4 << S_DBGICMD) | tid;
3721 	t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
3722 	tid_data->dbig_cmd = val;
3723 
3724 	val = 0;
3725 	val |= 1 << S_DBGICMDSTRT;
3726 	val |= 1;  /* LE mode */
3727 	t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
3728 	tid_data->dbig_conf = val;
3729 
3730 	/* Poll the DBGICMDBUSY bit */
3731 	val = 1;
3732 	while (val) {
3733 		val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
3734 		val = (val >> S_DBGICMDBUSY) & 1;
3735 		cmd_retry--;
3736 		if (!cmd_retry) {
3737 			if (pdbg_init->verbose)
3738 				pdbg_init->print("%s(): Timeout waiting for non-busy\n",
3739 					 __func__);
3740 			return CUDBG_SYSTEM_ERROR;
3741 		}
3742 	}
3743 
3744 	/* Check RESP status */
3745 	val = 0;
3746 	val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
3747 	tid_data->dbig_rsp_stat = val;
3748 	if (!(val & 1)) {
3749 		if (pdbg_init->verbose)
3750 			pdbg_init->print("%s(): DBGI command failed\n", __func__);
3751 		return CUDBG_SYSTEM_ERROR;
3752 	}
3753 
3754 	/* Read RESP data */
3755 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3756 		tid_data->data[i] = t4_read_reg(padap,
3757 						A_LE_DB_DBGI_RSP_DATA +
3758 						(i << 2));
3759 
3760 	tid_data->tid = tid;
3761 
3762 	return 0;
3763 }
3764 
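/*
 * Dump the LE TCAM.  The region start indices (routing, CLIP, filter,
 * server) and the hash configuration are recorded first, then every TID up
 * to max_tid is read with cudbg_read_tid(), compressing one CUDBG_CHUNK_SIZE
 * scratch buffer at a time.
 */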
3765 static int collect_le_tcam(struct cudbg_init *pdbg_init,
3766 			   struct cudbg_buffer *dbg_buff,
3767 			   struct cudbg_error *cudbg_err)
3768 {
3769 	struct cudbg_buffer scratch_buff;
3770 	struct adapter *padap = pdbg_init->adap;
3771 	struct cudbg_tcam tcam_region = {0};
3772 	struct cudbg_tid_data *tid_data = NULL;
3773 	u32 value, bytes = 0, bytes_left  = 0;
3774 	u32 i;
3775 	int rc, size;
3776 
3777 	/* Get the LE regions */
3778 	/* Get hash base index */
3779 	value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3780 	tcam_region.tid_hash_base = value;
3781 
3782 	/* Get routing table index */
3783 	value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
3784 	tcam_region.routing_start = value;
3785 
3786 	/* Get clip table index */
3787 	value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
3788 	tcam_region.clip_start = value;
3789 
3790 	/* Get filter table index */
3791 	value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
3792 	tcam_region.filter_start = value;
3793 
3794 	/* Get server table index */
3795 	value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
3796 	tcam_region.server_start = value;
3797 
3798 	/* Check whether hash is enabled and calculate the max tids */
3799 	value = t4_read_reg(padap, A_LE_DB_CONFIG);
3800 	if ((value >> S_HASHEN) & 1) {
3801 		value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
3802 		if (chip_id(padap) > CHELSIO_T5)
3803 			tcam_region.max_tid = (value & 0xFFFFF) +
3804 					      tcam_region.tid_hash_base;
3805 		else {	    /* for T5 */
3806 			value = G_HASHTIDSIZE(value);
3807 			value = 1 << value;
3808 			tcam_region.max_tid = value +
3809 				tcam_region.tid_hash_base;
3810 		}
3811 	} else	 /* hash not enabled */
3812 		tcam_region.max_tid = CUDBG_MAX_TCAM_TID;
3813 
3814 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
3815 	size += sizeof(struct cudbg_tcam);
3816 	scratch_buff.size = size;
3817 
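	/*
	 * The dump is laid out as a struct cudbg_tcam header followed by
	 * one struct cudbg_tid_data per TID.  Only the total size is set
	 * here for the compression header; the payload itself is gathered
	 * and compressed in CUDBG_CHUNK_SIZE chunks below.
	 */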
3818 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3819 	if (rc)
3820 		goto err;
3821 
3822 	rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
3823 	if (rc)
3824 		goto err;
3825 
3826 	memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
3827 
3828 	tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
3829 					     scratch_buff.data) + 1);
3830 	bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
3831 	bytes = sizeof(struct cudbg_tcam);
3832 
3833 	/* Read all TIDs */
3834 	for (i = 0; i < tcam_region.max_tid; i++) {
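		/*
		 * Flush the current chunk and grab a fresh scratch buffer
		 * once there is no room left for another TID entry.
		 */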
3835 		if (bytes_left < sizeof(struct cudbg_tid_data)) {
3836 			scratch_buff.size = bytes;
3837 			rc = compress_buff(&scratch_buff, dbg_buff);
3838 			if (rc)
3839 				goto err1;
3840 			scratch_buff.size = CUDBG_CHUNK_SIZE;
3841 			release_scratch_buff(&scratch_buff, dbg_buff);
3842 
3843 			/* new alloc */
3844 			rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
3845 					      &scratch_buff);
3846 			if (rc)
3847 				goto err;
3848 
3849 			tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
3850 			bytes_left = CUDBG_CHUNK_SIZE;
3851 			bytes = 0;
3852 		}
3853 
3854 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
3855 
3856 		if (rc) {
3857 			cudbg_err->sys_err = rc;
3858 			goto err1;
3859 		}
3860 
3861 		tid_data++;
3862 		bytes_left -= sizeof(struct cudbg_tid_data);
3863 		bytes += sizeof(struct cudbg_tid_data);
3864 	}
3865 
3866 	if (bytes) {
3867 		scratch_buff.size = bytes;
3868 		rc = compress_buff(&scratch_buff, dbg_buff);
3869 	}
3870 
3871 err1:
3872 	scratch_buff.size = CUDBG_CHUNK_SIZE;
3873 	release_scratch_buff(&scratch_buff, dbg_buff);
3874 err:
3875 	return rc;
3876 }
3877 
3878 static int collect_ma_indirect(struct cudbg_init *pdbg_init,
3879 			       struct cudbg_buffer *dbg_buff,
3880 			       struct cudbg_error *cudbg_err)
3881 {
3882 	struct cudbg_buffer scratch_buff;
3883 	struct adapter *padap = pdbg_init->adap;
3884 	struct ireg_buf *ma_indr = NULL;
3885 	u32 size, j;
3886 	int i, rc, n;
3887 
3888 	if (chip_id(padap) < CHELSIO_T6) {
3889 		if (pdbg_init->verbose)
3890 			pdbg_init->print("MA indirect is available only in T6\n");
3891 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3892 		goto err;
3893 	}
3894 
3895 	n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
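	/*
	 * The buffer holds one ireg_buf per row for both the
	 * t6_ma_ireg_array and t6_ma_ireg_array2 passes; the "* 2" assumes
	 * the second array has no more rows than the first.
	 */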
3896 	size = sizeof(struct ireg_buf) * n * 2;
3897 	scratch_buff.size = size;
3898 
3899 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3900 	if (rc)
3901 		goto err;
3902 
3903 	ma_indr = (struct ireg_buf *)scratch_buff.data;
3904 
3905 	for (i = 0; i < n; i++) {
3906 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3907 		u32 *buff = ma_indr->outbuf;
3908 
3909 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
3910 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
3911 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
3912 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
3913 
3914 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
3915 				 buff, ma_fli->ireg_offset_range,
3916 				 ma_fli->ireg_local_offset);
3917 
3918 		ma_indr++;
3919 
3920 	}
3921 
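	/*
	 * Entries in t6_ma_ireg_array2 are read one register at a time:
	 * [i][3] holds the repeat count and the local offset is advanced
	 * by 0x20 between reads.
	 */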
3922 	n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));
3923 
3924 	for (i = 0; i < n; i++) {
3925 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3926 		u32 *buff = ma_indr->outbuf;
3927 
3928 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
3929 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
3930 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
3931 
3932 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
3933 			t4_read_indirect(padap, ma_fli->ireg_addr,
3934 					 ma_fli->ireg_data, buff, 1,
3935 					 ma_fli->ireg_local_offset);
3936 			buff++;
3937 			ma_fli->ireg_local_offset += 0x20;
3938 		}
3939 		ma_indr++;
3940 	}
3941 
3942 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3943 	if (rc)
3944 		goto err1;
3945 
3946 	rc = compress_buff(&scratch_buff, dbg_buff);
3947 
3948 err1:
3949 	release_scratch_buff(&scratch_buff, dbg_buff);
3950 err:
3951 	return rc;
3952 }
3953 
3954 static int collect_hma_indirect(struct cudbg_init *pdbg_init,
3955 			       struct cudbg_buffer *dbg_buff,
3956 			       struct cudbg_error *cudbg_err)
3957 {
3958 	struct cudbg_buffer scratch_buff;
3959 	struct adapter *padap = pdbg_init->adap;
3960 	struct ireg_buf *hma_indr = NULL;
3961 	u32 size;
3962 	int i, rc, n;
3963 
3964 	if (chip_id(padap) < CHELSIO_T6) {
3965 		if (pdbg_init->verbose)
3966 			pdbg_init->print("HMA indirect is available only in T6\n");
3967 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3968 		goto err;
3969 	}
3970 
3971 	n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
3972 	size = sizeof(struct ireg_buf) * n;
3973 	scratch_buff.size = size;
3974 
3975 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3976 	if (rc)
3977 		goto err;
3978 
3979 	hma_indr = (struct ireg_buf *)scratch_buff.data;
3980 
3981 	for (i = 0; i < n; i++) {
3982 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
3983 		u32 *buff = hma_indr->outbuf;
3984 
3985 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
3986 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
3987 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
3988 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
3989 
3990 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
3991 				 buff, hma_fli->ireg_offset_range,
3992 				 hma_fli->ireg_local_offset);
3993 
3994 		hma_indr++;
3995 
3996 	}
3997 
3998 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3999 	if (rc)
4000 		goto err1;
4001 
4002 	rc = compress_buff(&scratch_buff, dbg_buff);
4003 
4004 err1:
4005 	release_scratch_buff(&scratch_buff, dbg_buff);
4006 err:
4007 	return rc;
4008 }
4009 
4010 static int collect_pcie_indirect(struct cudbg_init *pdbg_init,
4011 				 struct cudbg_buffer *dbg_buff,
4012 				 struct cudbg_error *cudbg_err)
4013 {
4014 	struct cudbg_buffer scratch_buff;
4015 	struct adapter *padap = pdbg_init->adap;
4016 	struct ireg_buf *ch_pcie;
4017 	u32 size;
4018 	int i, rc, n;
4019 
4020 	n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
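	/*
	 * Room for both the PCIE_PDBG and PCIE_CDBG passes; the "* 2"
	 * assumes t5_pcie_cdbg_array has no more rows than
	 * t5_pcie_pdbg_array.
	 */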
4021 	size = sizeof(struct ireg_buf) * n * 2;
4022 	scratch_buff.size = size;
4023 
4024 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4025 	if (rc)
4026 		goto err;
4027 
4028 	ch_pcie = (struct ireg_buf *)scratch_buff.data;
4029 
4030 	/* PCIE_PDBG */
4031 	for (i = 0; i < n; i++) {
4032 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4033 		u32 *buff = ch_pcie->outbuf;
4034 
4035 		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
4036 		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
4037 		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
4038 		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
4039 
4040 		t4_read_indirect(padap,
4041 				pcie_pio->ireg_addr,
4042 				pcie_pio->ireg_data,
4043 				buff,
4044 				pcie_pio->ireg_offset_range,
4045 				pcie_pio->ireg_local_offset);
4046 
4047 		ch_pcie++;
4048 	}
4049 
4050 	/* PCIE_CDBG */
4051 	n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
4052 	for (i = 0; i < n; i++) {
4053 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4054 		u32 *buff = ch_pcie->outbuf;
4055 
4056 		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
4057 		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
4058 		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
4059 		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
4060 
4061 		t4_read_indirect(padap,
4062 				pcie_pio->ireg_addr,
4063 				pcie_pio->ireg_data,
4064 				buff,
4065 				pcie_pio->ireg_offset_range,
4066 				pcie_pio->ireg_local_offset);
4067 
4068 		ch_pcie++;
4069 	}
4070 
4071 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4072 	if (rc)
4073 		goto err1;
4074 
4075 	rc = compress_buff(&scratch_buff, dbg_buff);
4076 
4077 err1:
4078 	release_scratch_buff(&scratch_buff, dbg_buff);
4079 err:
4080 	return rc;
4081 
4082 }
4083 
4084 static int collect_tp_indirect(struct cudbg_init *pdbg_init,
4085 			       struct cudbg_buffer *dbg_buff,
4086 			       struct cudbg_error *cudbg_err)
4087 {
4088 	struct cudbg_buffer scratch_buff;
4089 	struct adapter *padap = pdbg_init->adap;
4090 	struct ireg_buf *ch_tp_pio;
4091 	u32 size;
4092 	int i, rc, n = 0;
4093 
4094 	if (is_t5(padap))
4095 		n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
4096 	else if (is_t6(padap))
4097 		n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));
4098 
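	/*
	 * One ireg_buf per row for each of the three register groups
	 * (TP_PIO, TP_TM_PIO and TP_MIB_INDEX); the "* 3" assumes the
	 * TM_PIO and MIB_INDEX arrays have no more rows than the PIO array.
	 */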
4099 	size = sizeof(struct ireg_buf) * n * 3;
4100 	scratch_buff.size = size;
4101 
4102 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4103 	if (rc)
4104 		goto err;
4105 
4106 	ch_tp_pio = (struct ireg_buf *)scratch_buff.data;
4107 
4108 	/* TP_PIO */
4109 	for (i = 0; i < n; i++) {
4110 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4111 		u32 *buff = ch_tp_pio->outbuf;
4112 
4113 		if (is_t5(padap)) {
4114 			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
4115 			tp_pio->ireg_data = t5_tp_pio_array[i][1];
4116 			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
4117 			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
4118 		} else if (is_t6(padap)) {
4119 			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
4120 			tp_pio->ireg_data = t6_tp_pio_array[i][1];
4121 			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
4122 			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
4123 		}
4124 
4125 		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
4126 			       tp_pio->ireg_local_offset, true);
4127 
4128 		ch_tp_pio++;
4129 	}
4130 
4131 	/* TP_TM_PIO */
4132 	if (is_t5(padap))
4133 		n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
4134 	else if (is_t6(padap))
4135 		n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));
4136 
4137 	for (i = 0; i < n; i++) {
4138 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4139 		u32 *buff = ch_tp_pio->outbuf;
4140 
4141 		if (is_t5(padap)) {
4142 			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
4143 			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
4144 			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
4145 			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
4146 		} else if (is_t6(padap)) {
4147 			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
4148 			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
4149 			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
4150 			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
4151 		}
4152 
4153 		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
4154 				  tp_pio->ireg_local_offset, true);
4155 
4156 		ch_tp_pio++;
4157 	}
4158 
4159 	/* TP_MIB_INDEX */
4160 	if (is_t5(padap))
4161 		n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
4162 	else if (is_t6(padap))
4163 		n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));
4164 
4165 	for (i = 0; i < n ; i++) {
4166 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4167 		u32 *buff = ch_tp_pio->outbuf;
4168 
4169 		if (is_t5(padap)) {
4170 			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
4171 			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
4172 			tp_pio->ireg_local_offset =
4173 				t5_tp_mib_index_array[i][2];
4174 			tp_pio->ireg_offset_range =
4175 				t5_tp_mib_index_array[i][3];
4176 		} else if (is_t6(padap)) {
4177 			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
4178 			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
4179 			tp_pio->ireg_local_offset =
4180 				t6_tp_mib_index_array[i][2];
4181 			tp_pio->ireg_offset_range =
4182 				t6_tp_mib_index_array[i][3];
4183 		}
4184 
4185 		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
4186 			       tp_pio->ireg_local_offset, true);
4187 
4188 		ch_tp_pio++;
4189 	}
4190 
4191 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4192 	if (rc)
4193 		goto err1;
4194 
4195 	rc = compress_buff(&scratch_buff, dbg_buff);
4196 
4197 err1:
4198 	release_scratch_buff(&scratch_buff, dbg_buff);
4199 err:
4200 	return rc;
4201 }
4202 
4203 static int collect_sge_indirect(struct cudbg_init *pdbg_init,
4204 				struct cudbg_buffer *dbg_buff,
4205 				struct cudbg_error *cudbg_err)
4206 {
4207 	struct cudbg_buffer scratch_buff;
4208 	struct adapter *padap = pdbg_init->adap;
4209 	struct ireg_buf *ch_sge_dbg;
4210 	u32 size;
4211 	int i, rc;
4212 
4213 	size = sizeof(struct ireg_buf) * 2;
4214 	scratch_buff.size = size;
4215 
4216 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4217 	if (rc)
4218 		goto err;
4219 
4220 	ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;
4221 
4222 	for (i = 0; i < 2; i++) {
4223 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
4224 		u32 *buff = ch_sge_dbg->outbuf;
4225 
4226 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
4227 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
4228 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
4229 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
4230 
4231 		t4_read_indirect(padap,
4232 				sge_pio->ireg_addr,
4233 				sge_pio->ireg_data,
4234 				buff,
4235 				sge_pio->ireg_offset_range,
4236 				sge_pio->ireg_local_offset);
4237 
4238 		ch_sge_dbg++;
4239 	}
4240 
4241 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4242 	if (rc)
4243 		goto err1;
4244 
4245 	rc = compress_buff(&scratch_buff, dbg_buff);
4246 
4247 err1:
4248 	release_scratch_buff(&scratch_buff, dbg_buff);
4249 err:
4250 	return rc;
4251 }
4252 
4253 static int collect_full(struct cudbg_init *pdbg_init,
4254 			struct cudbg_buffer *dbg_buff,
4255 			struct cudbg_error *cudbg_err)
4256 {
4257 	struct cudbg_buffer scratch_buff;
4258 	struct adapter *padap = pdbg_init->adap;
4259 	u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
4260 	u32 *sp;
4261 	int rc;
4262 	int nreg = 0;
4263 
4264 	/* Collect Registers:
4265 	 * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
4266 	 * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
4267 	 * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
4268 	 * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
4269 	 * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
4270 	 * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3) (T6 only),
4271 	 * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
4272 	 */
4273 
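	/*
	 * One 32-bit word is captured per register listed above; the extra
	 * word on T6 is PCIE_CDEBUG_INDEX[AppData1].
	 */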
4274 	if (is_t5(padap))
4275 		nreg = 6;
4276 	else if (is_t6(padap))
4277 		nreg = 7;
4278 
4279 	scratch_buff.size = nreg * sizeof(u32);
4280 
4281 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4282 	if (rc)
4283 		goto err;
4284 
4285 	sp = (u32 *)scratch_buff.data;
4286 
4287 	/* TP_DBG_SCHED_TX */
4288 	reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
4289 	reg_offset_range = 1;
4290 
4291 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4292 
4293 	sp++;
4294 
4295 	/* TP_DBG_SCHED_RX */
4296 	reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
4297 	reg_offset_range = 1;
4298 
4299 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4300 
4301 	sp++;
4302 
4303 	/* TP_DBG_CSIDE_INT */
4304 	reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
4305 	reg_offset_range = 1;
4306 
4307 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4308 
4309 	sp++;
4310 
4311 	/* TP_DBG_ESIDE_INT */
4312 	reg_local_offset = t5_tp_pio_array[8][2] + 3;
4313 	reg_offset_range = 1;
4314 
4315 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4316 
4317 	sp++;
4318 
4319 	/* PCIE_CDEBUG_INDEX[AppData0] */
4320 	reg_addr = t5_pcie_cdbg_array[0][0];
4321 	reg_data = t5_pcie_cdbg_array[0][1];
4322 	reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
4323 	reg_offset_range = 1;
4324 
4325 	t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
4326 			 reg_local_offset);
4327 
4328 	sp++;
4329 
4330 	if (is_t6(padap)) {
4331 		/* PCIE_CDEBUG_INDEX[AppData1] */
4332 		reg_addr = t5_pcie_cdbg_array[0][0];
4333 		reg_data = t5_pcie_cdbg_array[0][1];
4334 		reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
4335 		reg_offset_range = 1;
4336 
4337 		t4_read_indirect(padap, reg_addr, reg_data, sp,
4338 				 reg_offset_range, reg_local_offset);
4339 
4340 		sp++;
4341 	}
4342 
4343 	/* SGE_DEBUG_DATA_HIGH_INDEX_10 */
4344 	*sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);
4345 
4346 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4347 	if (rc)
4348 		goto err1;
4349 
4350 	rc = compress_buff(&scratch_buff, dbg_buff);
4351 
4352 err1:
4353 	release_scratch_buff(&scratch_buff, dbg_buff);
4354 err:
4355 	return rc;
4356 }
4357 
4358 static int collect_vpd_data(struct cudbg_init *pdbg_init,
4359 			    struct cudbg_buffer *dbg_buff,
4360 			    struct cudbg_error *cudbg_err)
4361 {
4362 #ifdef notyet
4363 	struct cudbg_buffer scratch_buff;
4364 	struct adapter *padap = pdbg_init->adap;
4365 	struct struct_vpd_data *vpd_data;
4366 	char vpd_ver[4];
4367 	u32 fw_vers;
4368 	u32 size;
4369 	int rc;
4370 
4371 	size = sizeof(struct struct_vpd_data);
4372 	scratch_buff.size = size;
4373 
4374 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4375 	if (rc)
4376 		goto err;
4377 
4378 	vpd_data = (struct struct_vpd_data *)scratch_buff.data;
4379 
4380 	if (is_t5(padap)) {
4381 		read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
4382 		read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
4383 		read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
4384 		read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
4385 	} else if (is_t6(padap)) {
4386 		read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
4387 		read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
4388 		read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
4389 		read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
4390 	}
4391 
4392 	if (is_fw_attached(pdbg_init)) {
4393 		rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
4394 	} else {
4395 		rc = 1;
4396 	}
4397 
4398 	if (rc) {
4399 		/* Fall back to the backdoor mechanism */
4400 		rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
4401 				  (u8 *)&vpd_data->scfg_vers);
4402 		if (rc)
4403 			goto err1;
4404 	}
4405 
4406 	if (is_fw_attached(pdbg_init)) {
4407 		rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
4408 	} else {
4409 		rc = 1;
4410 	}
4411 
4412 	if (rc) {
4413 		/* Fall back to the backdoor mechanism */
4414 		rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
4415 				  (u8 *)vpd_ver);
4416 		if (rc)
4417 			goto err1;
4418 		/* read_vpd_reg returns the stored hex as a string;
4419 		 * convert the hex string to a character string.
4420 		 * The VPD version is only 2 bytes. */
4421 		sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]);
4422 		vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
4423 	}
4424 
4425 	/* Get FW version if it's not already filled in */
4426 	fw_vers = padap->params.fw_vers;
4427 	if (!fw_vers) {
4428 		rc = t4_get_fw_version(padap, &fw_vers);
4429 		if (rc)
4430 			goto err1;
4431 	}
4432 
4433 	vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
4434 	vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
4435 	vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
4436 	vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);
4437 
4438 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4439 	if (rc)
4440 		goto err1;
4441 
4442 	rc = compress_buff(&scratch_buff, dbg_buff);
4443 
4444 err1:
4445 	release_scratch_buff(&scratch_buff, dbg_buff);
4446 err:
4447 	return rc;
4448 #endif
4449 	return (CUDBG_STATUS_NOT_IMPLEMENTED);
4450 }
4451