1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source. A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*-
13  * Copyright (c) 2019 Chelsio Communications, Inc.
14  * All rights reserved.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 #include <sys/types.h>
39 #include <sys/param.h>
40 
41 #include "common/common.h"
42 #include "common/t4_regs.h"
43 #include "common/t4_chip_type.h"
44 #include "cudbg.h"
45 #include "cudbg_lib_common.h"
46 #include "cudbg_lib.h"
47 #include "cudbg_entity.h"
48 
49 #define  BUFFER_WARN_LIMIT 10000000
50 
51 struct large_entity large_entity_list[] = {
52 	{CUDBG_EDC0, 0, 0},
53 	{CUDBG_EDC1, 0, 0},
54 	{CUDBG_MC0, 0, 0},
55 	{CUDBG_MC1, 0, 0}
56 };
57 
58 static int
59 is_fw_attached(struct cudbg_init *pdbg_init)
60 {
61 
62 	return (pdbg_init->adap->flags & FW_OK);
63 }
64 
65 /* This function adds padding bytes to debug_buffer to make it
66  * 4-byte aligned. */
67 static void
68 align_debug_buffer(struct cudbg_buffer *dbg_buff,
69 		   struct cudbg_entity_hdr *entity_hdr)
70 {
71 	u8 zero_buf[4] = {0};
72 	u8 padding, remain;
73 
74 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
75 	padding = 4 - remain;
76 	if (remain) {
77 		memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, &zero_buf,
78 		       padding);
79 		dbg_buff->offset += padding;
80 		entity_hdr->num_pad = padding;
81 	}
82 
83 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
84 }
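/*
 * Worked example (hypothetical values): if an entity starts at offset 24 and
 * collection ends at offset 30, remain = (30 - 24) % 4 = 2, so 2 zero bytes
 * of padding are appended, the offset advances to 32, and the recorded
 * entity size becomes 8.
 */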
85 
86 static void
87 u32_swap(void *a, void *b, int size)
88 {
89 	u32 t = *(u32 *)a;
90 
91 	*(u32 *)a = *(u32 *)b;
92 	*(u32 *)b = t;
93 }
94 
95 static void
96 generic_swap(void *a1, void *b1, int size)
97 {
98 	u8 t;
99 	u8 *a = (u8 *)a1;
100 	u8 *b = (u8 *)b1;
101 
102 	do {
103 		t = *a;
104 		*(a++) = *b;
105 		*(b++) = t;
106 	} while (--size > 0);
107 }
108 
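/*
 * Note: despite the name, this is an in-place heapsort over an array of
 * fixed-size elements.  cmp_func compares two elements and swap_func (or a
 * default picked from the element size) exchanges them.
 */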
109 static void
110 qsort(void *base_val, int num, int size,
111       int (*cmp_func)(const void *, const void *),
112       void (*swap_func)(void *, void *, int size))
113 {
114 	/* pre-scale counters for performance */
115 	int i = (num / 2 - 1) * size;
116 	int n = num * size;
117 	int c, r;
118 	u8 *base = (u8 *)base_val;
119 
120 	if (!swap_func)
121 		swap_func = (size == 4 ? u32_swap : generic_swap);
122 
123 	/* heapify */
124 	for (; i >= 0; i -= size) {
125 		for (r = i; r * 2 + size < n; r  = c) {
126 			c = r * 2 + size;
127 			if (c < n - size &&
128 					cmp_func(base + c, base + c + size) < 0)
129 				c += size;
130 			if (cmp_func(base + r, base + c) >= 0)
131 				break;
132 			swap_func(base + r, base + c, size);
133 		}
134 	}
135 
136 	/* sort */
137 	for (i = n - size; i > 0; i -= size) {
138 		swap_func(base, base + i, size);
139 		for (r = 0; r * 2 + size < i; r = c) {
140 			c = r * 2 + size;
141 			if (c < i - size &&
142 					cmp_func(base + c, base + c + size) < 0)
143 				c += size;
144 			if (cmp_func(base + r, base + c) >= 0)
145 				break;
146 			swap_func(base + r, base + c, size);
147 		}
148 	}
149 }
150 
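/*
 * Read an SGE context: use the firmware mailbox when the firmware is
 * attached, and fall back to the backdoor register read if the mailbox read
 * fails or no firmware is attached.
 */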
151 static void
152 read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
153 	      enum ctxt_type ctype, u32 *data)
154 {
155 	struct adapter *padap = pdbg_init->adap;
156 	int rc = -1;
157 
158 	if (is_fw_attached(pdbg_init)) {
159 		ADAPTER_LOCK(padap);
160 		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
161 		ADAPTER_UNLOCK(padap);
162 	}
163 
164 	if (rc)
165 		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
166 }
167 
168 static int
169 get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
170 			struct cudbg_buffer *dbg_buff,
171 			struct cudbg_entity_hdr **entity_hdr)
172 {
173 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
174 	int rc = 0;
175 	u32 ext_offset = cudbg_hdr->data_len;
176 	*ext_size = 0;
177 
178 	if (dbg_buff->size - dbg_buff->offset <=
179 		 sizeof(struct cudbg_entity_hdr)) {
180 		rc = CUDBG_STATUS_BUFFER_SHORT;
181 		goto err;
182 	}
183 
184 	*entity_hdr = (struct cudbg_entity_hdr *)
185 		       ((char *)outbuf + cudbg_hdr->data_len);
186 
187 	/* Find the last extended entity header */
188 	while ((*entity_hdr)->size) {
189 
190 		ext_offset += sizeof(struct cudbg_entity_hdr) +
191 				     (*entity_hdr)->size;
192 
193 		*ext_size += (*entity_hdr)->size +
194 			      sizeof(struct cudbg_entity_hdr);
195 
196 		if (dbg_buff->size - dbg_buff->offset + *ext_size  <=
197 			sizeof(struct cudbg_entity_hdr)) {
198 			rc = CUDBG_STATUS_BUFFER_SHORT;
199 			goto err;
200 		}
201 
202 		if (ext_offset != (*entity_hdr)->next_ext_offset) {
203 			ext_offset -= sizeof(struct cudbg_entity_hdr) +
204 				     (*entity_hdr)->size;
205 			break;
206 		}
207 
208 		(*entity_hdr)->next_ext_offset = *ext_size;
209 
210 		*entity_hdr = (struct cudbg_entity_hdr *)
211 					   ((char *)outbuf +
212 					   ext_offset);
213 	}
214 
215 	/* update the data offset */
216 	dbg_buff->offset = ext_offset;
217 err:
218 	return rc;
219 }
220 
221 static int
222 wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
223 		   u32 cur_entity_data_offset,
224 		   u32 cur_entity_size,
225 		   int entity_nu, u32 ext_size)
226 {
227 	struct cudbg_private *priv = handle;
228 	struct cudbg_init *cudbg_init = &priv->dbg_init;
229 	struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
230 	struct adapter *adap = cudbg_init->adap;
231 	u64 timestamp;
232 	u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
233 	u32 remain_flash_size;
234 	u32 flash_data_offset;
235 	u32 data_hdr_size;
236 	int rc = -1;
237 
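	/*
	 * Flash layout implied by the offsets computed below: the first
	 * FLASH_CUDBG_NSECS sections each carry a flash header plus a full
	 * set of entity headers, and entity data follows, so the data offset
	 * is the entity's buffer offset with the header area subtracted.
	 */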
238 	data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
239 			sizeof(struct cudbg_hdr);
240 
241 	flash_data_offset = (FLASH_CUDBG_NSECS *
242 			     (sizeof(struct cudbg_flash_hdr) +
243 			      data_hdr_size)) +
244 			    (cur_entity_data_offset - data_hdr_size);
245 
246 	if (flash_data_offset > CUDBG_FLASH_SIZE) {
247 		update_skip_size(sec_info, cur_entity_size);
248 		if (cudbg_init->verbose)
249 			cudbg_init->print(adap->dip, CE_NOTE,
250 					  "Skipping large entity...\n");
251 		return rc;
252 	}
253 
254 	remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
255 
256 	if (cur_entity_size > remain_flash_size) {
257 		update_skip_size(sec_info, cur_entity_size);
258 		if (cudbg_init->verbose)
259 			cudbg_init->print(adap->dip, CE_NOTE,
260 					  "Skipping large entity...\n");
261 	} else {
262 		timestamp = 0;
263 
264 		cur_entity_hdr_offset +=
265 			(sizeof(struct cudbg_entity_hdr) *
266 			(entity_nu - 1));
267 
268 		rc = cudbg_write_flash(handle, timestamp, dbg_buff,
269 				       cur_entity_data_offset,
270 				       cur_entity_hdr_offset,
271 				       cur_entity_size,
272 				       ext_size);
273 		if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
274 			cudbg_init->print(adap->dip, CE_NOTE,
275 					  "\n\tFLASH is full... "
276 					  "cannot write any more data to flash\n\n");
277 	}
278 
279 	return rc;
280 }
281 
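/*
 * Main collection entry point.  Builds the global cudbg header, walks the
 * per-entity bitmap and calls the matching process_entity[] handler for each
 * requested entity, recording its header, size and status.  Large entities
 * are skipped when little buffer space remains and re-tried at the end, and
 * each collected entity may also be written to flash.
 */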
282 int
283 cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
284 {
285 	struct cudbg_entity_hdr *entity_hdr = NULL;
286 	struct cudbg_entity_hdr *ext_entity_hdr = NULL;
287 	struct cudbg_hdr *cudbg_hdr;
288 	struct cudbg_buffer dbg_buff;
289 	struct cudbg_error cudbg_err = {0};
290 	int large_entity_code;
291 
292 	u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
293 	struct cudbg_init *cudbg_init =
294 		&(((struct cudbg_private *)handle)->dbg_init);
295 	struct adapter *padap = cudbg_init->adap;
296 	u32 total_size, remaining_buf_size;
297 	u32 ext_size = 0;
298 	int index, bit, i, rc = -1;
299 	int all;
300 	bool flag_ext = 0;
301 
302 	reset_skip_entity();
303 
304 	dbg_buff.data = outbuf;
305 	dbg_buff.size = *outbuf_size;
306 	dbg_buff.offset = 0;
307 
308 	cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
309 	cudbg_hdr->signature = CUDBG_SIGNATURE;
310 	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
311 	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
312 	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
313 	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
314 	cudbg_hdr->chip_ver = padap->params.chip;
315 
316 	if (cudbg_hdr->data_len)
317 		flag_ext = 1;
318 
319 	if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
320 	    dbg_buff.size) {
321 		rc = CUDBG_STATUS_SMALL_BUFF;
322 		total_size = cudbg_hdr->hdr_len;
323 		goto err;
324 	}
325 
326 	/* If ext flag is set then move the offset to the end of the buf
327 	 * so that we can add ext entities
328 	 */
329 	if (flag_ext) {
330 		ext_entity_hdr = (struct cudbg_entity_hdr *)
331 			      ((char *)outbuf + cudbg_hdr->hdr_len +
332 			      (sizeof(struct cudbg_entity_hdr) *
333 			      (CUDBG_EXT_ENTITY - 1)));
334 		ext_entity_hdr->start_offset = cudbg_hdr->data_len;
335 		ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
336 		ext_entity_hdr->size = 0;
337 		dbg_buff.offset = cudbg_hdr->data_len;
338 	} else {
339 		dbg_buff.offset += cudbg_hdr->hdr_len; /* move 24 bytes*/
340 		dbg_buff.offset += CUDBG_MAX_ENTITY *
341 					sizeof(struct cudbg_entity_hdr);
342 	}
343 
344 	total_size = dbg_buff.offset;
345 	all = dbg_bitmap[0] & (1 << CUDBG_ALL);
346 
347 	for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
348 		index = i / 8;
349 		bit = i % 8;
350 
351 		if (entity_list[i].bit == CUDBG_EXT_ENTITY)
352 			continue;
353 
354 		if (all || (dbg_bitmap[index] & (1 << bit))) {
355 
356 			if (!flag_ext) {
357 				rc = get_entity_hdr(outbuf, i, dbg_buff.size,
358 						    &entity_hdr);
359 				if (rc)
360 					cudbg_hdr->hdr_flags = rc;
361 			} else {
362 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
363 							     &dbg_buff,
364 							     &entity_hdr);
365 				if (rc)
366 					goto err;
367 
368 				/* move the offset after the ext header */
369 				dbg_buff.offset +=
370 					sizeof(struct cudbg_entity_hdr);
371 			}
372 
373 			entity_hdr->entity_type = i;
374 			entity_hdr->start_offset = dbg_buff.offset;
375 			/* process each entity by calling process_entity fp */
376 			remaining_buf_size = dbg_buff.size - dbg_buff.offset;
377 
378 			if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
379 			    is_large_entity(i)) {
380 				if (cudbg_init->verbose)
381 					cudbg_init->print(padap->dip, CE_NOTE,
382 							  "Skipping %s\n",
383 					    entity_list[i].name);
384 				skip_entity(i);
385 				continue;
386 			} else {
387 
388 				/* If fw_attach is 0, then skip entities which
389 				 * communicate with firmware
390 				 */
391 
392 				if (!is_fw_attached(cudbg_init) &&
393 				    (entity_list[i].flag &
394 				    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
395 					if (cudbg_init->verbose)
396 						cudbg_init->print(padap->dip, CE_NOTE,
397 							  "Skipping %s entity, "\
398 							  "because fw_attach "\
399 							  "is 0\n",
400 							  entity_list[i].name);
401 					continue;
402 				}
403 
404 				if (cudbg_init->verbose)
405 					cudbg_init->print(padap->dip, CE_NOTE,
406 							  "Collecting debug entity: "\
407 						  "%s\n", entity_list[i].name);
408 				memset(&cudbg_err, 0,
409 				       sizeof(struct cudbg_error));
410 				rc = process_entity[i-1](cudbg_init, &dbg_buff,
411 							 &cudbg_err);
412 			}
413 
414 			if (rc) {
415 				entity_hdr->size = 0;
416 				dbg_buff.offset = entity_hdr->start_offset;
417 			} else
418 				align_debug_buffer(&dbg_buff, entity_hdr);
419 
420 			if (cudbg_err.sys_err)
421 				rc = CUDBG_SYSTEM_ERROR;
422 
423 			entity_hdr->hdr_flags = rc;
424 			entity_hdr->sys_err = cudbg_err.sys_err;
425 			entity_hdr->sys_warn = cudbg_err.sys_warn;
426 
427 			/* We don't want to include ext entity size in global
428 			 * header
429 			 */
430 			if (!flag_ext)
431 				total_size += entity_hdr->size;
432 
433 			cudbg_hdr->data_len = total_size;
434 			*outbuf_size = total_size;
435 
436 			/* consider the size of the ext entity header and data
437 			 * also
438 			 */
439 			if (flag_ext) {
440 				ext_size += (sizeof(struct cudbg_entity_hdr) +
441 					     entity_hdr->size);
442 				entity_hdr->start_offset -= cudbg_hdr->data_len;
443 				ext_entity_hdr->size = ext_size;
444 				entity_hdr->next_ext_offset = ext_size;
445 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
446 			}
447 
448 			if (cudbg_init->use_flash) {
449 				if (flag_ext) {
450 					wr_entity_to_flash(handle,
451 							   &dbg_buff,
452 							   ext_entity_hdr->
453 							   start_offset,
454 							   entity_hdr->
455 							   size,
456 							   CUDBG_EXT_ENTITY,
457 							   ext_size);
458 				}
459 				else
460 					wr_entity_to_flash(handle,
461 							   &dbg_buff,
462 							   entity_hdr->\
463 							   start_offset,
464 							   entity_hdr->size,
465 							   i, ext_size);
466 			}
467 		}
468 	}
469 
470 	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
471 	     i++) {
472 		large_entity_code = large_entity_list[i].entity_code;
473 		if (large_entity_list[i].skip_flag) {
474 			if (!flag_ext) {
475 				rc = get_entity_hdr(outbuf, large_entity_code,
476 						    dbg_buff.size, &entity_hdr);
477 				if (rc)
478 					cudbg_hdr->hdr_flags = rc;
479 			} else {
480 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
481 							     &dbg_buff,
482 							     &entity_hdr);
483 				if (rc)
484 					goto err;
485 
486 				dbg_buff.offset +=
487 					sizeof(struct cudbg_entity_hdr);
488 			}
489 
490 			/* If fw_attach is 0, then skip entities which
491 			 * communicate with firmware
492 			 */
493 			if (!is_fw_attached(cudbg_init) &&
494 			    (entity_list[large_entity_code].flag &
495 			    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
496 				if (cudbg_init->verbose)
497 					cudbg_init->print(padap->dip, CE_NOTE,
498 						  "Skipping %s entity, "\
499 						  "because fw_attach "\
500 						  "is 0\n",
501 						  entity_list[large_entity_code]
502 						  .name);
503 				continue;
504 			}
505 
506 			entity_hdr->entity_type = large_entity_code;
507 			entity_hdr->start_offset = dbg_buff.offset;
508 			if (cudbg_init->verbose)
509 				cudbg_init->print(padap->dip, CE_NOTE,
510 					  "Re-trying debug entity: %s\n",
511 					  entity_list[large_entity_code].name);
512 
513 			memset(&cudbg_err, 0, sizeof(struct cudbg_error));
514 			rc = process_entity[large_entity_code - 1](cudbg_init,
515 								   &dbg_buff,
516 								   &cudbg_err);
517 			if (rc) {
518 				entity_hdr->size = 0;
519 				dbg_buff.offset = entity_hdr->start_offset;
520 			} else
521 				align_debug_buffer(&dbg_buff, entity_hdr);
522 
523 			if (cudbg_err.sys_err)
524 				rc = CUDBG_SYSTEM_ERROR;
525 
526 			entity_hdr->hdr_flags = rc;
527 			entity_hdr->sys_err = cudbg_err.sys_err;
528 			entity_hdr->sys_warn = cudbg_err.sys_warn;
529 
530 			/* We don't want to include ext entity size in global
531 			 * header
532 			 */
533 			if (!flag_ext)
534 				total_size += entity_hdr->size;
535 
536 			cudbg_hdr->data_len = total_size;
537 			*outbuf_size = total_size;
538 
539 			/* consider the size of the ext entity header and
540 			 * data also
541 			 */
542 			if (flag_ext) {
543 				ext_size += (sizeof(struct cudbg_entity_hdr) +
544 						   entity_hdr->size);
545 				entity_hdr->start_offset -=
546 							cudbg_hdr->data_len;
547 				ext_entity_hdr->size = ext_size;
548 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
549 			}
550 
551 			if (cudbg_init->use_flash) {
552 				if (flag_ext)
553 					wr_entity_to_flash(handle,
554 							   &dbg_buff,
555 							   ext_entity_hdr->
556 							   start_offset,
557 							   entity_hdr->size,
558 							   CUDBG_EXT_ENTITY,
559 							   ext_size);
560 				else
561 					wr_entity_to_flash(handle,
562 							   &dbg_buff,
563 							   entity_hdr->
564 							   start_offset,
565 							   entity_hdr->
566 							   size,
567 							   large_entity_list[i].
568 							   entity_code,
569 							   ext_size);
570 			}
571 		}
572 	}
573 
574 	cudbg_hdr->data_len = total_size;
575 	*outbuf_size = total_size;
576 
577 	if (flag_ext)
578 		*outbuf_size += ext_size;
579 
580 	return 0;
581 err:
582 	return rc;
583 }
584 
585 void
586 reset_skip_entity(void)
587 {
588 	int i;
589 
590 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
591 		large_entity_list[i].skip_flag = 0;
592 }
593 
594 void
595 skip_entity(int entity_code)
596 {
597 	int i;
598 	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
599 	     i++) {
600 		if (large_entity_list[i].entity_code == entity_code)
601 			large_entity_list[i].skip_flag = 1;
602 	}
603 }
604 
605 int
606 is_large_entity(int entity_code)
607 {
608 	int i;
609 
610 	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
611 	     i++) {
612 		if (large_entity_list[i].entity_code == entity_code)
613 			return 1;
614 	}
615 	return 0;
616 }
617 
618 int
619 get_entity_hdr(void *outbuf, int i, u32 size,
620 	       struct cudbg_entity_hdr **entity_hdr)
621 {
622 	int rc = 0;
623 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
624 
625 	if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr)*i) > size)
626 		return CUDBG_STATUS_SMALL_BUFF;
627 
628 	*entity_hdr = (struct cudbg_entity_hdr *)
629 		      ((char *)outbuf+cudbg_hdr->hdr_len +
630 		       (sizeof(struct cudbg_entity_hdr)*(i-1)));
631 	return rc;
632 }
633 
634 static int
635 collect_rss(struct cudbg_init *pdbg_init,
636 	    struct cudbg_buffer *dbg_buff,
637 	    struct cudbg_error *cudbg_err)
638 {
639 	struct adapter *padap = pdbg_init->adap;
640 	struct cudbg_buffer scratch_buff;
641 	u32 size;
642 	int rc = 0;
643 
644 	size = RSS_NENTRIES  * sizeof(u16);
645 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
646 	if (rc)
647 		goto err;
648 
649 	rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
650 	if (rc) {
651 		if (pdbg_init->verbose)
652 			pdbg_init->print(padap->dip, CE_NOTE,
653 					 "%s(), t4_read_rss failed!, rc: %d\n",
654 				 __func__, rc);
655 		cudbg_err->sys_err = rc;
656 		goto err1;
657 	}
658 
659 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
660 	if (rc)
661 		goto err1;
662 
663 	rc = compress_buff(&scratch_buff, dbg_buff);
664 
665 err1:
666 	release_scratch_buff(&scratch_buff, dbg_buff);
667 err:
668 	return rc;
669 }
670 
671 static int
672 collect_sw_state(struct cudbg_init *pdbg_init,
673 		 struct cudbg_buffer *dbg_buff,
674 		 struct cudbg_error *cudbg_err)
675 {
676 	struct adapter *padap = pdbg_init->adap;
677 	struct cudbg_buffer scratch_buff;
678 	struct sw_state *swstate;
679 	u32 size;
680 	int rc = 0;
681 
682 	size = sizeof(struct sw_state);
683 
684 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
685 	if (rc)
686 		goto err;
687 
688 	swstate = (struct sw_state *) scratch_buff.data;
689 
690 	swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
691 	snprintf((char *)swstate->caller_string, sizeof(swstate->caller_string), "%s",
692 	    "Illumos");
693 	swstate->os_type = 0;
694 
695 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
696 	if (rc)
697 		goto err1;
698 
699 	rc = compress_buff(&scratch_buff, dbg_buff);
700 
701 err1:
702 	release_scratch_buff(&scratch_buff, dbg_buff);
703 err:
704 	return rc;
705 }
706 
707 static int
708 collect_ddp_stats(struct cudbg_init *pdbg_init,
709 		  struct cudbg_buffer *dbg_buff,
710 		  struct cudbg_error *cudbg_err)
711 {
712 	struct adapter *padap = pdbg_init->adap;
713 	struct cudbg_buffer scratch_buff;
714 	struct tp_usm_stats  *tp_usm_stats_buff;
715 	u32 size;
716 	int rc = 0;
717 
718 	size = sizeof(struct tp_usm_stats);
719 
720 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
721 	if (rc)
722 		goto err;
723 
724 	tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;
725 
726 	/* spin_lock(&padap->stats_lock);	TODO*/
727 	t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
728 	/* spin_unlock(&padap->stats_lock);	TODO*/
729 
730 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
731 	if (rc)
732 		goto err1;
733 
734 	rc = compress_buff(&scratch_buff, dbg_buff);
735 
736 err1:
737 	release_scratch_buff(&scratch_buff, dbg_buff);
738 err:
739 	return rc;
740 }
741 
742 static int
743 collect_ulptx_la(struct cudbg_init *pdbg_init,
744 		 struct cudbg_buffer *dbg_buff,
745 		 struct cudbg_error *cudbg_err)
746 {
747 	struct adapter *padap = pdbg_init->adap;
748 	struct cudbg_buffer scratch_buff;
749 	struct struct_ulptx_la *ulptx_la_buff;
750 	u32 size, i, j;
751 	int rc = 0;
752 
753 	size = sizeof(struct struct_ulptx_la);
754 
755 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
756 	if (rc)
757 		goto err;
758 
759 	ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;
760 
761 	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
762 		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
763 						      A_ULP_TX_LA_RDPTR_0 +
764 						      0x10 * i);
765 		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
766 						      A_ULP_TX_LA_WRPTR_0 +
767 						      0x10 * i);
768 		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
769 						       A_ULP_TX_LA_RDDATA_0 +
770 						       0x10 * i);
771 		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
772 			ulptx_la_buff->rd_data[i][j] =
773 				t4_read_reg(padap,
774 					    A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
775 		}
776 	}
777 
778 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
779 	if (rc)
780 		goto err1;
781 
782 	rc = compress_buff(&scratch_buff, dbg_buff);
783 
784 err1:
785 	release_scratch_buff(&scratch_buff, dbg_buff);
786 err:
787 	return rc;
788 
789 }
790 
791 static int
792 collect_ulprx_la(struct cudbg_init *pdbg_init,
793 		 struct cudbg_buffer *dbg_buff,
794 		 struct cudbg_error *cudbg_err)
795 {
796 	struct adapter *padap = pdbg_init->adap;
797 	struct cudbg_buffer scratch_buff;
798 	struct struct_ulprx_la *ulprx_la_buff;
799 	u32 size;
800 	int rc = 0;
801 
802 	size = sizeof(struct struct_ulprx_la);
803 
804 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
805 	if (rc)
806 		goto err;
807 
808 	ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
809 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
810 	ulprx_la_buff->size = ULPRX_LA_SIZE;
811 
812 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
813 	if (rc)
814 		goto err1;
815 
816 	rc = compress_buff(&scratch_buff, dbg_buff);
817 
818 err1:
819 	release_scratch_buff(&scratch_buff, dbg_buff);
820 err:
821 	return rc;
822 }
823 
824 static int
825 collect_cpl_stats(struct cudbg_init *pdbg_init,
826 		  struct cudbg_buffer *dbg_buff,
827 		  struct cudbg_error *cudbg_err)
828 {
829 	struct adapter *padap = pdbg_init->adap;
830 	struct cudbg_buffer scratch_buff;
831 	struct struct_tp_cpl_stats *tp_cpl_stats_buff;
832 	u32 size;
833 	int rc = 0;
834 
835 	size = sizeof(struct struct_tp_cpl_stats);
836 
837 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
838 	if (rc)
839 		goto err;
840 
841 	tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
842 	tp_cpl_stats_buff->nchan = padap->params.arch.nchan;
843 
844 	t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
845 
846 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
847 	if (rc)
848 		goto err1;
849 
850 	rc = compress_buff(&scratch_buff, dbg_buff);
851 
852 err1:
853 	release_scratch_buff(&scratch_buff, dbg_buff);
854 err:
855 	return rc;
856 }
857 
858 static int
859 collect_wc_stats(struct cudbg_init *pdbg_init,
860 		 struct cudbg_buffer *dbg_buff,
861 		 struct cudbg_error *cudbg_err)
862 {
863 	struct adapter *padap = pdbg_init->adap;
864 	struct cudbg_buffer scratch_buff;
865 	struct struct_wc_stats *wc_stats_buff;
866 	u32 val1;
867 	u32 val2;
868 	u32 size;
869 
870 	int rc = 0;
871 
872 	size = sizeof(struct struct_wc_stats);
873 
874 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
875 	if (rc)
876 		goto err;
877 
878 	wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;
879 
880 	if (!is_t4(padap->params.chip)) {
881 		val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
882 		val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
883 		wc_stats_buff->wr_cl_success = val1 - val2;
884 		wc_stats_buff->wr_cl_fail = val2;
885 	} else {
886 		wc_stats_buff->wr_cl_success = 0;
887 		wc_stats_buff->wr_cl_fail = 0;
888 	}
889 
890 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
891 	if (rc)
892 		goto err1;
893 
894 	rc = compress_buff(&scratch_buff, dbg_buff);
895 err1:
896 	release_scratch_buff(&scratch_buff, dbg_buff);
897 err:
898 	return rc;
899 }
900 
901 static int
902 mem_desc_cmp(const void *a, const void *b)
903 {
904 	return ((const struct struct_mem_desc *)a)->base -
905 		((const struct struct_mem_desc *)b)->base;
906 }
907 
908 static int
909 fill_meminfo(struct adapter *padap,
910 	     struct struct_meminfo *meminfo_buff)
911 {
912 	struct struct_mem_desc *md;
913 	u32 size, lo, hi;
914 	u32 used, alloc;
915 	int n, i, rc = 0;
916 
917 	size = sizeof(struct struct_meminfo);
918 
919 	memset(meminfo_buff->avail, 0,
920 	       ARRAY_SIZE(meminfo_buff->avail) *
921 	       sizeof(struct struct_mem_desc));
922 	memset(meminfo_buff->mem, 0,
923 	       (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
924 	md  = meminfo_buff->mem;
925 
926 	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
927 		meminfo_buff->mem[i].limit = 0;
928 		meminfo_buff->mem[i].idx = i;
929 	}
930 
931 	i = 0;
932 
933 	lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
934 
935 	if (lo & F_EDRAM0_ENABLE) {
936 		hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
937 		meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
938 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
939 					       (G_EDRAM0_SIZE(hi) << 20);
940 		meminfo_buff->avail[i].idx = 0;
941 		i++;
942 	}
943 
944 	if (lo & F_EDRAM1_ENABLE) {
945 		hi =  t4_read_reg(padap, A_MA_EDRAM1_BAR);
946 		meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
947 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
948 					       (G_EDRAM1_SIZE(hi) << 20);
949 		meminfo_buff->avail[i].idx = 1;
950 		i++;
951 	}
952 
953 	if (is_t5(padap->params.chip)) {
954 		if (lo & F_EXT_MEM0_ENABLE) {
955 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
956 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
957 			meminfo_buff->avail[i].limit =
958 				meminfo_buff->avail[i].base +
959 				(G_EXT_MEM_SIZE(hi) << 20);
960 			meminfo_buff->avail[i].idx = 3;
961 			i++;
962 		}
963 
964 		if (lo & F_EXT_MEM1_ENABLE) {
965 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
966 			meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
967 			meminfo_buff->avail[i].limit =
968 				meminfo_buff->avail[i].base +
969 				(G_EXT_MEM1_SIZE(hi) << 20);
970 			meminfo_buff->avail[i].idx = 4;
971 			i++;
972 		}
973 	} else if (is_t6(padap->params.chip)) {
974 		if (lo & F_EXT_MEM_ENABLE) {
975 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
976 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
977 			meminfo_buff->avail[i].limit =
978 				meminfo_buff->avail[i].base +
979 				(G_EXT_MEM_SIZE(hi) << 20);
980 			meminfo_buff->avail[i].idx = 2;
981 			i++;
982 		}
983 	}
984 
985 	if (!i) {				   /* no memory available */
986 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
987 		goto err;
988 	}
989 
990 	meminfo_buff->avail_c = i;
991 	qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
992 	    mem_desc_cmp, NULL);
993 	(md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
994 	(md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
995 	(md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
996 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
997 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
998 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
999 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
1000 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
1001 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);
1002 
1003 	/* the next few have explicit upper bounds */
1004 	md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
1005 	md->limit = md->base - 1 +
1006 		    t4_read_reg(padap,
1007 				A_TP_PMM_TX_PAGE_SIZE) *
1008 				G_PMTXMAXPAGE(t4_read_reg(padap,
1009 							  A_TP_PMM_TX_MAX_PAGE)
1010 					     );
1011 	md++;
1012 
1013 	md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
1014 	md->limit = md->base - 1 +
1015 		    t4_read_reg(padap,
1016 				A_TP_PMM_RX_PAGE_SIZE) *
1017 				G_PMRXMAXPAGE(t4_read_reg(padap,
1018 							  A_TP_PMM_RX_MAX_PAGE)
1019 					      );
1020 	md++;
1021 	if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
1022 		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
1023 			hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
1024 			md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
1025 		} else {
1026 			hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
1027 			md->base = t4_read_reg(padap,
1028 					       A_LE_DB_HASH_TBL_BASE_ADDR);
1029 		}
1030 		md->limit = 0;
1031 	} else {
1032 		md->base = 0;
1033 		md->idx = ARRAY_SIZE(region);  /* hide it */
1034 	}
1035 	md++;
1036 #define ulp_region(reg) \
1037 	{\
1038 		md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
1039 		(md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
1040 	}
1041 
1042 	ulp_region(RX_ISCSI);
1043 	ulp_region(RX_TDDP);
1044 	ulp_region(TX_TPT);
1045 	ulp_region(RX_STAG);
1046 	ulp_region(RX_RQ);
1047 	ulp_region(RX_RQUDP);
1048 	ulp_region(RX_PBL);
1049 	ulp_region(TX_PBL);
1050 #undef ulp_region
1051 	md->base = 0;
1052 	md->idx = ARRAY_SIZE(region);
1053 	if (!is_t4(padap->params.chip)) {
1054 		u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
1055 		u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);
1056 		if (is_t5(padap->params.chip)) {
1057 			if (sge_ctrl & F_VFIFO_ENABLE)
1058 				size = G_DBVFIFO_SIZE(fifo_size);
1059 		} else
1060 			size = G_T6_DBVFIFO_SIZE(fifo_size);
1061 
1062 		if (size) {
1063 			md->base = G_BASEADDR(t4_read_reg(padap,
1064 							  A_SGE_DBVFIFO_BADDR));
1065 			md->limit = md->base + (size << 2) - 1;
1066 		}
1067 	}
1068 
1069 	md++;
1070 
1071 	md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
1072 	md->limit = 0;
1073 	md++;
1074 	md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
1075 	md->limit = 0;
1076 	md++;
1077 #ifndef __NO_DRIVER_OCQ_SUPPORT__
1078 	/*md->base = padap->vres.ocq.start;*/
1079 	/*if (adap->vres.ocq.size)*/
1080 	/*	  md->limit = md->base + adap->vres.ocq.size - 1;*/
1081 	/*else*/
1082 	md->idx = ARRAY_SIZE(region);  /* hide it */
1083 	md++;
1084 #endif
1085 
1086 	/* add any address-space holes, there can be up to 3 */
1087 	for (n = 0; n < i - 1; n++)
1088 		if (meminfo_buff->avail[n].limit <
1089 		    meminfo_buff->avail[n + 1].base)
1090 			(md++)->base = meminfo_buff->avail[n].limit;
1091 
1092 	if (meminfo_buff->avail[n].limit)
1093 		(md++)->base = meminfo_buff->avail[n].limit;
1094 
1095 	n = (int) (md - meminfo_buff->mem);
1096 	meminfo_buff->mem_c = n;
1097 
1098 	qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
1099 	    mem_desc_cmp, NULL);
1100 
1101 	lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
1102 	hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
1103 	meminfo_buff->up_ram_lo = lo;
1104 	meminfo_buff->up_ram_hi = hi;
1105 
1106 	lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
1107 	hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
1108 	meminfo_buff->up_extmem2_lo = lo;
1109 	meminfo_buff->up_extmem2_hi = hi;
1110 
1111 	lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
1112 	meminfo_buff->rx_pages_data[0] =  G_PMRXMAXPAGE(lo);
1113 	meminfo_buff->rx_pages_data[1] =
1114 		t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
1115 	meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1 ;
1116 
1117 	lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
1118 	hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
1119 	meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
1120 	meminfo_buff->tx_pages_data[1] =
1121 		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
1122 	meminfo_buff->tx_pages_data[2] =
1123 		hi >= (1 << 20) ? 'M' : 'K';
1124 	meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);
1125 
1126 	for (i = 0; i < 4; i++) {
1127 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
1128 			lo = t4_read_reg(padap,
1129 					 A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
1130 		else
1131 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
1132 		if (is_t5(padap->params.chip)) {
1133 			used = G_T5_USED(lo);
1134 			alloc = G_T5_ALLOC(lo);
1135 		} else {
1136 			used = G_USED(lo);
1137 			alloc = G_ALLOC(lo);
1138 		}
1139 		meminfo_buff->port_used[i] = used;
1140 		meminfo_buff->port_alloc[i] = alloc;
1141 	}
1142 
1143 	for (i = 0; i < padap->params.arch.nchan; i++) {
1144 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
1145 			lo = t4_read_reg(padap,
1146 					 A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
1147 		else
1148 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
1149 		if (is_t5(padap->params.chip)) {
1150 			used = G_T5_USED(lo);
1151 			alloc = G_T5_ALLOC(lo);
1152 		} else {
1153 			used = G_USED(lo);
1154 			alloc = G_ALLOC(lo);
1155 		}
1156 		meminfo_buff->loopback_used[i] = used;
1157 		meminfo_buff->loopback_alloc[i] = alloc;
1158 	}
1159 err:
1160 	return rc;
1161 }
1162 
1163 static int
1164 collect_meminfo(struct cudbg_init *pdbg_init,
1165 		struct cudbg_buffer *dbg_buff,
1166 		struct cudbg_error *cudbg_err)
1167 {
1168 	struct adapter *padap = pdbg_init->adap;
1169 	struct struct_meminfo *meminfo_buff;
1170 	struct cudbg_buffer scratch_buff;
1171 	int rc = 0;
1172 	u32 size;
1173 
1174 	size = sizeof(struct struct_meminfo);
1175 
1176 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1177 	if (rc)
1178 		goto err;
1179 
1180 	meminfo_buff = (struct struct_meminfo *)scratch_buff.data;
1181 
1182 	rc = fill_meminfo(padap, meminfo_buff);
1183 	if (rc)
1184 		goto err;
1185 
1186 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1187 	if (rc)
1188 		goto err1;
1189 
1190 	rc = compress_buff(&scratch_buff, dbg_buff);
1191 err1:
1192 	release_scratch_buff(&scratch_buff, dbg_buff);
1193 err:
1194 	return rc;
1195 }
1196 
1197 static int
1198 collect_lb_stats(struct cudbg_init *pdbg_init,
1199 		 struct cudbg_buffer *dbg_buff,
1200 		 struct cudbg_error *cudbg_err)
1201 {
1202 	struct adapter *padap = pdbg_init->adap;
1203 	struct struct_lb_stats *lb_stats_buff;
1204 	struct cudbg_buffer scratch_buff;
1205 	struct lb_port_stats *tmp_stats;
1206 	u32 i, n, size;
1207 	int rc = 0;
1208 
1209 	rc = padap->params.nports;
1210 	if (rc < 0)
1211 		goto err;
1212 
1213 	n = rc;
1214 	size = sizeof(struct struct_lb_stats) +
1215 	       n * sizeof(struct lb_port_stats);
1216 
1217 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1218 	if (rc)
1219 		goto err;
1220 
1221 	lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;
1222 
1223 	lb_stats_buff->nchan = n;
1224 	tmp_stats = lb_stats_buff->s;
1225 
1226 	for (i = 0; i < n; i += 2, tmp_stats += 2) {
1227 		t4_get_lb_stats(padap, i, tmp_stats);
1228 		t4_get_lb_stats(padap, i + 1, tmp_stats+1);
1229 	}
1230 
1231 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1232 	if (rc)
1233 		goto err1;
1234 
1235 	rc = compress_buff(&scratch_buff, dbg_buff);
1236 err1:
1237 	release_scratch_buff(&scratch_buff, dbg_buff);
1238 err:
1239 	return rc;
1240 }
1241 
1242 static int
1243 collect_rdma_stats(struct cudbg_init *pdbg_init,
1244 		   struct cudbg_buffer *dbg_buff,
1245 		   struct cudbg_error *cudbg_err)
1246 {
1247 	struct adapter *padap = pdbg_init->adap;
1248 	struct cudbg_buffer scratch_buff;
1249 	struct tp_rdma_stats *rdma_stats_buff;
1250 	u32 size;
1251 	int rc = 0;
1252 
1253 	size = sizeof(struct tp_rdma_stats);
1254 
1255 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1256 	if (rc)
1257 		goto err;
1258 
1259 	rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;
1260 
1261 	/* spin_lock(&padap->stats_lock);	TODO*/
1262 	t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
1263 	/* spin_unlock(&padap->stats_lock);	TODO*/
1264 
1265 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1266 	if (rc)
1267 		goto err1;
1268 
1269 	rc = compress_buff(&scratch_buff, dbg_buff);
1270 err1:
1271 	release_scratch_buff(&scratch_buff, dbg_buff);
1272 err:
1273 	return rc;
1274 }
1275 
1276 static int
1277 collect_clk_info(struct cudbg_init *pdbg_init,
1278 		 struct cudbg_buffer *dbg_buff,
1279 		 struct cudbg_error *cudbg_err)
1280 {
1281 	struct cudbg_buffer scratch_buff;
1282 	struct adapter *padap = pdbg_init->adap;
1283 	struct struct_clk_info *clk_info_buff;
1284 	u64 tp_tick_us;
1285 	int size;
1286 	int rc = 0;
1287 
1288 	if (!padap->params.vpd.cclk) {
1289 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1290 		goto err;
1291 	}
1292 
1293 	size = sizeof(struct struct_clk_info);
1294 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1295 	if (rc)
1296 		goto err;
1297 
1298 	clk_info_buff = (struct struct_clk_info *) scratch_buff.data;
1299 
1300 	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* in ps */
1301 
1302 	clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
1303 	clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
1304 	clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
1305 	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1306 	/* in us */
1307 	clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
1308 				      clk_info_buff->dack_re) / 1000000) *
1309 				     t4_read_reg(padap, A_TP_DACK_TIMER);
1310 
1311 	clk_info_buff->retransmit_min =
1312 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
1313 	clk_info_buff->retransmit_max =
1314 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);
1315 
1316 	clk_info_buff->persist_timer_min =
1317 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
1318 	clk_info_buff->persist_timer_max =
1319 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);
1320 
1321 	clk_info_buff->keepalive_idle_timer =
1322 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
1323 	clk_info_buff->keepalive_interval =
1324 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);
1325 
1326 	clk_info_buff->initial_srtt =
1327 		tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
1328 	clk_info_buff->finwait2_timer =
1329 		tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);
1330 
1331 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1332 
1333 	if (rc)
1334 		goto err1;
1335 
1336 	rc = compress_buff(&scratch_buff, dbg_buff);
1337 err1:
1338 	release_scratch_buff(&scratch_buff, dbg_buff);
1339 err:
1340 	return rc;
1341 
1342 }
1343 
1344 static int
1345 collect_macstats(struct cudbg_init *pdbg_init,
1346 		 struct cudbg_buffer *dbg_buff,
1347 		 struct cudbg_error *cudbg_err)
1348 {
1349 	struct adapter *padap = pdbg_init->adap;
1350 	struct cudbg_buffer scratch_buff;
1351 	struct struct_mac_stats_rev1 *mac_stats_buff;
1352 	u32 i, n, size;
1353 	int rc = 0;
1354 
1355 	rc = padap->params.nports;
1356 	if (rc < 0)
1357 		goto err;
1358 
1359 	n = rc;
1360 	size = sizeof(struct struct_mac_stats_rev1);
1361 
1362 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1363 	if (rc)
1364 		goto err;
1365 
1366 	mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;
1367 
1368 	mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1369 	mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
1370 	mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
1371 				       sizeof(struct cudbg_ver_hdr);
1372 
1373 	mac_stats_buff->port_count = n;
1374 	for (i = 0; i <  mac_stats_buff->port_count; i++)
1375 		t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
1376 
1377 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1378 	if (rc)
1379 		goto err1;
1380 
1381 	rc = compress_buff(&scratch_buff, dbg_buff);
1382 err1:
1383 	release_scratch_buff(&scratch_buff, dbg_buff);
1384 err:
1385 	return rc;
1386 }
1387 
1388 static int
1389 collect_cim_pif_la(struct cudbg_init *pdbg_init,
1390 		   struct cudbg_buffer *dbg_buff,
1391 		   struct cudbg_error *cudbg_err)
1392 {
1393 	struct adapter *padap = pdbg_init->adap;
1394 	struct cudbg_buffer scratch_buff;
1395 	struct cim_pif_la *cim_pif_la_buff;
1396 	u32 size;
1397 	int rc = 0;
1398 
1399 	size = sizeof(struct cim_pif_la) +
1400 	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1401 
1402 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1403 	if (rc)
1404 		goto err;
1405 
1406 	cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
1407 	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1408 
1409 	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1410 			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1411 			   NULL, NULL);
1412 
1413 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1414 	if (rc)
1415 		goto err1;
1416 
1417 	rc = compress_buff(&scratch_buff, dbg_buff);
1418 err1:
1419 	release_scratch_buff(&scratch_buff, dbg_buff);
1420 err:
1421 	return rc;
1422 }
1423 
1424 static int
1425 collect_tp_la(struct cudbg_init *pdbg_init,
1426 	      struct cudbg_buffer *dbg_buff,
1427 	      struct cudbg_error *cudbg_err)
1428 {
1429 	struct adapter *padap = pdbg_init->adap;
1430 	struct cudbg_buffer scratch_buff;
1431 	struct struct_tp_la *tp_la_buff;
1432 	u32 size;
1433 	int rc = 0;
1434 
1435 	size = sizeof(struct struct_tp_la) + TPLA_SIZE *  sizeof(u64);
1436 
1437 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1438 	if (rc)
1439 		goto err;
1440 
1441 	tp_la_buff = (struct struct_tp_la *) scratch_buff.data;
1442 
1443 	tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
1444 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1445 
1446 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1447 	if (rc)
1448 		goto err1;
1449 
1450 	rc = compress_buff(&scratch_buff, dbg_buff);
1451 err1:
1452 	release_scratch_buff(&scratch_buff, dbg_buff);
1453 err:
1454 	return rc;
1455 }
1456 
1457 static int
1458 collect_fcoe_stats(struct cudbg_init *pdbg_init,
1459 		   struct cudbg_buffer *dbg_buff,
1460 		   struct cudbg_error *cudbg_err)
1461 {
1462 	struct adapter *padap = pdbg_init->adap;
1463 	struct cudbg_buffer scratch_buff;
1464 	struct struct_tp_fcoe_stats  *tp_fcoe_stats_buff;
1465 	u32 size;
1466 	int rc = 0;
1467 
1468 	size = sizeof(struct struct_tp_fcoe_stats);
1469 
1470 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1471 	if (rc)
1472 		goto err;
1473 
1474 	tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
1475 
1476 	t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
1477 	t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
1478 
1479 	if (padap->params.arch.nchan == NCHAN) {
1480 		t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
1481 		t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
1482 	}
1483 
1484 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1485 	if (rc)
1486 		goto err1;
1487 
1488 	rc = compress_buff(&scratch_buff, dbg_buff);
1489 err1:
1490 	release_scratch_buff(&scratch_buff, dbg_buff);
1491 err:
1492 	return rc;
1493 }
1494 
1495 static int
1496 collect_tp_err_stats(struct cudbg_init *pdbg_init,
1497 		     struct cudbg_buffer *dbg_buff,
1498 		     struct cudbg_error *cudbg_err)
1499 {
1500 	struct adapter *padap = pdbg_init->adap;
1501 	struct cudbg_buffer scratch_buff;
1502 	struct struct_tp_err_stats *tp_err_stats_buff;
1503 	u32 size;
1504 	int rc = 0;
1505 
1506 	size = sizeof(struct struct_tp_err_stats);
1507 
1508 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1509 	if (rc)
1510 		goto err;
1511 
1512 	tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
1513 
1514 	t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
1515 	tp_err_stats_buff->nchan = padap->params.arch.nchan;
1516 
1517 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1518 	if (rc)
1519 		goto err1;
1520 
1521 	rc = compress_buff(&scratch_buff, dbg_buff);
1522 err1:
1523 	release_scratch_buff(&scratch_buff, dbg_buff);
1524 err:
1525 	return rc;
1526 }
1527 
1528 static int
1529 collect_tcp_stats(struct cudbg_init *pdbg_init,
1530 		  struct cudbg_buffer *dbg_buff,
1531 		  struct cudbg_error *cudbg_err)
1532 {
1533 	struct adapter *padap = pdbg_init->adap;
1534 	struct cudbg_buffer scratch_buff;
1535 	struct struct_tcp_stats *tcp_stats_buff;
1536 	u32 size;
1537 	int rc = 0;
1538 
1539 	size = sizeof(struct struct_tcp_stats);
1540 
1541 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1542 	if (rc)
1543 		goto err;
1544 
1545 	tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
1546 
1547 	/* spin_lock(&padap->stats_lock);	TODO*/
1548 	t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
1549 	/* spin_unlock(&padap->stats_lock);	TODO*/
1550 
1551 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1552 	if (rc)
1553 		goto err1;
1554 
1555 	rc = compress_buff(&scratch_buff, dbg_buff);
1556 err1:
1557 	release_scratch_buff(&scratch_buff, dbg_buff);
1558 err:
1559 	return rc;
1560 }
1561 
1562 static int
1563 collect_hw_sched(struct cudbg_init *pdbg_init,
1564 		 struct cudbg_buffer *dbg_buff,
1565 		 struct cudbg_error *cudbg_err)
1566 {
1567 	struct adapter *padap = pdbg_init->adap;
1568 	struct cudbg_buffer scratch_buff;
1569 	struct struct_hw_sched *hw_sched_buff;
1570 	u32 size;
1571 	int i, rc = 0;
1572 
1573 	if (!padap->params.vpd.cclk) {
1574 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1575 		goto err;
1576 	}
1577 
1578 	size = sizeof(struct struct_hw_sched);
1579 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1580 	if (rc)
1581 		goto err;
1582 
1583 	hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
1584 
1585 	hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
1586 	hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
1587 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1588 
1589 	for (i = 0; i < NTX_SCHED; ++i) {
1590 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1591 		    &hw_sched_buff->ipg[i], 1);
1592 	}
1593 
1594 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1595 	if (rc)
1596 		goto err1;
1597 
1598 	rc = compress_buff(&scratch_buff, dbg_buff);
1599 err1:
1600 	release_scratch_buff(&scratch_buff, dbg_buff);
1601 err:
1602 	return rc;
1603 }
1604 
1605 static int
1606 collect_pm_stats(struct cudbg_init *pdbg_init,
1607 		 struct cudbg_buffer *dbg_buff,
1608 		 struct cudbg_error *cudbg_err)
1609 {
1610 	struct adapter *padap = pdbg_init->adap;
1611 	struct cudbg_buffer scratch_buff;
1612 	struct struct_pm_stats *pm_stats_buff;
1613 	u32 size;
1614 	int rc = 0;
1615 
1616 	size = sizeof(struct struct_pm_stats);
1617 
1618 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1619 	if (rc)
1620 		goto err;
1621 
1622 	pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
1623 
1624 	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1625 	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1626 
1627 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1628 	if (rc)
1629 		goto err1;
1630 
1631 	rc = compress_buff(&scratch_buff, dbg_buff);
1632 err1:
1633 	release_scratch_buff(&scratch_buff, dbg_buff);
1634 err:
1635 	return rc;
1636 }
1637 
1638 static int
1639 collect_path_mtu(struct cudbg_init *pdbg_init,
1640 		 struct cudbg_buffer *dbg_buff,
1641 		 struct cudbg_error *cudbg_err)
1642 {
1643 	struct adapter *padap = pdbg_init->adap;
1644 	struct cudbg_buffer scratch_buff;
1645 	u32 size;
1646 	int rc = 0;
1647 
1648 	size = NMTUS  * sizeof(u16);
1649 
1650 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1651 	if (rc)
1652 		goto err;
1653 
1654 	t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
1655 
1656 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1657 	if (rc)
1658 		goto err1;
1659 
1660 	rc = compress_buff(&scratch_buff, dbg_buff);
1661 err1:
1662 	release_scratch_buff(&scratch_buff, dbg_buff);
1663 err:
1664 	return rc;
1665 }
1666 
1667 static int
1668 collect_rss_key(struct cudbg_init *pdbg_init,
1669 		struct cudbg_buffer *dbg_buff,
1670 		struct cudbg_error *cudbg_err)
1671 {
1672 	struct adapter *padap = pdbg_init->adap;
1673 	struct cudbg_buffer scratch_buff;
1674 	u32 size;
1675 
1676 	int rc = 0;
1677 
1678 	size = 10  * sizeof(u32);
1679 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1680 	if (rc)
1681 		goto err;
1682 
1683 	t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
1684 
1685 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1686 	if (rc)
1687 		goto err1;
1688 
1689 	rc = compress_buff(&scratch_buff, dbg_buff);
1690 err1:
1691 	release_scratch_buff(&scratch_buff, dbg_buff);
1692 err:
1693 	return rc;
1694 }
1695 
1696 static int
1697 collect_rss_config(struct cudbg_init *pdbg_init,
1698 		   struct cudbg_buffer *dbg_buff,
1699 		   struct cudbg_error *cudbg_err)
1700 {
1701 	struct adapter *padap = pdbg_init->adap;
1702 	struct cudbg_buffer scratch_buff;
1703 	struct rss_config *rss_conf;
1704 	int rc;
1705 	u32 size;
1706 
1707 	size = sizeof(struct rss_config);
1708 
1709 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1710 	if (rc)
1711 		goto err;
1712 
1713 	rss_conf =  (struct rss_config *)scratch_buff.data;
1714 
1715 	rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
1716 	rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
1717 	rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
1718 	rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
1719 	rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
1720 	rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
1721 	rss_conf->chip = padap->params.chip;
1722 
1723 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1724 	if (rc)
1725 		goto err1;
1726 
1727 	rc = compress_buff(&scratch_buff, dbg_buff);
1728 
1729 err1:
1730 	release_scratch_buff(&scratch_buff, dbg_buff);
1731 err:
1732 	return rc;
1733 }
1734 
1735 static int
1736 collect_rss_vf_config(struct cudbg_init *pdbg_init,
1737 		      struct cudbg_buffer *dbg_buff,
1738 		      struct cudbg_error *cudbg_err)
1739 {
1740 	struct adapter *padap = pdbg_init->adap;
1741 	struct cudbg_buffer scratch_buff;
1742 	struct rss_vf_conf *vfconf;
1743 	int vf, rc, vf_count = 0;
1744 	u32 size;
1745 
1746 	vf_count = padap->params.arch.vfcount;
1747 	size = vf_count * sizeof(*vfconf);
1748 
1749 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1750 	if (rc)
1751 		goto err;
1752 
1753 	vfconf =  (struct rss_vf_conf *)scratch_buff.data;
1754 
1755 	for (vf = 0; vf < vf_count; vf++) {
1756 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1757 				      &vfconf[vf].rss_vf_vfh, 1);
1758 	}
1759 
1760 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1761 	if (rc)
1762 		goto err1;
1763 
1764 	rc = compress_buff(&scratch_buff, dbg_buff);
1765 
1766 err1:
1767 	release_scratch_buff(&scratch_buff, dbg_buff);
1768 err:
1769 	return rc;
1770 }
1771 
1772 static int
1773 collect_rss_pf_config(struct cudbg_init *pdbg_init,
1774 		      struct cudbg_buffer *dbg_buff,
1775 		      struct cudbg_error *cudbg_err)
1776 {
1777 	struct cudbg_buffer scratch_buff;
1778 	struct rss_pf_conf *pfconf;
1779 	struct adapter *padap = pdbg_init->adap;
1780 	u32 rss_pf_map, rss_pf_mask, size;
1781 	int pf, rc;
1782 
1783 	size = 8  * sizeof(*pfconf);
1784 
1785 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1786 	if (rc)
1787 		goto err;
1788 
1789 	pfconf =  (struct rss_pf_conf *)scratch_buff.data;
1790 
1791 	rss_pf_map = t4_read_rss_pf_map(padap, 1);
1792 	rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
1793 
1794 	for (pf = 0; pf < 8; pf++) {
1795 		pfconf[pf].rss_pf_map = rss_pf_map;
1796 		pfconf[pf].rss_pf_mask = rss_pf_mask;
1797 		/* no return val */
1798 		t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
1799 	}
1800 
1801 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1802 	if (rc)
1803 		goto err1;
1804 
1805 	rc = compress_buff(&scratch_buff, dbg_buff);
1806 err1:
1807 	release_scratch_buff(&scratch_buff, dbg_buff);
1808 err:
1809 	return rc;
1810 }
1811 
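/*
 * Check whether a raw SGE context read via read_sge_ctxt() is marked
 * valid. The bit position of the valid flag depends on the context
 * type; the word index and bit offset within buf are derived from it.
 * Returns non-zero when the valid bit is set.
 */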
1812 static int
1813 check_valid(u32 *buf, int type)
1814 {
1815 	int index;
1816 	int bit;
1817 	int bit_pos = 0;
1818 
1819 	switch (type) {
1820 	case CTXT_EGRESS:
1821 		bit_pos = 176;
1822 		break;
1823 	case CTXT_INGRESS:
1824 		bit_pos = 141;
1825 		break;
1826 	case CTXT_FLM:
1827 		bit_pos = 89;
1828 		break;
1829 	}
1830 	index = bit_pos / 32;
1831 	bit =  bit_pos % 32;
1832 
1833 	return buf[index] & (1U << bit);
1834 }
1835 
1836 /**
1837  * Get EGRESS, INGRESS, FLM, and CNM max qid.
1838  *
1839  * For EGRESS and INGRESS, do the following calculation.
1840  * max_qid = (DBQ/IMSG context region size in bytes) /
1841  *	     (size of context in bytes).
1842  *
1843  * For FLM, do the following calculation.
1844  * max_qid = (FLM cache region size in bytes) /
1845  *	     ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
1846  *
1847  * There's a 1-to-1 mapping between FLM and CNM if there's no header splitting
1848  * enabled; i.e., max CNM qid is equal to max FLM qid. However, if header
1849  * splitting is enabled, then max CNM qid is half of max FLM qid.
1850  */
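/*
 * Illustrative example (made-up numbers): a 1 MB DBQ context region
 * with CUDBG_CTXT_SIZE_BYTES-sized entries gives
 * max_qid = (1 << 20) / CUDBG_CTXT_SIZE_BYTES; a 1 MB FLM cache region
 * with EDRAMPTRCNT selecting 32 pointers per qid gives
 * max_qid = (1 << 20) / (32 * 8).
 */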
1851 static int
1852 get_max_ctxt_qid(struct adapter *padap,
1853 		 struct struct_meminfo *meminfo,
1854 		 u32 *max_ctx_qid, u8 nelem)
1855 {
1856 	u32 i, idx, found = 0;
1857 
1858 	if (nelem != (CTXT_CNM + 1))
1859 		return -EINVAL;
1860 
1861 	for (i = 0; i < meminfo->mem_c; i++) {
1862 		if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
1863 			continue;                        /* skip holes */
1864 
1865 		idx = meminfo->mem[i].idx;
1866 		/* Get DBQ, IMSG, and FLM context region size */
1867 		if (idx <= CTXT_FLM) {
1868 			if (!(meminfo->mem[i].limit))
1869 				meminfo->mem[i].limit =
1870 					i < meminfo->mem_c - 1 ?
1871 					meminfo->mem[i + 1].base - 1 : ~0;
1872 
1873 			if (idx < CTXT_FLM) {
1874 				/* Get EGRESS and INGRESS max qid. */
1875 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1876 						    meminfo->mem[i].base + 1) /
1877 						   CUDBG_CTXT_SIZE_BYTES;
1878 				found++;
1879 			} else {
1880 				/* Get FLM and CNM max qid. */
1881 				u32 value, edram_ptr_count;
1882 				u8 bytes_per_ptr = 8;
1883 				u8 nohdr;
1884 
1885 				value = t4_read_reg(padap, A_SGE_FLM_CFG);
1886 
1887 				/* Check if header splitting is enabled. */
1888 				nohdr = (value >> S_NOHDR) & 1U;
1889 
1890 				/* Get the number of pointers in EDRAM per
1891 				 * qid in units of 32.
1892 				 */
1893 				edram_ptr_count = 32 *
1894 						  (1U << G_EDRAMPTRCNT(value));
1895 
1896 				/* EDRAMPTRCNT value of 3 is reserved.
1897 				 * So don't exceed 128.
1898 				 */
1899 				if (edram_ptr_count > 128)
1900 					edram_ptr_count = 128;
1901 
1902 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1903 						    meminfo->mem[i].base + 1) /
1904 						   (edram_ptr_count *
1905 						    bytes_per_ptr);
1906 				found++;
1907 
1908 				/* CNM has 1-to-1 mapping with FLM.
1909 				 * However, if header splitting is enabled,
1910 				 * then max CNM qid is half of max FLM qid.
1911 				 */
1912 				max_ctx_qid[CTXT_CNM] = nohdr ?
1913 							max_ctx_qid[idx] :
1914 							max_ctx_qid[idx] >> 1;
1915 
1916 				/* One more increment for CNM */
1917 				found++;
1918 			}
1919 		}
1920 		if (found == nelem)
1921 			break;
1922 	}
1923 
1924 	/* Sanity check. Ensure the values are within known max. */
1925 	max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
1926 					 M_CTXTQID);
1927 	max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
1928 					  CUDBG_MAX_INGRESS_QIDS);
1929 	max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
1930 				      CUDBG_MAX_FL_QIDS);
1931 	max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
1932 				      CUDBG_MAX_CNM_QIDS);
1933 	return 0;
1934 }
1935 
1936 static int
1937 collect_dump_context(struct cudbg_init *pdbg_init,
1938 		     struct cudbg_buffer *dbg_buff,
1939 		     struct cudbg_error *cudbg_err)
1940 {
1941 	struct cudbg_buffer scratch_buff;
1942 	struct cudbg_buffer temp_buff;
1943 	struct adapter *padap = pdbg_init->adap;
1944 	u32 size = 0, next_offset = 0, total_size = 0;
1945 	struct cudbg_ch_cntxt *buff = NULL;
1946 	struct struct_meminfo meminfo;
1947 	int bytes = 0;
1948 	int rc = 0;
1949 	u32 i, j;
1950 	u32 max_ctx_qid[CTXT_CNM + 1];
1951 	bool limit_qid = false;
1952 	u32 qid_count = 0;
1953 
1954 	rc = fill_meminfo(padap, &meminfo);
1955 	if (rc)
1956 		goto err;
1957 
1958 	/* Get max valid qid for each type of queue */
1959 	rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
1960 	if (rc)
1961 		goto err;
1962 
1963 	/* There are four types of queues. Collect context up to max
1964 	 * qid of each type of queue.
1965 	 */
1966 	for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1967 		size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];
1968 
1969 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1970 	if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
1971 		/* Not enough scratch memory available.
1972 		 * Collect context of at least CUDBG_LOWMEM_MAX_CTXT_QIDS
1973 		 * for each queue type.
1974 		 */
1975 		size = 0;
1976 		for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1977 			size += sizeof(struct cudbg_ch_cntxt) *
1978 				CUDBG_LOWMEM_MAX_CTXT_QIDS;
1979 
1980 		limit_qid = true;
1981 		rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1982 		if (rc)
1983 			goto err;
1984 	}
1985 
1986 	buff = (struct cudbg_ch_cntxt *)scratch_buff.data;
1987 
1988 	/* Collect context data */
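	/*
	 * CNM contexts are not walked separately: whenever a valid FLM
	 * context is found, the CNM context with the same qid is read
	 * and recorded immediately after it.
	 */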
1989 	for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
1990 		qid_count = 0;
1991 		for (j = 0; j < max_ctx_qid[i]; j++) {
1992 			read_sge_ctxt(pdbg_init, j, i, buff->data);
1993 
1994 			rc = check_valid(buff->data, i);
1995 			if (rc) {
1996 				buff->cntxt_type = i;
1997 				buff->cntxt_id = j;
1998 				buff++;
1999 				total_size += sizeof(struct cudbg_ch_cntxt);
2000 
2001 				if (i == CTXT_FLM) {
2002 					read_sge_ctxt(pdbg_init, j, CTXT_CNM,
2003 						      buff->data);
2004 					buff->cntxt_type = CTXT_CNM;
2005 					buff->cntxt_id = j;
2006 					buff++;
2007 					total_size +=
2008 						sizeof(struct cudbg_ch_cntxt);
2009 				}
2010 				qid_count++;
2011 			}
2012 
2013 			/* If there's not enough space to collect more qids,
2014 			 * then bail and move on to next queue type.
2015 			 */
2016 			if (limit_qid &&
2017 			    qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
2018 				break;
2019 		}
2020 	}
2021 
2022 	scratch_buff.size = total_size;
2023 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2024 	if (rc)
2025 		goto err1;
2026 
2027 	/* Splitting buffer and writing in terms of CUDBG_CHUNK_SIZE */
2028 	while (total_size > 0) {
2029 		bytes = min_t(unsigned long, (unsigned long)total_size,
2030 			      (unsigned long)CUDBG_CHUNK_SIZE);
2031 		temp_buff.size = bytes;
2032 		temp_buff.data = (void *)((char *)scratch_buff.data +
2033 					  next_offset);
2034 
2035 		rc = compress_buff(&temp_buff, dbg_buff);
2036 		if (rc)
2037 			goto err1;
2038 
2039 		total_size -= bytes;
2040 		next_offset += bytes;
2041 	}
2042 
2043 err1:
2044 	scratch_buff.size = size;
2045 	release_scratch_buff(&scratch_buff, dbg_buff);
2046 err:
2047 	return rc;
2048 }
2049 
2050 static int
2051 collect_fw_devlog(struct cudbg_init *pdbg_init,
2052 		  struct cudbg_buffer *dbg_buff,
2053 		  struct cudbg_error *cudbg_err)
2054 {
2055 	struct adapter *padap = pdbg_init->adap;
2056 	struct devlog_params *dparams = &padap->params.devlog;
2057 	struct cudbg_buffer scratch_buff;
2058 	u32 offset;
2059 	int rc = 0;
2060 
2061 	rc = t4_init_devlog_params(padap, 1);
2062 
2063 	if (rc < 0) {
2064 		pdbg_init->print(padap->dip, CE_NOTE,
2065 				 "%s(), t4_init_devlog_params failed, rc: "\
2066 				 "%d\n", __func__, rc);
2067 		rc = CUDBG_SYSTEM_ERROR;
2068 		goto err;
2069 	}
2070 
2071 	rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
2072 
2073 	if (rc)
2074 		goto err;
2075 
2076 	/* Collect FW devlog */
2077 	if (dparams->start != 0) {
2078 		offset = scratch_buff.offset;
2079 		rc = t4_memory_rw(padap, padap->params.drv_memwin,
2080 				  dparams->memtype, dparams->start,
2081 				  dparams->size,
2082 				  (__be32 *)((char *)scratch_buff.data +
2083 					     offset), 1);
2084 
2085 		if (rc) {
2086 			pdbg_init->print(padap->dip, CE_NOTE,
2087 					 "%s(), t4_memory_rw failed, rc: "\
2088 					 "%d\n", __func__, rc);
2089 			cudbg_err->sys_err = rc;
2090 			goto err1;
2091 		}
2092 	}
2093 
2094 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2095 
2096 	if (rc)
2097 		goto err1;
2098 
2099 	rc = compress_buff(&scratch_buff, dbg_buff);
2100 
2101 err1:
2102 	release_scratch_buff(&scratch_buff, dbg_buff);
2103 err:
2104 	return rc;
2105 }
2106 /* CIM OBQ */
2107 
2108 static int
2109 collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
2110 		     struct cudbg_buffer *dbg_buff,
2111 		     struct cudbg_error *cudbg_err)
2112 {
2113 	int rc = 0, qid = 0;
2114 
2115 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2116 
2117 	return rc;
2118 }
2119 
2120 static int
2121 collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
2122 		     struct cudbg_buffer *dbg_buff,
2123 		     struct cudbg_error *cudbg_err)
2124 {
2125 	int rc = 0, qid = 1;
2126 
2127 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2128 
2129 	return rc;
2130 }
2131 
2132 static int
2133 collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
2134 		     struct cudbg_buffer *dbg_buff,
2135 		     struct cudbg_error *cudbg_err)
2136 {
2137 	int rc = 0, qid = 2;
2138 
2139 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2140 
2141 	return rc;
2142 }
2143 
2144 static int
2145 collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
2146 		     struct cudbg_buffer *dbg_buff,
2147 		     struct cudbg_error *cudbg_err)
2148 {
2149 	int rc = 0, qid = 3;
2150 
2151 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2152 
2153 	return rc;
2154 }
2155 
2156 static int
2157 collect_cim_obq_sge(struct cudbg_init *pdbg_init,
2158 		    struct cudbg_buffer *dbg_buff,
2159 		    struct cudbg_error *cudbg_err)
2160 {
2161 	int rc = 0, qid = 4;
2162 
2163 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2164 
2165 	return rc;
2166 }
2167 
2168 static int
2169 collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
2170 		     struct cudbg_buffer *dbg_buff,
2171 		     struct cudbg_error *cudbg_err)
2172 {
2173 	int rc = 0, qid = 5;
2174 
2175 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2176 
2177 	return rc;
2178 }
2179 
2180 static int
2181 collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
2182 		      struct cudbg_buffer *dbg_buff,
2183 		      struct cudbg_error *cudbg_err)
2184 {
2185 	int rc = 0, qid = 6;
2186 
2187 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2188 
2189 	return rc;
2190 }
2191 
2192 static int
2193 collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
2194 		      struct cudbg_buffer *dbg_buff,
2195 		      struct cudbg_error *cudbg_err)
2196 {
2197 	int rc = 0, qid = 7;
2198 
2199 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2200 
2201 	return rc;
2202 }
2203 
2204 static int
2205 read_cim_obq(struct cudbg_init *pdbg_init,
2206 	     struct cudbg_buffer *dbg_buff,
2207 	     struct cudbg_error *cudbg_err, int qid)
2208 {
2209 	struct cudbg_buffer scratch_buff;
2210 	struct adapter *padap = pdbg_init->adap;
2211 	u32 qsize;
2212 	int rc;
2213 	int no_of_read_words;
2214 
2215 	/* collect CIM OBQ */
2216 	qsize =  6 * CIM_OBQ_SIZE * 4 *  sizeof(u32);
2217 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2218 	if (rc)
2219 		goto err;
2220 
2221 	/* t4_read_cim_obq will return no. of read words or error */
2222 	no_of_read_words = t4_read_cim_obq(padap, qid,
2223 					   (u32 *)((u32 *)scratch_buff.data +
2224 					   scratch_buff.offset), qsize);
2225 
2226 	/* no_of_read_words less than or equal to 0 indicates an error */
2227 	if (no_of_read_words <= 0) {
2228 		if (no_of_read_words == 0)
2229 			rc = CUDBG_SYSTEM_ERROR;
2230 		else
2231 			rc = no_of_read_words;
2232 		if (pdbg_init->verbose)
2233 			pdbg_init->print(padap->dip, CE_NOTE,
2234 					 "%s: t4_read_cim_obq failed (%d)\n",
2235 				 __func__, rc);
2236 		cudbg_err->sys_err = rc;
2237 		goto err1;
2238 	}
2239 
2240 	scratch_buff.size = no_of_read_words * 4;
2241 
2242 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2243 
2244 	if (rc)
2245 		goto err1;
2246 
2247 	rc = compress_buff(&scratch_buff, dbg_buff);
2248 
2249 	if (rc)
2250 		goto err1;
2251 
2252 err1:
2253 	release_scratch_buff(&scratch_buff, dbg_buff);
2254 err:
2255 	return rc;
2256 }
2257 
2258 /* CIM IBQ */
2259 
2260 static int
2261 collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
2262 		    struct cudbg_buffer *dbg_buff,
2263 		    struct cudbg_error *cudbg_err)
2264 {
2265 	int rc = 0, qid = 0;
2266 
2267 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2268 	return rc;
2269 }
2270 
2271 static int
2272 collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
2273 		    struct cudbg_buffer *dbg_buff,
2274 		    struct cudbg_error *cudbg_err)
2275 {
2276 	int rc = 0, qid = 1;
2277 
2278 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2279 	return rc;
2280 }
2281 
2282 static int
2283 collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
2284 		    struct cudbg_buffer *dbg_buff,
2285 		    struct cudbg_error *cudbg_err)
2286 {
2287 	int rc = 0, qid = 2;
2288 
2289 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2290 	return rc;
2291 }
2292 
2293 static int
2294 collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
2295 		     struct cudbg_buffer *dbg_buff,
2296 		     struct cudbg_error *cudbg_err)
2297 {
2298 	int rc = 0, qid = 3;
2299 
2300 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2301 	return rc;
2302 }
2303 
2304 static int
2305 collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
2306 		     struct cudbg_buffer *dbg_buff,
2307 		     struct cudbg_error *cudbg_err)
2308 {
2309 	int rc = 0, qid = 4;
2310 
2311 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2312 	return rc;
2313 }
2314 
2315 static int
2316 collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
2317 		     struct cudbg_buffer *dbg_buff,
2318 		     struct cudbg_error *cudbg_err)
2319 {
2320 	int rc, qid = 5;
2321 
2322 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2323 	return rc;
2324 }
2325 
2326 static int
2327 read_cim_ibq(struct cudbg_init *pdbg_init,
2328 	     struct cudbg_buffer *dbg_buff,
2329 	     struct cudbg_error *cudbg_err, int qid)
2330 {
2331 	struct adapter *padap = pdbg_init->adap;
2332 	struct cudbg_buffer scratch_buff;
2333 	u32 qsize;
2334 	int rc;
2335 	int no_of_read_words;
2336 
2337 	/* collect CIM IBQ */
2338 	qsize = CIM_IBQ_SIZE * 4 *  sizeof(u32);
2339 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2340 
2341 	if (rc)
2342 		goto err;
2343 
2344 	/* t4_read_cim_ibq will return no. of read words or error */
2345 	no_of_read_words = t4_read_cim_ibq(padap, qid,
2346 					   (u32 *)((u32 *)scratch_buff.data +
2347 					   scratch_buff.offset), qsize);
2348 	/* no_of_read_words less than or equal to 0 indicates an error */
2349 	if (no_of_read_words <= 0) {
2350 		if (no_of_read_words == 0)
2351 			rc = CUDBG_SYSTEM_ERROR;
2352 		else
2353 			rc = no_of_read_words;
2354 		if (pdbg_init->verbose)
2355 			pdbg_init->print(padap->dip, CE_NOTE,
2356 					 "%s: t4_read_cim_ibq failed (%d)\n",
2357 				 __func__, rc);
2358 		cudbg_err->sys_err = rc;
2359 		goto err1;
2360 	}
2361 
2362 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2363 	if (rc)
2364 		goto err1;
2365 
2366 	rc = compress_buff(&scratch_buff, dbg_buff);
2367 	if (rc)
2368 		goto err1;
2369 
2370 err1:
2371 	release_scratch_buff(&scratch_buff, dbg_buff);
2372 
2373 err:
2374 	return rc;
2375 }
2376 
2377 static int
2378 collect_cim_ma_la(struct cudbg_init *pdbg_init,
2379 		  struct cudbg_buffer *dbg_buff,
2380 		  struct cudbg_error *cudbg_err)
2381 {
2382 	struct cudbg_buffer scratch_buff;
2383 	struct adapter *padap = pdbg_init->adap;
2384 	u32 rc = 0;
2385 
2386 	/* collect CIM MA LA */
2387 	scratch_buff.size =  2 * CIM_MALA_SIZE * 5 * sizeof(u32);
2388 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2389 	if (rc)
2390 		goto err;
2391 
2392 	/* no return */
2393 	t4_cim_read_ma_la(padap,
2394 			  (u32 *) ((char *)scratch_buff.data +
2395 				   scratch_buff.offset),
2396 			  (u32 *) ((char *)scratch_buff.data +
2397 				   scratch_buff.offset + 5 * CIM_MALA_SIZE));
2398 
2399 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2400 	if (rc)
2401 		goto err1;
2402 
2403 	rc = compress_buff(&scratch_buff, dbg_buff);
2404 
2405 err1:
2406 	release_scratch_buff(&scratch_buff, dbg_buff);
2407 err:
2408 	return rc;
2409 }
2410 
2411 static int
2412 collect_cim_la(struct cudbg_init *pdbg_init,
2413 	       struct cudbg_buffer *dbg_buff,
2414 	       struct cudbg_error *cudbg_err)
2415 {
2416 	struct cudbg_buffer scratch_buff;
2417 	struct adapter *padap = pdbg_init->adap;
2418 
2419 	int rc;
2420 	u32 cfg = 0;
2421 	int size;
2422 
2423 	/* collect CIM LA */
2424 	if (is_t6(padap->params.chip)) {
2425 		size = padap->params.cim_la_size / 10 + 1;
2426 		size *= 11 * sizeof(u32);
2427 	} else {
2428 		size = padap->params.cim_la_size / 8;
2429 		size *= 8 * sizeof(u32);
2430 	}
2431 
2432 	size += sizeof(cfg);
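	/*
	 * Layout of the collected blob: the A_UP_UP_DBG_LA_CFG value is
	 * stored first, followed by the raw LA data from
	 * t4_cim_read_la(); hence the extra sizeof(cfg) bytes.
	 */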
2433 
2434 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
2435 	if (rc)
2436 		goto err;
2437 
2438 	rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
2439 
2440 	if (rc) {
2441 		if (pdbg_init->verbose)
2442 			pdbg_init->print(padap->dip, CE_NOTE,
2443 					 "%s: t4_cim_read failed (%d)\n",
2444 				 __func__, rc);
2445 		cudbg_err->sys_err = rc;
2446 		goto err1;
2447 	}
2448 
2449 	memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
2450 	       sizeof(cfg));
2451 
2452 	rc = t4_cim_read_la(padap,
2453 			    (u32 *) ((char *)scratch_buff.data +
2454 				     scratch_buff.offset + sizeof(cfg)), NULL);
2455 	if (rc < 0) {
2456 		if (pdbg_init->verbose)
2457 			pdbg_init->print(padap->dip, CE_NOTE,
2458 					 "%s: t4_cim_read_la failed (%d)\n",
2459 				 __func__, rc);
2460 		cudbg_err->sys_err = rc;
2461 		goto err1;
2462 	}
2463 
2464 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2465 	if (rc)
2466 		goto err1;
2467 
2468 	rc = compress_buff(&scratch_buff, dbg_buff);
2469 	if (rc)
2470 		goto err1;
2471 
2472 err1:
2473 	release_scratch_buff(&scratch_buff, dbg_buff);
2474 err:
2475 	return rc;
2476 }
2477 
2478 static int
2479 collect_cim_qcfg(struct cudbg_init *pdbg_init,
2480 		 struct cudbg_buffer *dbg_buff,
2481 		 struct cudbg_error *cudbg_err)
2482 {
2483 	struct cudbg_buffer scratch_buff;
2484 	struct adapter *padap = pdbg_init->adap;
2485 	u32 offset;
2486 	int rc = 0;
2487 
2488 	struct struct_cim_qcfg *cim_qcfg_data = NULL;
2489 
2490 	rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
2491 			      &scratch_buff);
2492 
2493 	if (rc)
2494 		goto err;
2495 
2496 	offset = scratch_buff.offset;
2497 
2498 	cim_qcfg_data =
2499 		(struct struct_cim_qcfg *)((u8 *)((char *)scratch_buff.data +
2500 					   offset));
2501 
2502 	rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
2503 			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
2504 
2505 	if (rc) {
2506 		if (pdbg_init->verbose)
2507 			pdbg_init->print(padap->dip, CE_NOTE,
2508 					 "%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
2509 			    __func__, rc);
2510 		cudbg_err->sys_err = rc;
2511 		goto err1;
2512 	}
2513 
2514 	rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
2515 			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
2516 			 cim_qcfg_data->obq_wr);
2517 
2518 	if (rc) {
2519 		if (pdbg_init->verbose)
2520 			pdbg_init->print(padap->dip, CE_NOTE,
2521 					 "%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
2522 			    __func__, rc);
2523 		cudbg_err->sys_err = rc;
2524 		goto err1;
2525 	}
2526 
2527 	/* no return val */
2528 	t4_read_cimq_cfg(padap,
2529 			cim_qcfg_data->base,
2530 			cim_qcfg_data->size,
2531 			cim_qcfg_data->thres);
2532 
2533 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2534 	if (rc)
2535 		goto err1;
2536 
2537 	rc = compress_buff(&scratch_buff, dbg_buff);
2538 	if (rc)
2539 		goto err1;
2540 
2541 err1:
2542 	release_scratch_buff(&scratch_buff, dbg_buff);
2543 err:
2544 	return rc;
2545 }
2546 
2547 static int
2548 read_fw_mem(struct cudbg_init *pdbg_init,
2549 	    struct cudbg_buffer *dbg_buff, u8 mem_type,
2550 	    unsigned long tot_len, struct cudbg_error *cudbg_err)
2551 {
2552 	struct cudbg_buffer scratch_buff;
2553 	struct adapter *padap = pdbg_init->adap;
2554 	unsigned long bytes_read = 0;
2555 	unsigned long bytes_left;
2556 	unsigned long bytes;
2557 	int	      rc;
2558 
2559 	bytes_left = tot_len;
2560 	scratch_buff.size = tot_len;
2561 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2562 	if (rc)
2563 		goto err;
2564 
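	/*
	 * Read the requested region in CUDBG_CHUNK_SIZE pieces: each
	 * iteration grabs a scratch buffer, pulls one chunk out of the
	 * adapter with t4_memory_rw(), compresses it into the output
	 * buffer, and releases the scratch space.
	 */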
2565 	while (bytes_left > 0) {
2566 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2567 		rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
2568 
2569 		if (rc) {
2570 			rc = CUDBG_STATUS_NO_SCRATCH_MEM;
2571 			goto err;
2572 		}
2573 
2574 		/* Read one chunk of adapter memory. */
2576 		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
2577 				  bytes, (__be32 *)(scratch_buff.data), 1);
2578 
2579 		if (rc) {
2580 			if (pdbg_init->verbose)
2581 				pdbg_init->print(padap->dip, CE_NOTE,
2582 						 "%s: t4_memory_rw failed (%d)",
2583 				    __func__, rc);
2584 			cudbg_err->sys_err = rc;
2585 			goto err1;
2586 		}
2587 
2588 		rc = compress_buff(&scratch_buff, dbg_buff);
2589 		if (rc)
2590 			goto err1;
2591 
2592 		bytes_left -= bytes;
2593 		bytes_read += bytes;
2594 		release_scratch_buff(&scratch_buff, dbg_buff);
2595 	}
2596 
2597 err1:
2598 	if (rc)
2599 		release_scratch_buff(&scratch_buff, dbg_buff);
2600 
2601 err:
2602 	return rc;
2603 }
2604 
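/*
 * Summarize the card's memories: the MA BAR registers give each
 * memory's size in MB (a single external memory on T4, MC0/MC1 on
 * later chips, plus the two eDRAM banks), and A_MA_TARGET_MEM_ENABLE
 * indicates which of them are enabled, recorded as bits in mem_flag.
 */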
2605 static void
2606 collect_mem_info(struct cudbg_init *pdbg_init,
2607 		 struct card_mem *mem_info)
2608 {
2609 	struct adapter *padap = pdbg_init->adap;
2610 	u32 value;
2611 	int t4 = 0;
2612 
2613 	if (is_t4(padap->params.chip))
2614 		t4 = 1;
2615 
2616 	if (t4) {
2617 		value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
2618 		value = G_EXT_MEM_SIZE(value);
2619 		mem_info->size_mc0 = (u16)value;  /* size in MB */
2620 
2621 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2622 		if (value & F_EXT_MEM_ENABLE)
2623 			/* set mc0 flag bit */
2624 			mem_info->mem_flag |= (1 << MC0_FLAG);
2625 	} else {
2626 		value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
2627 		value = G_EXT_MEM0_SIZE(value);
2628 		mem_info->size_mc0 = (u16)value;
2629 
2630 		value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
2631 		value = G_EXT_MEM1_SIZE(value);
2632 		mem_info->size_mc1 = (u16)value;
2633 
2634 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2635 		if (value & F_EXT_MEM0_ENABLE)
2636 			mem_info->mem_flag |= (1 << MC0_FLAG);
2637 		if (value & F_EXT_MEM1_ENABLE)
2638 			mem_info->mem_flag |= (1 << MC1_FLAG);
2639 	}
2640 
2641 	value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
2642 	value = G_EDRAM0_SIZE(value);
2643 	mem_info->size_edc0 = (u16)value;
2644 
2645 	value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
2646 	value = G_EDRAM1_SIZE(value);
2647 	mem_info->size_edc1 = (u16)value;
2648 
2649 	value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2650 	if (value & F_EDRAM0_ENABLE)
2651 		mem_info->mem_flag |= (1 << EDC0_FLAG);
2652 	if (value & F_EDRAM1_ENABLE)
2653 		mem_info->mem_flag |= (1 << EDC1_FLAG);
2654 
2655 }
2656 
2657 static void
2658 cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
2659 		 struct cudbg_error *cudbg_err)
2660 {
2661 	struct adapter *padap = pdbg_init->adap;
2662 	int rc;
2663 
2664 	if (is_fw_attached(pdbg_init)) {
2665 
2666 		/* Flush uP dcache before reading edcX/mcX  */
2667 		ADAPTER_LOCK(padap);
2668 		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
2669 		ADAPTER_UNLOCK(padap);
2670 
2671 		if (rc) {
2672 			if (pdbg_init->verbose)
2673 				pdbg_init->print(padap->dip, CE_NOTE,
2674 						 "%s: t4_fwcache failed (%d)\n",
2675 				 __func__, rc);
2676 			cudbg_err->sys_warn = rc;
2677 		}
2678 	}
2679 }
2680 
2681 static int
2682 collect_edc0_meminfo(struct cudbg_init *pdbg_init,
2683 		     struct cudbg_buffer *dbg_buff,
2684 		     struct cudbg_error *cudbg_err)
2685 {
2686 	struct card_mem mem_info = {0};
2687 	unsigned long edc0_size;
2688 	int rc;
2689 
2690 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2691 
2692 	collect_mem_info(pdbg_init, &mem_info);
2693 
2694 	if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
2695 		edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
2696 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
2697 				 edc0_size, cudbg_err);
2698 		if (rc)
2699 			goto err;
2700 
2701 	} else {
2702 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2703 		if (pdbg_init->verbose)
2704 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2705 					 "%s(), collect_mem_info failed, %s\n",
2706 				 __func__, err_msg[-rc]);
2707 		goto err;
2708 
2709 	}
2710 err:
2711 	return rc;
2712 }
2713 
2714 static int
2715 collect_edc1_meminfo(struct cudbg_init *pdbg_init,
2716 		     struct cudbg_buffer *dbg_buff,
2717 		     struct cudbg_error *cudbg_err)
2718 {
2719 	struct card_mem mem_info = {0};
2720 	unsigned long edc1_size;
2721 	int rc;
2722 
2723 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2724 
2725 	collect_mem_info(pdbg_init, &mem_info);
2726 
2727 	if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
2728 		edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
2729 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
2730 				 edc1_size, cudbg_err);
2731 		if (rc)
2732 			goto err;
2733 	} else {
2734 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2735 		if (pdbg_init->verbose)
2736 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2737 					 "%s(), collect_mem_info failed, %s\n",
2738 				 __func__, err_msg[-rc]);
2739 		goto err;
2740 	}
2741 
2742 err:
2743 
2744 	return rc;
2745 }
2746 
2747 static int
2748 collect_mc0_meminfo(struct cudbg_init *pdbg_init,
2749 		    struct cudbg_buffer *dbg_buff,
2750 		    struct cudbg_error *cudbg_err)
2751 {
2752 	struct card_mem mem_info = {0};
2753 	unsigned long mc0_size;
2754 	int rc;
2755 
2756 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2757 
2758 	collect_mem_info(pdbg_init, &mem_info);
2759 
2760 	if (mem_info.mem_flag & (1 << MC0_FLAG)) {
2761 		mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
2762 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
2763 				 mc0_size, cudbg_err);
2764 		if (rc)
2765 			goto err;
2766 	} else {
2767 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2768 		if (pdbg_init->verbose)
2769 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2770 					 "%s(), collect_mem_info failed, %s\n",
2771 				 __func__, err_msg[-rc]);
2772 		goto err;
2773 	}
2774 
2775 err:
2776 	return rc;
2777 }
2778 
2779 static int
2780 collect_mc1_meminfo(struct cudbg_init *pdbg_init,
2781 		    struct cudbg_buffer *dbg_buff,
2782 		    struct cudbg_error *cudbg_err)
2783 {
2784 	struct card_mem mem_info = {0};
2785 	unsigned long mc1_size;
2786 	int rc;
2787 
2788 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2789 
2790 	collect_mem_info(pdbg_init, &mem_info);
2791 
2792 	if (mem_info.mem_flag & (1 << MC1_FLAG)) {
2793 		mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
2794 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
2795 				 mc1_size, cudbg_err);
2796 		if (rc)
2797 			goto err;
2798 	} else {
2799 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2800 
2801 		if (pdbg_init->verbose)
2802 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2803 					"%s(), collect_mem_info failed, %s\n",
2804 				 __func__, err_msg[-rc]);
2805 		goto err;
2806 	}
2807 err:
2808 	return rc;
2809 }
2810 
2811 static int
2812 collect_reg_dump(struct cudbg_init *pdbg_init,
2813 		 struct cudbg_buffer *dbg_buff,
2814 		 struct cudbg_error *cudbg_err)
2815 {
2816 	struct cudbg_buffer scratch_buff;
2817 	struct cudbg_buffer tmp_scratch_buff;
2818 	struct adapter *padap = pdbg_init->adap;
2819 	unsigned long	     bytes_read = 0;
2820 	unsigned long	     bytes_left;
2821 	u32		     buf_size = 0, bytes = 0;
2822 	int		     rc = 0;
2823 
2824 	if (is_t4(padap->params.chip))
2825 		buf_size = T4_REGMAP_SIZE;
2826 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
2827 		buf_size = T5_REGMAP_SIZE;
2828 
2829 	scratch_buff.size = buf_size;
2830 
2831 	tmp_scratch_buff = scratch_buff;
2832 
2833 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2834 	if (rc)
2835 		goto err;
2836 
2837 	/* no return */
2838 	t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
2839 	bytes_left =   scratch_buff.size;
2840 
2841 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2842 	if (rc)
2843 		goto err1;
2844 
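	/*
	 * The register map was captured in one shot above; feed it to
	 * compress_buff() in CUDBG_CHUNK_SIZE slices.
	 */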
2845 	while (bytes_left > 0) {
2846 		tmp_scratch_buff.data =
2847 			((char *)scratch_buff.data) + bytes_read;
2848 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2849 		tmp_scratch_buff.size = bytes;
2850 		compress_buff(&tmp_scratch_buff, dbg_buff);
2851 		bytes_left -= bytes;
2852 		bytes_read += bytes;
2853 	}
2854 
2855 err1:
2856 	release_scratch_buff(&scratch_buff, dbg_buff);
2857 err:
2858 	return rc;
2859 }
2860 
2861 static int
2862 collect_cctrl(struct cudbg_init *pdbg_init,
2863 	      struct cudbg_buffer *dbg_buff,
2864 	      struct cudbg_error *cudbg_err)
2865 {
2866 	struct cudbg_buffer scratch_buff;
2867 	struct adapter *padap = pdbg_init->adap;
2868 	u32 size;
2869 	int rc;
2870 
2871 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2872 	scratch_buff.size = size;
2873 
2874 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2875 	if (rc)
2876 		goto err;
2877 
2878 	t4_read_cong_tbl(padap, (void *)scratch_buff.data);
2879 
2880 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2881 	if (rc)
2882 		goto err1;
2883 
2884 	rc = compress_buff(&scratch_buff, dbg_buff);
2885 
2886 err1:
2887 	release_scratch_buff(&scratch_buff, dbg_buff);
2888 err:
2889 	return rc;
2890 }
2891 
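/*
 * CIM host-access helpers: cim_ha_rreg() does an indirect register
 * read by writing the target address to A_CIM_HOST_ACC_CTRL, polling
 * the busy bit with check_busy_bit(), and then reading the result from
 * A_CIM_HOST_ACC_DATA. check_busy_bit() re-reads the control register
 * a bounded number of times and fails if the busy bit never clears.
 */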
2892 static int
2893 check_busy_bit(struct adapter *padap)
2894 {
2895 	u32 val;
2896 	u32 busy = 1;
2897 	int i = 0;
2898 	int retry = 10;
2899 	int status = 0;
2900 
2901 	while (busy && (i < retry)) {
2902 		val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
2903 		busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
2904 		i++;
2905 	}
2906 
2907 	if (busy)
2908 		status = -1;
2909 
2910 	return status;
2911 }
2912 
2913 static int
2914 cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
2915 {
2916 	int rc = 0;
2917 
2918 	/* write register address into the A_CIM_HOST_ACC_CTRL */
2919 	t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
2920 
2921 	/* Poll HOSTBUSY */
2922 	rc = check_busy_bit(padap);
2923 	if (rc)
2924 		goto err;
2925 
2926 	/* Read value from A_CIM_HOST_ACC_DATA */
2927 	*val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
2928 
2929 err:
2930 	return rc;
2931 }
2932 
2933 static int
2934 dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
2935 	    struct ireg_field *up_cim_reg, u32 *buff)
2936 {
2937 	u32 i;
2938 	int rc = 0;
2939 
2940 	for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
2941 		rc = cim_ha_rreg(padap,
2942 				 up_cim_reg->ireg_local_offset + (i * 4),
2943 				buff);
2944 		if (rc) {
2945 			if (pdbg_init->verbose)
2946 				pdbg_init->print(padap->dip, CE_NOTE,
2947 						 "BUSY timeout reading "
2948 					 "CIM_HOST_ACC_CTRL\n");
2949 			goto err;
2950 		}
2951 
2952 		buff++;
2953 	}
2954 
2955 err:
2956 	return rc;
2957 }
2958 
2959 static int
2960 collect_up_cim_indirect(struct cudbg_init *pdbg_init,
2961 			struct cudbg_buffer *dbg_buff,
2962 			struct cudbg_error *cudbg_err)
2963 {
2964 	struct cudbg_buffer scratch_buff;
2965 	struct adapter *padap = pdbg_init->adap;
2966 	struct ireg_buf *up_cim;
2967 	u32 size;
2968 	int i, rc, n;
2969 
2970 	n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
2971 	size = sizeof(struct ireg_buf) * n;
2972 	scratch_buff.size = size;
2973 
2974 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2975 	if (rc)
2976 		goto err;
2977 
2978 	up_cim = (struct ireg_buf *)scratch_buff.data;
2979 
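	/*
	 * One ireg_buf per row of the chip-specific register array: each
	 * row supplies the indirect address/data register pair plus the
	 * starting local offset and word count, and dump_up_cim() reads
	 * that many consecutive words through the CIM host-access
	 * interface into outbuf.
	 */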
2980 	for (i = 0; i < n; i++) {
2981 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
2982 		u32 *buff = up_cim->outbuf;
2983 
2984 		if (is_t5(padap->params.chip)) {
2985 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
2986 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
2987 			up_cim_reg->ireg_local_offset =
2988 						t5_up_cim_reg_array[i][2];
2989 			up_cim_reg->ireg_offset_range =
2990 						t5_up_cim_reg_array[i][3];
2991 		} else if (is_t6(padap->params.chip)) {
2992 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
2993 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
2994 			up_cim_reg->ireg_local_offset =
2995 						t6_up_cim_reg_array[i][2];
2996 			up_cim_reg->ireg_offset_range =
2997 						t6_up_cim_reg_array[i][3];
2998 		}
2999 
3000 		rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
3001 
3002 		up_cim++;
3003 	}
3004 
3005 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3006 	if (rc)
3007 		goto err1;
3008 
3009 	rc = compress_buff(&scratch_buff, dbg_buff);
3010 
3011 err1:
3012 	release_scratch_buff(&scratch_buff, dbg_buff);
3013 err:
3014 	return rc;
3015 }
3016 
3017 static int
3018 collect_mbox_log(struct cudbg_init *pdbg_init,
3019 		 struct cudbg_buffer *dbg_buff,
3020 		 struct cudbg_error *cudbg_err)
3021 {
3022 #ifdef notyet
3023 	struct cudbg_buffer scratch_buff;
3024 	struct cudbg_mbox_log *mboxlog = NULL;
3025 	struct mbox_cmd_log *log = NULL;
3026 	struct mbox_cmd *entry;
3027 	u64 flit;
3028 	u32 size;
3029 	unsigned int entry_idx;
3030 	int i, k, rc;
3031 	u16 mbox_cmds;
3032 
3033 	if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
3034 		log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3035 			mboxlog_param.log;
3036 		mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3037 				mboxlog_param.mbox_cmds;
3038 	} else {
3039 		if (pdbg_init->verbose)
3040 			pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
3041 					 "Mbox log is not requested\n");
3042 		return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
3043 	}
3044 
3045 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
3046 	scratch_buff.size = size;
3047 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3048 	if (rc)
3049 		goto err;
3050 
3051 	mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;
3052 
3053 	for (k = 0; k < mbox_cmds; k++) {
3054 		entry_idx = log->cursor + k;
3055 		if (entry_idx >= log->size)
3056 			entry_idx -= log->size;
3057 		entry = mbox_cmd_log_entry(log, entry_idx);
3058 
3059 		/* skip over unused entries */
3060 		if (entry->timestamp == 0)
3061 			continue;
3062 
3063 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
3064 
3065 		for (i = 0; i < MBOX_LEN / 8; i++) {
3066 			flit = entry->cmd[i];
3067 			mboxlog->hi[i] = (u32)(flit >> 32);
3068 			mboxlog->lo[i] = (u32)flit;
3069 		}
3070 
3071 		mboxlog++;
3072 	}
3073 
3074 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3075 	if (rc)
3076 		goto err1;
3077 
3078 	rc = compress_buff(&scratch_buff, dbg_buff);
3079 
3080 err1:
3081 	release_scratch_buff(&scratch_buff, dbg_buff);
3082 err:
3083 	return rc;
3084 #endif
3085 	return (-1);
3086 }
3087 
3088 static int
3089 collect_pbt_tables(struct cudbg_init *pdbg_init,
3090 		   struct cudbg_buffer *dbg_buff,
3091 		   struct cudbg_error *cudbg_err)
3092 {
3093 	struct cudbg_buffer scratch_buff;
3094 	struct adapter *padap = pdbg_init->adap;
3095 	struct cudbg_pbt_tables *pbt = NULL;
3096 	u32 size;
3097 	u32 addr;
3098 	int i, rc;
3099 
3100 	size = sizeof(struct cudbg_pbt_tables);
3101 	scratch_buff.size = size;
3102 
3103 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3104 	if (rc)
3105 		goto err;
3106 
3107 	pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
3108 
3109 	/* PBT dynamic entries */
3110 	addr = CUDBG_CHAC_PBT_ADDR;
3111 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
3112 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
3113 		if (rc) {
3114 			if (pdbg_init->verbose)
3115 				pdbg_init->print(padap->dip, CE_NOTE,
3116 						 "BUSY timeout reading "
3117 					 "CIM_HOST_ACC_CTRL\n");
3118 			goto err1;
3119 		}
3120 	}
3121 
3122 	/* PBT static entries */
3123 
3124 	/* static entries start when bit 6 is set */
3125 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
3126 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
3127 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
3128 		if (rc) {
3129 			if (pdbg_init->verbose)
3130 				pdbg_init->print(padap->dip, CE_NOTE,
3131 						 "BUSY timeout reading "
3132 					 "CIM_HOST_ACC_CTRL\n");
3133 			goto err1;
3134 		}
3135 	}
3136 
3137 	/* LRF entries */
3138 	addr = CUDBG_CHAC_PBT_LRF;
3139 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
3140 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
3141 		if (rc) {
3142 			if (pdbg_init->verbose)
3143 				pdbg_init->print(padap->dip, CE_NOTE,
3144 						 "BUSY timeout reading "
3145 					 "CIM_HOST_ACC_CTRL\n");
3146 			goto err1;
3147 		}
3148 	}
3149 
3150 	/* PBT data entries */
3151 	addr = CUDBG_CHAC_PBT_DATA;
3152 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
3153 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
3154 		if (rc) {
3155 			if (pdbg_init->verbose)
3156 				pdbg_init->print(padap->dip, CE_NOTE,
3157 						 "BUSY timeout reading "
3158 					 "CIM_HOST_ACC_CTRL\n");
3159 			goto err1;
3160 		}
3161 	}
3162 
3163 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3164 	if (rc)
3165 		goto err1;
3166 
3167 	rc = compress_buff(&scratch_buff, dbg_buff);
3168 
3169 err1:
3170 	release_scratch_buff(&scratch_buff, dbg_buff);
3171 err:
3172 	return rc;
3173 }
3174 
3175 static int
3176 collect_pm_indirect(struct cudbg_init *pdbg_init,
3177 		    struct cudbg_buffer *dbg_buff,
3178 		    struct cudbg_error *cudbg_err)
3179 {
3180 	struct cudbg_buffer scratch_buff;
3181 	struct adapter *padap = pdbg_init->adap;
3182 	struct ireg_buf *ch_pm;
3183 	u32 size;
3184 	int i, rc, n;
3185 
3186 	n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
3187 	size = sizeof(struct ireg_buf) * n * 2;
3188 	scratch_buff.size = size;
3189 
3190 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3191 	if (rc)
3192 		goto err;
3193 
3194 	ch_pm = (struct ireg_buf *)scratch_buff.data;
3195 
3196 	/*PM_RX*/
3197 	for (i = 0; i < n; i++) {
3198 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3199 		u32 *buff = ch_pm->outbuf;
3200 
3201 		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
3202 		pm_pio->ireg_data = t5_pm_rx_array[i][1];
3203 		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
3204 		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
3205 
3206 		t4_read_indirect(padap,
3207 				pm_pio->ireg_addr,
3208 				pm_pio->ireg_data,
3209 				buff,
3210 				pm_pio->ireg_offset_range,
3211 				pm_pio->ireg_local_offset);
3212 
3213 		ch_pm++;
3214 	}
3215 
3216 	/*PM_Tx*/
3217 	n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
3218 	for (i = 0; i < n; i++) {
3219 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3220 		u32 *buff = ch_pm->outbuf;
3221 
3222 		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
3223 		pm_pio->ireg_data = t5_pm_tx_array[i][1];
3224 		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
3225 		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
3226 
3227 		t4_read_indirect(padap,
3228 				pm_pio->ireg_addr,
3229 				pm_pio->ireg_data,
3230 				buff,
3231 				pm_pio->ireg_offset_range,
3232 				pm_pio->ireg_local_offset);
3233 
3234 		ch_pm++;
3235 	}
3236 
3237 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3238 	if (rc)
3239 		goto err1;
3240 
3241 	rc = compress_buff(&scratch_buff, dbg_buff);
3242 
3243 err1:
3244 	release_scratch_buff(&scratch_buff, dbg_buff);
3245 err:
3246 	return rc;
3247 
3248 }
3249 
3250 static int
3251 collect_tid(struct cudbg_init *pdbg_init,
3252 	    struct cudbg_buffer *dbg_buff,
3253 	    struct cudbg_error *cudbg_err)
3254 {
3255 
3256 	struct cudbg_buffer scratch_buff;
3257 	struct adapter *padap = pdbg_init->adap;
3258 	struct tid_info_region *tid;
3259 	struct tid_info_region_rev1 *tid1;
3260 	u32 para[7], val[7];
3261 	u32 mbox, pf;
3262 	int rc;
3263 
3264 	scratch_buff.size = sizeof(struct tid_info_region_rev1);
3265 
3266 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3267 	if (rc)
3268 		goto err;
3269 
3270 #define FW_PARAM_DEV_A(param) \
3271 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3272 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3273 #define FW_PARAM_PFVF_A(param) \
3274 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3275 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
3276 	 V_FW_PARAMS_PARAM_Y(0) | \
3277 	 V_FW_PARAMS_PARAM_Z(0))
3278 #define MAX_ATIDS_A 8192U
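	/*
	 * Shorthand for building firmware parameter queries:
	 * FW_PARAM_DEV_A() selects a device-wide parameter and
	 * FW_PARAM_PFVF_A() a per-PF/VF parameter (with the Y/Z indices
	 * fixed at 0). Both are #undef'd at the end of this function.
	 */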
3279 
3280 	tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
3281 	tid = &(tid1->tid);
3282 	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
3283 	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
3284 	tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
3285 			     sizeof(struct cudbg_ver_hdr);
3286 
3287 	if (is_t5(padap->params.chip)) {
3288 		tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3289 		tid1->tid_start = 0;
3290 	} else if (is_t6(padap->params.chip)) {
3291 		tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
3292 		tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
3293 	}
3294 
3295 	tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
3296 
3297 	para[0] = FW_PARAM_PFVF_A(FILTER_START);
3298 	para[1] = FW_PARAM_PFVF_A(FILTER_END);
3299 	para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
3300 	para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
3301 	para[4] = FW_PARAM_DEV_A(NTID);
3302 	para[5] = FW_PARAM_PFVF_A(SERVER_START);
3303 	para[6] = FW_PARAM_PFVF_A(SERVER_END);
3304 
3305 	ADAPTER_LOCK(padap);
3306 	mbox = padap->mbox;
3307 	pf = padap->pf;
3308 	rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3309 	if (rc <  0) {
3310 		if (rc == -FW_EPERM) {
3311 			/* It looks like we don't have permission to use
3312 			 * padap->mbox.
3313 			 *
3314 			 * Try mbox 4.  If it works, we'll continue to
3315 			 * collect the rest of tid info from mbox 4.
3316 			 * Else, quit trying to collect tid info.
3317 			 */
3318 			mbox = 4;
3319 			pf = 4;
3320 			rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3321 			if (rc < 0) {
3322 				cudbg_err->sys_err = rc;
3323 				goto err1;
3324 			}
3325 		} else {
3326 			cudbg_err->sys_err = rc;
3327 			goto err1;
3328 		}
3329 	}
3330 
3331 	tid->ftid_base = val[0];
3332 	tid->nftids = val[1] - val[0] + 1;
3333 	/*active filter region*/
3334 	if (val[2] != val[3]) {
3335 #ifdef notyet
3336 		tid->flags |= FW_OFLD_CONN;
3337 #endif
3338 		tid->aftid_base = val[2];
3339 		tid->aftid_end = val[3];
3340 	}
3341 	tid->ntids = val[4];
3342 	tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
3343 	tid->stid_base = val[5];
3344 	tid->nstids = val[6] - val[5] + 1;
3345 
3346 	if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
3347 		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
3348 		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
3349 		rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3350 		if (rc < 0) {
3351 			cudbg_err->sys_err = rc;
3352 			goto err1;
3353 		}
3354 
3355 		tid->hpftid_base = val[0];
3356 		tid->nhpftids = val[1] - val[0] + 1;
3357 	}
3358 
3359 	if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
3360 		tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
3361 		tid->hash_base /= 4;
3362 	} else
3363 		tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
3364 
3365 	/*UO context range*/
3366 	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
3367 	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
3368 
3369 	rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3370 	if (rc <  0) {
3371 		cudbg_err->sys_err = rc;
3372 		goto err1;
3373 	}
3374 
3375 	if (val[0] != val[1]) {
3376 		tid->uotid_base = val[0];
3377 		tid->nuotids = val[1] - val[0] + 1;
3378 	}
3379 	tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
3380 	tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
3381 
3382 #undef FW_PARAM_PFVF_A
3383 #undef FW_PARAM_DEV_A
3384 #undef MAX_ATIDS_A
3385 
3386 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3387 	if (rc)
3388 		goto err1;
3389 	rc = compress_buff(&scratch_buff, dbg_buff);
3390 
3391 err1:
3392 	ADAPTER_UNLOCK(padap);
3393 	release_scratch_buff(&scratch_buff, dbg_buff);
3394 err:
3395 	return rc;
3396 }
3397 
3398 static int
3399 collect_tx_rate(struct cudbg_init *pdbg_init,
3400 		struct cudbg_buffer *dbg_buff,
3401 		struct cudbg_error *cudbg_err)
3402 {
3403 	struct cudbg_buffer scratch_buff;
3404 	struct adapter *padap = pdbg_init->adap;
3405 	struct tx_rate *tx_rate;
3406 	u32 size;
3407 	int rc;
3408 
3409 	size = sizeof(struct tx_rate);
3410 	scratch_buff.size = size;
3411 
3412 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3413 	if (rc)
3414 		goto err;
3415 
3416 	tx_rate = (struct tx_rate *)scratch_buff.data;
3417 	t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
3418 	tx_rate->nchan = padap->params.arch.nchan;
3419 
3420 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3421 	if (rc)
3422 		goto err1;
3423 
3424 	rc = compress_buff(&scratch_buff, dbg_buff);
3425 
3426 err1:
3427 	release_scratch_buff(&scratch_buff, dbg_buff);
3428 err:
3429 	return rc;
3430 }
3431 
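/*
 * Convert a TCAM (x, y) pair into an Ethernet address and mask: the
 * mask is the union of the X and Y bits, and the 6-byte address is the
 * low 48 bits of Y (bytes 2-7 of its big-endian representation).
 */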
3432 static inline void
3433 cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
3434 {
3435 	*mask = x | y;
3436 	y = (__force u64)cpu_to_be64(y);
3437 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
3438 }
3439 
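/*
 * Fallback used by collect_mps_tcam(): if the FW_LDST mailbox command
 * for the replication map fails, read the MPS VF replication map
 * registers directly. T5 has four map registers; later chips have
 * eight, so the upper 128 bits come from MAP4-MAP7 on the latter.
 */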
3440 static void
3441 mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
3442 {
3443 	if (is_t5(padap->params.chip)) {
3444 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3445 							  A_MPS_VF_RPLCT_MAP3));
3446 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3447 							  A_MPS_VF_RPLCT_MAP2));
3448 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3449 							  A_MPS_VF_RPLCT_MAP1));
3450 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3451 							  A_MPS_VF_RPLCT_MAP0));
3452 	} else {
3453 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3454 							  A_MPS_VF_RPLCT_MAP7));
3455 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3456 							  A_MPS_VF_RPLCT_MAP6));
3457 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3458 							  A_MPS_VF_RPLCT_MAP5));
3459 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3460 							  A_MPS_VF_RPLCT_MAP4));
3461 	}
3462 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
3463 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
3464 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
3465 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
3466 }
3467 
3468 static int
3469 collect_mps_tcam(struct cudbg_init *pdbg_init,
3470 		 struct cudbg_buffer *dbg_buff,
3471 		 struct cudbg_error *cudbg_err)
3472 {
3473 	struct cudbg_buffer scratch_buff;
3474 	struct adapter *padap = pdbg_init->adap;
3475 	struct cudbg_mps_tcam *tcam = NULL;
3476 	u32 size = 0, i, n, total_size = 0;
3477 	u32 ctl, data2;
3478 	u64 tcamy, tcamx, val;
3479 	int rc;
3480 
3481 
3482 	n = padap->params.arch.mps_tcam_size;
3483 	size = sizeof(struct cudbg_mps_tcam) * n;
3484 	scratch_buff.size = size;
3485 
3486 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3487 	if (rc)
3488 		goto err;
3489 	memset(scratch_buff.data, 0, size);
3490 
3491 	tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
3492 	for (i = 0; i < n; i++) {
3493 		if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
3494 			/* CtlReqID   - 1: use Host Driver Requester ID
3495 			 * CtlCmdType - 0: Read, 1: Write
3496 			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
3497 			 * CtlXYBitSel- 0: Y bit, 1: X bit
3498 			 */
3499 
3500 			/* Read tcamy */
3501 			ctl = (V_CTLREQID(1) |
3502 			       V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
3503 			if (i < 256)
3504 				ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
3505 			else
3506 				ctl |= V_CTLTCAMINDEX(i - 256) |
3507 				       V_CTLTCAMSEL(1);
3508 
3509 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3510 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3511 			tcamy = G_DMACH(val) << 32;
3512 			tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3513 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3514 			tcam->lookup_type = G_DATALKPTYPE(data2);
3515 
3516 			/* 0 - Outer header, 1 - Inner header
3517 			 * [71:48] bit locations are overloaded for
3518 			 * outer vs. inner lookup types.
3519 			 */
3520 
3521 			if (tcam->lookup_type &&
3522 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3523 				/* Inner header VNI */
3524 				tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
3525 					     (G_DATAVIDH1(data2) << 16) |
3526 					     G_VIDL(val);
3527 				tcam->dip_hit = data2 & F_DATADIPHIT;
3528 			} else {
3529 				tcam->vlan_vld = data2 & F_DATAVIDH2;
3530 				tcam->ivlan = G_VIDL(val);
3531 			}
3532 
3533 			tcam->port_num = G_DATAPORTNUM(data2);
3534 
3535 			/* Read tcamx. Change the control param */
3536 			ctl |= V_CTLXYBITSEL(1);
3537 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3538 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3539 			tcamx = G_DMACH(val) << 32;
3540 			tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3541 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3542 			if (tcam->lookup_type &&
3543 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3544 				/* Inner header VNI mask */
3545 				tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
3546 					     (G_DATAVIDH1(data2) << 16) |
3547 					     G_VIDL(val);
3548 			}
3549 		} else {
3550 			tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
3551 			tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
3552 		}
3553 
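		/*
		 * Entries whose X and Y values overlap in any bit are not
		 * programmed TCAM entries; skip them.
		 */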
3554 		if (tcamx & tcamy)
3555 			continue;
3556 
3557 		tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
3558 		tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));
3559 
3560 		if (is_t5(padap->params.chip))
3561 			tcam->repli = (tcam->cls_lo & F_REPLICATE);
3562 		else if (is_t6(padap->params.chip))
3563 			tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);
3564 
3565 		if (tcam->repli) {
3566 			struct fw_ldst_cmd ldst_cmd;
3567 			struct fw_ldst_mps_rplc mps_rplc;
3568 
3569 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
3570 			ldst_cmd.op_to_addrspace =
3571 				htonl(V_FW_CMD_OP(FW_LDST_CMD) |
3572 				      F_FW_CMD_REQUEST |
3573 				      F_FW_CMD_READ |
3574 				      V_FW_LDST_CMD_ADDRSPACE(
3575 					      FW_LDST_ADDRSPC_MPS));
3576 
3577 			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
3578 
3579 			ldst_cmd.u.mps.rplc.fid_idx =
3580 				htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
3581 				      V_FW_LDST_CMD_IDX(i));
3582 
3583 			ADAPTER_LOCK(padap);
3584 			rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
3585 					sizeof(ldst_cmd), &ldst_cmd);
3586 			ADAPTER_UNLOCK(padap);
3587 
3588 			if (rc)
3589 				mps_rpl_backdoor(padap, &mps_rplc);
3590 			else
3591 				mps_rplc = ldst_cmd.u.mps.rplc;
3592 
3593 			tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
3594 			tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
3595 			tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
3596 			tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
3597 			if (padap->params.arch.mps_rplc_size >
3598 					CUDBG_MAX_RPLC_SIZE) {
3599 				tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
3600 				tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
3601 				tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
3602 				tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
3603 			}
3604 		}
3605 		cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
3606 
3607 		tcam->idx = i;
3608 		tcam->rplc_size = padap->params.arch.mps_rplc_size;
3609 
3610 		total_size += sizeof(struct cudbg_mps_tcam);
3611 
3612 		tcam++;
3613 	}
3614 
3615 	if (total_size == 0) {
3616 		rc = CUDBG_SYSTEM_ERROR;
3617 		goto err1;
3618 	}
3619 
3620 	scratch_buff.size = total_size;
3621 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3622 	if (rc)
3623 		goto err1;
3624 
3625 	rc = compress_buff(&scratch_buff, dbg_buff);
3626 
3627 err1:
3628 	scratch_buff.size = size;
3629 	release_scratch_buff(&scratch_buff, dbg_buff);
3630 err:
3631 	return rc;
3632 }
3633 
3634 static int
3635 collect_pcie_config(struct cudbg_init *pdbg_init,
3636 		    struct cudbg_buffer *dbg_buff,
3637 		    struct cudbg_error *cudbg_err)
3638 {
3639 	struct cudbg_buffer scratch_buff;
3640 	struct adapter *padap = pdbg_init->adap;
3641 	u32 size, *value, j;
3642 	int i, rc, n;
3643 
3644 	size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
3645 	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
3646 	scratch_buff.size = size;
3647 
3648 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3649 	if (rc)
3650 		goto err;
3651 
3652 	value = (u32 *)scratch_buff.data;
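	/*
	 * Each t5_pcie_config_array entry is a [start, end] PCIe config
	 * space range; dump every range 4 bytes at a time.
	 */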
3653 	for (i = 0; i < n; i++) {
3654 		for (j = t5_pcie_config_array[i][0];
3655 		     j <= t5_pcie_config_array[i][1]; j += 4) {
3656 			t4_hw_pci_read_cfg4(padap, j, value++);
3657 		}
3658 	}
3659 
3660 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3661 	if (rc)
3662 		goto err1;
3663 
3664 	rc = compress_buff(&scratch_buff, dbg_buff);
3665 
3666 err1:
3667 	release_scratch_buff(&scratch_buff, dbg_buff);
3668 err:
3669 	return rc;
3670 }
3671 
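/*
 * Issue a single LE debug-interface (DBGI) read for the given TID and
 * copy the response words into tid_data.
 */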
3672 static int
3673 cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
3674 	       struct cudbg_tid_data *tid_data)
3675 {
3676 	int i, cmd_retry = 8;
3677 	struct adapter *padap = pdbg_init->adap;
3678 	u32 val;
3679 
3680 	/* Fill REQ_DATA regs with 0's */
3681 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3682 		t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);
3683 
3684 	/* Write DBIG command */
3685 	val = (0x4 << S_DBGICMD) | tid;
3686 	t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
3687 	tid_data->dbig_cmd = val;
3688 
3689 	val = 0;
3690 	val |= 1 << S_DBGICMDSTRT;
3691 	val |= 1;  /* LE mode */
3692 	t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
3693 	tid_data->dbig_conf = val;
3694 
3695 	/* Poll the DBGICMDBUSY bit */
3696 	val = 1;
3697 	while (val) {
3698 		val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
3699 		val = (val >> S_DBGICMDBUSY) & 1;
3700 		cmd_retry--;
3701 		if (!cmd_retry) {
3702 			if (pdbg_init->verbose)
3703 				pdbg_init->print(padap->dip, CE_NOTE,
3704 						 "%s(): Timeout waiting for non-busy\n",
3705 					 __func__);
3706 			return CUDBG_SYSTEM_ERROR;
3707 		}
3708 	}
3709 
3710 	/* Check RESP status */
3711 	val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
3713 	tid_data->dbig_rsp_stat = val;
3714 	if (!(val & 1)) {
3715 		if (pdbg_init->verbose)
3716 			pdbg_init->print(padap->dip, CE_NOTE,
3717 					 "%s(): DBGI command failed\n", __func__);
3718 		return CUDBG_SYSTEM_ERROR;
3719 	}
3720 
3721 	/* Read RESP data */
3722 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3723 		tid_data->data[i] = t4_read_reg(padap,
3724 						A_LE_DB_DBGI_RSP_DATA +
3725 						(i << 2));
3726 
3727 	tid_data->tid = tid;
3728 
3729 	return 0;
3730 }
3731 
3732 static int
3733 collect_le_tcam(struct cudbg_init *pdbg_init,
3734 		struct cudbg_buffer *dbg_buff,
3735 		struct cudbg_error *cudbg_err)
3736 {
3737 	struct cudbg_buffer scratch_buff;
3738 	struct adapter *padap = pdbg_init->adap;
3739 	struct cudbg_tcam tcam_region = {0};
3740 	struct cudbg_tid_data *tid_data = NULL;
3741 	u32 value, bytes = 0, bytes_left = 0;
3742 	u32 i;
3743 	int rc, size;
3744 
3745 	/* Get the LE regions */
3746 	/* Get hash base index */
3747 	value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3748 	tcam_region.tid_hash_base = value;
3749 
3750 	/* Get routing table index */
3751 	value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
3752 	tcam_region.routing_start = value;
3753 
3754 	/* Get clip table index */
3755 	value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
3756 	tcam_region.clip_start = value;
3757 
3758 	/* Get filter table index */
3759 	value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
3760 	tcam_region.filter_start = value;
3761 
3762 	/* Get server table index */
3763 	value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
3764 	tcam_region.server_start = value;
3765 
3766 	/* Check whether hash is enabled and calculate the max tids */
3767 	value = t4_read_reg(padap, A_LE_DB_CONFIG);
3768 	if ((value >> S_HASHEN) & 1) {
3769 		value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
3770 		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
3771 			tcam_region.max_tid = (value & 0xFFFFF) +
3772 					      tcam_region.tid_hash_base;
3773 		else {	    /* for T5 */
3774 			value = G_HASHTIDSIZE(value);
3775 			value = 1 << value;
3776 			tcam_region.max_tid = value +
3777 				tcam_region.tid_hash_base;
3778 		}
3779 	} else	 /* hash not enabled */
3780 		tcam_region.max_tid = CUDBG_MAX_TCAM_TID;
3781 
3782 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
3783 	size += sizeof(struct cudbg_tcam);
3784 	scratch_buff.size = size;
3785 
3786 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3787 	if (rc)
3788 		goto err;
3789 
3790 	rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
3791 	if (rc)
3792 		goto err;
3793 
3794 	memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
3795 
3796 	tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
3797 					     scratch_buff.data) + 1);
3798 	bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
3799 	bytes = sizeof(struct cudbg_tcam);
3800 
3801 	/* read all tid */
3802 	for (i = 0; i < tcam_region.max_tid; i++) {
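		/*
		 * If the current scratch chunk cannot hold another TID
		 * entry, compress what has been gathered so far, release
		 * the chunk and start a fresh CUDBG_CHUNK_SIZE chunk.
		 */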
3803 		if (bytes_left < sizeof(struct cudbg_tid_data)) {
3804 			scratch_buff.size = bytes;
3805 			rc = compress_buff(&scratch_buff, dbg_buff);
3806 			if (rc)
3807 				goto err1;
3808 			scratch_buff.size = CUDBG_CHUNK_SIZE;
3809 			release_scratch_buff(&scratch_buff, dbg_buff);
3810 
3811 			/* new alloc */
3812 			rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
3813 					      &scratch_buff);
3814 			if (rc)
3815 				goto err;
3816 
3817 			tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
3818 			bytes_left = CUDBG_CHUNK_SIZE;
3819 			bytes = 0;
3820 		}
3821 
3822 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
3823 
3824 		if (rc) {
3825 			cudbg_err->sys_err = rc;
3826 			goto err1;
3827 		}
3828 
3829 		tid_data++;
3830 		bytes_left -= sizeof(struct cudbg_tid_data);
3831 		bytes += sizeof(struct cudbg_tid_data);
3832 	}
3833 
3834 	if (bytes) {
3835 		scratch_buff.size = bytes;
3836 		rc = compress_buff(&scratch_buff, dbg_buff);
3837 	}
3838 
3839 err1:
3840 	scratch_buff.size = CUDBG_CHUNK_SIZE;
3841 	release_scratch_buff(&scratch_buff, dbg_buff);
3842 err:
3843 	return rc;
3844 }
3845 
3846 static int
3847 collect_ma_indirect(struct cudbg_init *pdbg_init,
3848 		    struct cudbg_buffer *dbg_buff,
3849 		    struct cudbg_error *cudbg_err)
3850 {
3851 	struct cudbg_buffer scratch_buff;
3852 	struct adapter *padap = pdbg_init->adap;
3853 	struct ireg_buf *ma_indr = NULL;
3854 	u32 size, j;
3855 	int i, rc, n;
3856 
3857 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) {
3858 		if (pdbg_init->verbose)
3859 			pdbg_init->print(padap->dip, CE_NOTE,
3860 					 "MA indirect available only in T6\n");
3861 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3862 		goto err;
3863 	}
3864 
3865 	n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
3866 	size = sizeof(struct ireg_buf) * n * 2;
3867 	scratch_buff.size = size;
3868 
3869 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3870 	if (rc)
3871 		goto err;
3872 
3873 	ma_indr = (struct ireg_buf *)scratch_buff.data;
3874 
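	/*
	 * Each t6_ma_ireg_array entry describes a contiguous indirect
	 * register range: {addr, data, local offset, range}.
	 */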
3875 	for (i = 0; i < n; i++) {
3876 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3877 		u32 *buff = ma_indr->outbuf;
3878 
3879 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
3880 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
3881 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
3882 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
3883 
3884 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
3885 				 buff, ma_fli->ireg_offset_range,
3886 				 ma_fli->ireg_local_offset);
3887 
3888 		ma_indr++;
3889 
3890 	}
3891 
3892 	n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));
3893 
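	/*
	 * t6_ma_ireg_array2 ranges are read one register at a time,
	 * stepping the local offset by 0x20 between reads.
	 */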
3894 	for (i = 0; i < n; i++) {
3895 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3896 		u32 *buff = ma_indr->outbuf;
3897 
3898 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
3899 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
3900 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
3901 
3902 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
3903 			t4_read_indirect(padap, ma_fli->ireg_addr,
3904 					 ma_fli->ireg_data, buff, 1,
3905 					 ma_fli->ireg_local_offset);
3906 			buff++;
3907 			ma_fli->ireg_local_offset += 0x20;
3908 		}
3909 		ma_indr++;
3910 	}
3911 
3912 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3913 	if (rc)
3914 		goto err1;
3915 
3916 	rc = compress_buff(&scratch_buff, dbg_buff);
3917 
3918 err1:
3919 	release_scratch_buff(&scratch_buff, dbg_buff);
3920 err:
3921 	return rc;
3922 }
3923 
3924 static int
3925 collect_hma_indirect(struct cudbg_init *pdbg_init,
3926 		     struct cudbg_buffer *dbg_buff,
3927 		     struct cudbg_error *cudbg_err)
3928 {
3929 	struct cudbg_buffer scratch_buff;
3930 	struct adapter *padap = pdbg_init->adap;
3931 	struct ireg_buf *hma_indr = NULL;
3932 	u32 size;
3933 	int i, rc, n;
3934 
3935 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) {
3936 		if (pdbg_init->verbose)
3937 			pdbg_init->print(padap->dip, CE_NOTE,
3938 					 "HMA indirect available only in T6\n");
3939 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3940 		goto err;
3941 	}
3942 
3943 	n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
3944 	size = sizeof(struct ireg_buf) * n;
3945 	scratch_buff.size = size;
3946 
3947 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3948 	if (rc)
3949 		goto err;
3950 
3951 	hma_indr = (struct ireg_buf *)scratch_buff.data;
3952 
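	/*
	 * Dump each HMA indirect register range described by
	 * t6_hma_ireg_array.
	 */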
3953 	for (i = 0; i < n; i++) {
3954 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
3955 		u32 *buff = hma_indr->outbuf;
3956 
3957 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
3958 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
3959 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
3960 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
3961 
3962 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
3963 				 buff, hma_fli->ireg_offset_range,
3964 				 hma_fli->ireg_local_offset);
3965 
3966 		hma_indr++;
3967 
3968 	}
3969 
3970 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3971 	if (rc)
3972 		goto err1;
3973 
3974 	rc = compress_buff(&scratch_buff, dbg_buff);
3975 
3976 err1:
3977 	release_scratch_buff(&scratch_buff, dbg_buff);
3978 err:
3979 	return rc;
3980 }
3981 
3982 static int
3983 collect_pcie_indirect(struct cudbg_init *pdbg_init,
3984 		      struct cudbg_buffer *dbg_buff,
3985 		      struct cudbg_error *cudbg_err)
3986 {
3987 	struct cudbg_buffer scratch_buff;
3988 	struct adapter *padap = pdbg_init->adap;
3989 	struct ireg_buf *ch_pcie;
3990 	u32 size;
3991 	int i, rc, n;
3992 
3993 	n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
3994 	size = sizeof(struct ireg_buf) * n * 2;
3995 	scratch_buff.size = size;
3996 
3997 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3998 	if (rc)
3999 		goto err;
4000 
4001 	ch_pcie = (struct ireg_buf *)scratch_buff.data;
4002 
4003 	/* PCIE_PDBG */
4004 	for (i = 0; i < n; i++) {
4005 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4006 		u32 *buff = ch_pcie->outbuf;
4007 
4008 		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
4009 		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
4010 		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
4011 		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
4012 
4013 		t4_read_indirect(padap,
4014 				pcie_pio->ireg_addr,
4015 				pcie_pio->ireg_data,
4016 				buff,
4017 				pcie_pio->ireg_offset_range,
4018 				pcie_pio->ireg_local_offset);
4019 
4020 		ch_pcie++;
4021 	}
4022 
4023 	/* PCIE_CDBG */
4024 	n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
4025 	for (i = 0; i < n; i++) {
4026 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4027 		u32 *buff = ch_pcie->outbuf;
4028 
4029 		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
4030 		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
4031 		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
4032 		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
4033 
4034 		t4_read_indirect(padap,
4035 				pcie_pio->ireg_addr,
4036 				pcie_pio->ireg_data,
4037 				buff,
4038 				pcie_pio->ireg_offset_range,
4039 				pcie_pio->ireg_local_offset);
4040 
4041 		ch_pcie++;
4042 	}
4043 
4044 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4045 	if (rc)
4046 		goto err1;
4047 
4048 	rc = compress_buff(&scratch_buff, dbg_buff);
4049 
4050 err1:
4051 	release_scratch_buff(&scratch_buff, dbg_buff);
4052 err:
4053 	return rc;
4054 
4055 }
4056 
4057 static int
4058 collect_tp_indirect(struct cudbg_init *pdbg_init,
4059 		    struct cudbg_buffer *dbg_buff,
4060 		    struct cudbg_error *cudbg_err)
4061 {
4062 	struct cudbg_buffer scratch_buff;
4063 	struct adapter *padap = pdbg_init->adap;
4064 	struct ireg_buf *ch_tp_pio;
4065 	u32 size;
4066 	int i, rc, n = 0;
4067 
4068 	if (is_t5(padap->params.chip))
4069 		n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
4070 	else if (is_t6(padap->params.chip))
4071 		n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));
4072 
4073 	size = sizeof(struct ireg_buf) * n * 3;
4074 	scratch_buff.size = size;
4075 
4076 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4077 	if (rc)
4078 		goto err;
4079 
4080 	ch_tp_pio = (struct ireg_buf *)scratch_buff.data;
4081 
4082 	/* TP_PIO */
4083 	for (i = 0; i < n; i++) {
4084 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4085 		u32 *buff = ch_tp_pio->outbuf;
4086 
4087 		if (is_t5(padap->params.chip)) {
4088 			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
4089 			tp_pio->ireg_data = t5_tp_pio_array[i][1];
4090 			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
4091 			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
4092 		} else if (is_t6(padap->params.chip)) {
4093 			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
4094 			tp_pio->ireg_data = t6_tp_pio_array[i][1];
4095 			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
4096 			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
4097 		}
4098 
4099 		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
4100 			       tp_pio->ireg_local_offset, true);
4101 
4102 		ch_tp_pio++;
4103 	}
4104 
4105 	/* TP_TM_PIO */
4106 	if (is_t5(padap->params.chip))
4107 		n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
4108 	else if (is_t6(padap->params.chip))
4109 		n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));
4110 
4111 	for (i = 0; i < n; i++) {
4112 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4113 		u32 *buff = ch_tp_pio->outbuf;
4114 
4115 		if (is_t5(padap->params.chip)) {
4116 			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
4117 			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
4118 			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
4119 			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
4120 		} else if (is_t6(padap->params.chip)) {
4121 			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
4122 			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
4123 			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
4124 			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
4125 		}
4126 
4127 		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
4128 				  tp_pio->ireg_local_offset, true);
4129 
4130 		ch_tp_pio++;
4131 	}
4132 
4133 	/* TP_MIB_INDEX */
4134 	if (is_t5(padap->params.chip))
4135 		n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
4136 	else if (is_t6(padap->params.chip))
4137 		n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));
4138 
4139 	for (i = 0; i < n ; i++) {
4140 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4141 		u32 *buff = ch_tp_pio->outbuf;
4142 
4143 		if (is_t5(padap->params.chip)) {
4144 			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
4145 			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
4146 			tp_pio->ireg_local_offset =
4147 				t5_tp_mib_index_array[i][2];
4148 			tp_pio->ireg_offset_range =
4149 				t5_tp_mib_index_array[i][3];
4150 		} else if (is_t6(padap->params.chip)) {
4151 			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
4152 			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
4153 			tp_pio->ireg_local_offset =
4154 				t6_tp_mib_index_array[i][2];
4155 			tp_pio->ireg_offset_range =
4156 				t6_tp_mib_index_array[i][3];
4157 		}
4158 
4159 		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
4160 			       tp_pio->ireg_local_offset, true);
4161 
4162 		ch_tp_pio++;
4163 	}
4164 
4165 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4166 	if (rc)
4167 		goto err1;
4168 
4169 	rc = compress_buff(&scratch_buff, dbg_buff);
4170 
4171 err1:
4172 	release_scratch_buff(&scratch_buff, dbg_buff);
4173 err:
4174 	return rc;
4175 }
4176 
4177 static int
4178 collect_sge_indirect(struct cudbg_init *pdbg_init,
4179 		     struct cudbg_buffer *dbg_buff,
4180 		     struct cudbg_error *cudbg_err)
4181 {
4182 	struct cudbg_buffer scratch_buff;
4183 	struct adapter *padap = pdbg_init->adap;
4184 	struct ireg_buf *ch_sge_dbg;
4185 	u32 size;
4186 	int i, rc;
4187 
4188 	size = sizeof(struct ireg_buf) * 2;
4189 	scratch_buff.size = size;
4190 
4191 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4192 	if (rc)
4193 		goto err;
4194 
4195 	ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;
4196 
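	/*
	 * Dump the two SGE debug register ranges described by
	 * t5_sge_dbg_index_array.
	 */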
4197 	for (i = 0; i < 2; i++) {
4198 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
4199 		u32 *buff = ch_sge_dbg->outbuf;
4200 
4201 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
4202 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
4203 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
4204 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
4205 
4206 		t4_read_indirect(padap,
4207 				sge_pio->ireg_addr,
4208 				sge_pio->ireg_data,
4209 				buff,
4210 				sge_pio->ireg_offset_range,
4211 				sge_pio->ireg_local_offset);
4212 
4213 		ch_sge_dbg++;
4214 	}
4215 
4216 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4217 	if (rc)
4218 		goto err1;
4219 
4220 	rc = compress_buff(&scratch_buff, dbg_buff);
4221 
4222 err1:
4223 	release_scratch_buff(&scratch_buff, dbg_buff);
4224 err:
4225 	return rc;
4226 }
4227 
4228 static int
4229 collect_full(struct cudbg_init *pdbg_init,
4230 	     struct cudbg_buffer *dbg_buff,
4231 	     struct cudbg_error *cudbg_err)
4232 {
4233 	struct cudbg_buffer scratch_buff;
4234 	struct adapter *padap = pdbg_init->adap;
4235 	u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
4236 	u32 *sp;
4237 	int rc;
4238 	int nreg = 0;
4239 
4240 	/* Collect Registers:
4241 	 * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
4242 	 * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
4243 	 * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
4244 	 * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
4245 	 * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
4246 	 * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3)  This is for T6
4247 	 * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
4248 	 */
4249 
4250 	if (is_t5(padap->params.chip))
4251 		nreg = 6;
4252 	else if (is_t6(padap->params.chip))
4253 		nreg = 7;
4254 
4255 	scratch_buff.size = nreg * sizeof(u32);
4256 
4257 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4258 	if (rc)
4259 		goto err;
4260 
4261 	sp = (u32 *)scratch_buff.data;
4262 
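	/*
	 * Each read below fills the next 32-bit slot of the scratch
	 * buffer; sp advances after every read.
	 */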
4263 	/* TP_DBG_SCHED_TX */
4264 	reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
4265 	reg_offset_range = 1;
4266 
4267 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4268 
4269 	sp++;
4270 
4271 	/* TP_DBG_SCHED_RX */
4272 	reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
4273 	reg_offset_range = 1;
4274 
4275 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4276 
4277 	sp++;
4278 
4279 	/* TP_DBG_CSIDE_INT */
4280 	reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
4281 	reg_offset_range = 1;
4282 
4283 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4284 
4285 	sp++;
4286 
4287 	/* TP_DBG_ESIDE_INT */
4288 	reg_local_offset = t5_tp_pio_array[8][2] + 3;
4289 	reg_offset_range = 1;
4290 
4291 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4292 
4293 	sp++;
4294 
4295 	/* PCIE_CDEBUG_INDEX[AppData0] */
4296 	reg_addr = t5_pcie_cdbg_array[0][0];
4297 	reg_data = t5_pcie_cdbg_array[0][1];
4298 	reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
4299 	reg_offset_range = 1;
4300 
4301 	t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
4302 			 reg_local_offset);
4303 
4304 	sp++;
4305 
4306 	if (is_t6(padap->params.chip)) {
4307 		/* PCIE_CDEBUG_INDEX[AppData1] */
4308 		reg_addr = t5_pcie_cdbg_array[0][0];
4309 		reg_data = t5_pcie_cdbg_array[0][1];
4310 		reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
4311 		reg_offset_range = 1;
4312 
4313 		t4_read_indirect(padap, reg_addr, reg_data, sp,
4314 				 reg_offset_range, reg_local_offset);
4315 
4316 		sp++;
4317 	}
4318 
4319 	/* SGE_DEBUG_DATA_HIGH_INDEX_10 */
4320 	*sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);
4321 
4322 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4323 	if (rc)
4324 		goto err1;
4325 
4326 	rc = compress_buff(&scratch_buff, dbg_buff);
4327 
4328 err1:
4329 	release_scratch_buff(&scratch_buff, dbg_buff);
4330 err:
4331 	return rc;
4332 }
4333 
4334 static int
4335 collect_vpd_data(struct cudbg_init *pdbg_init,
4336 		 struct cudbg_buffer *dbg_buff,
4337 		 struct cudbg_error *cudbg_err)
4338 {
4339 #ifdef notyet
4340 	struct cudbg_buffer scratch_buff;
4341 	struct adapter *padap = pdbg_init->adap;
4342 	struct struct_vpd_data *vpd_data;
4343 	char vpd_ver[4];
4344 	u32 fw_vers;
4345 	u32 size;
4346 	int rc;
4347 
4348 	size = sizeof(struct struct_vpd_data);
4349 	scratch_buff.size = size;
4350 
4351 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4352 	if (rc)
4353 		goto err;
4354 
4355 	vpd_data = (struct struct_vpd_data *)scratch_buff.data;
4356 
4357 	if (is_t5(padap->params.chip)) {
4358 		read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
4359 		read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
4360 		read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
4361 		read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
4362 	} else if (is_t6(padap->params.chip)) {
4363 		read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
4364 		read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
4365 		read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
4366 		read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
4367 	}
4368 
4369 	if (is_fw_attached(pdbg_init)) {
4370 		rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
4371 	} else {
4372 		rc = 1;
4373 	}
4374 
4375 	if (rc) {
4376 		/* Now trying with backdoor mechanism */
4377 		rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
4378 				  (u8 *)&vpd_data->scfg_vers);
4379 		if (rc)
4380 			goto err1;
4381 	}
4382 
4383 	if (is_fw_attached(pdbg_init)) {
4384 		rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
4385 	} else {
4386 		rc = 1;
4387 	}
4388 
4389 	if (rc) {
4390 		/* Now trying with backdoor mechanism */
4391 		rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
4392 				  (u8 *)vpd_ver);
4393 		if (rc)
4394 			goto err1;
4395 		/* read_vpd_reg returns the stored hex as a string;
4396 		 * convert it to a character string. The VPD version
4397 		 * is only 2 bytes. */
4398 		sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]);
4399 		vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
4400 	}
4401 
4402 	/* Get FW version if it's not already filled in */
4403 	fw_vers = padap->params.fw_vers;
4404 	if (!fw_vers) {
4405 		rc = t4_get_fw_version(padap, &fw_vers);
4406 		if (rc)
4407 			goto err1;
4408 	}
4409 
4410 	vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
4411 	vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
4412 	vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
4413 	vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);
4414 
4415 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4416 	if (rc)
4417 		goto err1;
4418 
4419 	rc = compress_buff(&scratch_buff, dbg_buff);
4420 
4421 err1:
4422 	release_scratch_buff(&scratch_buff, dbg_buff);
4423 err:
4424 	return rc;
4425 #endif
4426 	return (-1);
4427 }
4428