xref: /freebsd/sys/dev/cxgbe/cudbg/cudbg_lib.c (revision fdafd315ad0d0f28a11b9fb4476a9ab059c62b92)
1 /*-
2  * Copyright (c) 2017 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/param.h>
29 
30 #include "common/common.h"
31 #include "common/t4_regs.h"
32 #include "cudbg.h"
33 #include "cudbg_lib_common.h"
34 #include "cudbg_lib.h"
35 #include "cudbg_entity.h"
36 #define  BUFFER_WARN_LIMIT 10000000
37 
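/*
 * Entities in large_entity_list below (the EDC and MC memory dumps) are
 * skipped on the first collection pass when no more than BUFFER_WARN_LIMIT
 * bytes remain in the output buffer; cudbg_collect() retries them in a
 * second pass once all other entities have been collected.
 */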
38 struct large_entity large_entity_list[] = {
39 	{CUDBG_EDC0, 0, 0},
40 	{CUDBG_EDC1, 0, 0},
41 	{CUDBG_MC0, 0, 0},
42 	{CUDBG_MC1, 0, 0}
43 };
44 
45 static int is_fw_attached(struct cudbg_init *pdbg_init)
46 {
47 
48 	return (pdbg_init->adap->flags & FW_OK);
49 }
50 
51 /* This function adds padding bytes to debug_buffer to make it
52  * 4-byte aligned. */
53 static void align_debug_buffer(struct cudbg_buffer *dbg_buff,
54 			struct cudbg_entity_hdr *entity_hdr)
55 {
56 	u8 zero_buf[4] = {0};
57 	u8 padding, remain;
58 
59 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
60 	padding = 4 - remain;
61 	if (remain) {
62 		memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, &zero_buf,
63 		       padding);
64 		dbg_buff->offset += padding;
65 		entity_hdr->num_pad = padding;
66 	}
67 
68 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
69 }
70 
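/*
 * Read an SGE context.  When the firmware is attached the context is read
 * through a mailbox command; otherwise, or if the mailbox read fails, fall
 * back to the register backdoor read (t4_sge_ctxt_rd_bd).
 */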
71 static void read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
72 			  enum ctxt_type ctype, u32 *data)
73 {
74 	struct adapter *padap = pdbg_init->adap;
75 	int rc = -1;
76 
77 	if (is_fw_attached(pdbg_init)) {
78 		rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
79 		    "t4cudf");
80 		if (rc != 0)
81 			goto out;
82 		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
83 				    data);
84 		end_synchronized_op(padap, 0);
85 	}
86 
87 out:
88 	if (rc)
89 		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
90 }
91 
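/*
 * Find the next free extended entity header.  Extended entities live past
 * cudbg_hdr->data_len, each preceded by its own cudbg_entity_hdr; walk the
 * chain until an unused header (size == 0) is reached, accumulating the
 * total extended size in *ext_size and checking that enough buffer remains.
 */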
92 static int get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
93 			    struct cudbg_buffer *dbg_buff,
94 			    struct cudbg_entity_hdr **entity_hdr)
95 {
96 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
97 	int rc = 0;
98 	u32 ext_offset = cudbg_hdr->data_len;
99 	*ext_size = 0;
100 
101 	if (dbg_buff->size - dbg_buff->offset <=
102 		 sizeof(struct cudbg_entity_hdr)) {
103 		rc = CUDBG_STATUS_BUFFER_SHORT;
104 		goto err;
105 	}
106 
107 	*entity_hdr = (struct cudbg_entity_hdr *)
108 		       ((char *)outbuf + cudbg_hdr->data_len);
109 
110 	/* Find the last extended entity header */
111 	while ((*entity_hdr)->size) {
112 
113 		ext_offset += sizeof(struct cudbg_entity_hdr) +
114 				     (*entity_hdr)->size;
115 
116 		*ext_size += (*entity_hdr)->size +
117 			      sizeof(struct cudbg_entity_hdr);
118 
119 		if (dbg_buff->size - dbg_buff->offset + *ext_size  <=
120 			sizeof(struct cudbg_entity_hdr)) {
121 			rc = CUDBG_STATUS_BUFFER_SHORT;
122 			goto err;
123 		}
124 
125 		if (ext_offset != (*entity_hdr)->next_ext_offset) {
126 			ext_offset -= sizeof(struct cudbg_entity_hdr) +
127 				     (*entity_hdr)->size;
128 			break;
129 		}
130 
131 		(*entity_hdr)->next_ext_offset = *ext_size;
132 
133 		*entity_hdr = (struct cudbg_entity_hdr *)
134 					   ((char *)outbuf +
135 					   ext_offset);
136 	}
137 
138 	/* update the data offset */
139 	dbg_buff->offset = ext_offset;
140 err:
141 	return rc;
142 }
143 
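/*
 * Write one collected entity to the flash dump area.  The flash offset is
 * derived from the entity's offset within the in-memory dump; entities that
 * would overflow CUDBG_FLASH_SIZE are skipped and accounted for through
 * update_skip_size().
 */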
144 static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
145 		       u32 cur_entity_data_offset,
146 		       u32 cur_entity_size,
147 		       int entity_nu, u32 ext_size)
148 {
149 	struct cudbg_private *priv = handle;
150 	struct cudbg_init *cudbg_init = &priv->dbg_init;
151 	struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
152 	u64 timestamp;
153 	u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
154 	u32 remain_flash_size;
155 	u32 flash_data_offset;
156 	u32 data_hdr_size;
157 	int rc = -1;
158 
159 	data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
160 			sizeof(struct cudbg_hdr);
161 
162 	flash_data_offset = (FLASH_CUDBG_NSECS *
163 			     (sizeof(struct cudbg_flash_hdr) +
164 			      data_hdr_size)) +
165 			    (cur_entity_data_offset - data_hdr_size);
166 
167 	if (flash_data_offset > CUDBG_FLASH_SIZE) {
168 		update_skip_size(sec_info, cur_entity_size);
169 		if (cudbg_init->verbose)
170 			cudbg_init->print("Large entity skipping...\n");
171 		return rc;
172 	}
173 
174 	remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
175 
176 	if (cur_entity_size > remain_flash_size) {
177 		update_skip_size(sec_info, cur_entity_size);
178 		if (cudbg_init->verbose)
179 			cudbg_init->print("Large entity skipping...\n");
180 	} else {
181 		timestamp = 0;
182 
183 		cur_entity_hdr_offset +=
184 			(sizeof(struct cudbg_entity_hdr) *
185 			(entity_nu - 1));
186 
187 		rc = cudbg_write_flash(handle, timestamp, dbg_buff,
188 				       cur_entity_data_offset,
189 				       cur_entity_hdr_offset,
190 				       cur_entity_size,
191 				       ext_size);
192 		if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
193 			cudbg_init->print("\n\tFLASH is full... "
194 				"cannot write any more data to flash\n\n");
195 	}
196 
197 	return rc;
198 }
199 
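/*
 * Main collection routine.  A cudbg_hdr and a table of per-entity headers
 * are laid out at the start of outbuf, then the entity bitmap is walked and
 * each selected entity's process_entity() callback appends its (compressed)
 * data.  Large entities skipped for lack of space are retried in a second
 * pass, and the result can optionally be mirrored to flash.
 */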
200 int cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
201 {
202 	struct cudbg_entity_hdr *entity_hdr = NULL;
203 	struct cudbg_entity_hdr *ext_entity_hdr = NULL;
204 	struct cudbg_hdr *cudbg_hdr;
205 	struct cudbg_buffer dbg_buff;
206 	struct cudbg_error cudbg_err = {0};
207 	int large_entity_code;
208 
209 	u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
210 	struct cudbg_init *cudbg_init =
211 		&(((struct cudbg_private *)handle)->dbg_init);
212 	struct adapter *padap = cudbg_init->adap;
213 	u32 total_size, remaining_buf_size;
214 	u32 ext_size = 0;
215 	int index, bit, i, rc = -1;
216 	int all;
217 	bool flag_ext = 0;
218 
219 	reset_skip_entity();
220 
221 	dbg_buff.data = outbuf;
222 	dbg_buff.size = *outbuf_size;
223 	dbg_buff.offset = 0;
224 
225 	cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
226 	cudbg_hdr->signature = CUDBG_SIGNATURE;
227 	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
228 	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
229 	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
230 	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
231 	cudbg_hdr->chip_ver = padap->params.chipid;
232 
233 	if (cudbg_hdr->data_len)
234 		flag_ext = 1;
235 
236 	if (cudbg_init->use_flash) {
237 #ifndef notyet
238 		rc = t4_get_flash_params(padap);
239 		if (rc) {
240 			if (cudbg_init->verbose)
241 				cudbg_init->print("\nGet flash params failed.\n\n");
242 			cudbg_init->use_flash = 0;
243 		}
244 #endif
245 
246 #ifdef notyet
247 		/* Timestamp is mandatory. If it is not passed then disable
248 		 * flash support
249 		 */
250 		if (!cudbg_init->dbg_params[CUDBG_TIMESTAMP_PARAM].u.time) {
251 			if (cudbg_init->verbose)
252 				cudbg_init->print("\nTimestamp param missing, "
253 					  "so ignoring flash write request\n\n");
254 			cudbg_init->use_flash = 0;
255 		}
256 #endif
257 	}
258 
259 	if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
260 	    dbg_buff.size) {
261 		rc = CUDBG_STATUS_SMALL_BUFF;
262 		total_size = cudbg_hdr->hdr_len;
263 		goto err;
264 	}
265 
266 	/* If the ext flag is set, move the offset to the end of the buffer
267 	 * so that extended entities can be appended there.
268 	 */
269 	if (flag_ext) {
270 		ext_entity_hdr = (struct cudbg_entity_hdr *)
271 			      ((char *)outbuf + cudbg_hdr->hdr_len +
272 			      (sizeof(struct cudbg_entity_hdr) *
273 			      (CUDBG_EXT_ENTITY - 1)));
274 		ext_entity_hdr->start_offset = cudbg_hdr->data_len;
275 		ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
276 		ext_entity_hdr->size = 0;
277 		dbg_buff.offset = cudbg_hdr->data_len;
278 	} else {
279 		dbg_buff.offset += cudbg_hdr->hdr_len; /* move 24 bytes*/
280 		dbg_buff.offset += CUDBG_MAX_ENTITY *
281 					sizeof(struct cudbg_entity_hdr);
282 	}
283 
284 	total_size = dbg_buff.offset;
285 	all = dbg_bitmap[0] & (1 << CUDBG_ALL);
286 
287 	/*sort(large_entity_list);*/
288 
289 	for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
290 		index = i / 8;
291 		bit = i % 8;
292 
293 		if (entity_list[i].bit == CUDBG_EXT_ENTITY)
294 			continue;
295 
296 		if (all || (dbg_bitmap[index] & (1 << bit))) {
297 
298 			if (!flag_ext) {
299 				rc = get_entity_hdr(outbuf, i, dbg_buff.size,
300 						    &entity_hdr);
301 				if (rc)
302 					cudbg_hdr->hdr_flags = rc;
303 			} else {
304 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
305 							     &dbg_buff,
306 							     &entity_hdr);
307 				if (rc)
308 					goto err;
309 
310 				/* move the offset after the ext header */
311 				dbg_buff.offset +=
312 					sizeof(struct cudbg_entity_hdr);
313 			}
314 
315 			entity_hdr->entity_type = i;
316 			entity_hdr->start_offset = dbg_buff.offset;
317 			/* process each entity by calling process_entity fp */
318 			remaining_buf_size = dbg_buff.size - dbg_buff.offset;
319 
320 			if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
321 			    is_large_entity(i)) {
322 				if (cudbg_init->verbose)
323 					cudbg_init->print("Skipping %s\n",
324 					    entity_list[i].name);
325 				skip_entity(i);
326 				continue;
327 			} else {
328 
329 				/* If fw_attach is 0, then skip entities which
330 				 * communicate with the firmware
331 				 */
332 
333 				if (!is_fw_attached(cudbg_init) &&
334 				    (entity_list[i].flag &
335 				    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
336 					if (cudbg_init->verbose)
337 						cudbg_init->print("Skipping %s entity, "\
338 							  "because fw_attach "\
339 							  "is 0\n",
340 							  entity_list[i].name);
341 					continue;
342 				}
343 
344 				if (cudbg_init->verbose)
345 					cudbg_init->print("collecting debug entity: "\
346 						  "%s\n", entity_list[i].name);
347 				memset(&cudbg_err, 0,
348 				       sizeof(struct cudbg_error));
349 				rc = process_entity[i-1](cudbg_init, &dbg_buff,
350 							 &cudbg_err);
351 			}
352 
353 			if (rc) {
354 				entity_hdr->size = 0;
355 				dbg_buff.offset = entity_hdr->start_offset;
356 			} else
357 				align_debug_buffer(&dbg_buff, entity_hdr);
358 
359 			if (cudbg_err.sys_err)
360 				rc = CUDBG_SYSTEM_ERROR;
361 
362 			entity_hdr->hdr_flags =  rc;
363 			entity_hdr->sys_err = cudbg_err.sys_err;
364 			entity_hdr->sys_warn =	cudbg_err.sys_warn;
365 
366 			/* We don't want to include ext entity size in global
367 			 * header
368 			 */
369 			if (!flag_ext)
370 				total_size += entity_hdr->size;
371 
372 			cudbg_hdr->data_len = total_size;
373 			*outbuf_size = total_size;
374 
375 			/* consider the size of the ext entity header and data
376 			 * also
377 			 */
378 			if (flag_ext) {
379 				ext_size += (sizeof(struct cudbg_entity_hdr) +
380 					     entity_hdr->size);
381 				entity_hdr->start_offset -= cudbg_hdr->data_len;
382 				ext_entity_hdr->size = ext_size;
383 				entity_hdr->next_ext_offset = ext_size;
384 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
385 			}
386 
387 			if (cudbg_init->use_flash) {
388 				if (flag_ext) {
389 					wr_entity_to_flash(handle,
390 							   &dbg_buff,
391 							   ext_entity_hdr->
392 							   start_offset,
393 							   entity_hdr->
394 							   size,
395 							   CUDBG_EXT_ENTITY,
396 							   ext_size);
397 				}
398 				else
399 					wr_entity_to_flash(handle,
400 							   &dbg_buff,
401 							   entity_hdr->\
402 							   start_offset,
403 							   entity_hdr->size,
404 							   i, ext_size);
405 			}
406 		}
407 	}
408 
409 	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
410 	     i++) {
411 		large_entity_code = large_entity_list[i].entity_code;
412 		if (large_entity_list[i].skip_flag) {
413 			if (!flag_ext) {
414 				rc = get_entity_hdr(outbuf, large_entity_code,
415 						    dbg_buff.size, &entity_hdr);
416 				if (rc)
417 					cudbg_hdr->hdr_flags = rc;
418 			} else {
419 				rc = get_next_ext_entity_hdr(outbuf, &ext_size,
420 							     &dbg_buff,
421 							     &entity_hdr);
422 				if (rc)
423 					goto err;
424 
425 				dbg_buff.offset +=
426 					sizeof(struct cudbg_entity_hdr);
427 			}
428 
429 			/* If fw_attach is 0, then skip entities which
430 			 * communicate with the firmware
431 			 */
432 			if (!is_fw_attached(cudbg_init) &&
433 			    (entity_list[large_entity_code].flag &
434 			    (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
435 				if (cudbg_init->verbose)
436 					cudbg_init->print("Skipping %s entity, "\
437 						  "because fw_attach "\
438 						  "is 0\n",
439 						  entity_list[large_entity_code]
440 						  .name);
441 				continue;
442 			}
443 
444 			entity_hdr->entity_type = large_entity_code;
445 			entity_hdr->start_offset = dbg_buff.offset;
446 			if (cudbg_init->verbose)
447 				cudbg_init->print("Re-trying debug entity: %s\n",
448 					  entity_list[large_entity_code].name);
449 
450 			memset(&cudbg_err, 0, sizeof(struct cudbg_error));
451 			rc = process_entity[large_entity_code - 1](cudbg_init,
452 								   &dbg_buff,
453 								   &cudbg_err);
454 			if (rc) {
455 				entity_hdr->size = 0;
456 				dbg_buff.offset = entity_hdr->start_offset;
457 			} else
458 				align_debug_buffer(&dbg_buff, entity_hdr);
459 
460 			if (cudbg_err.sys_err)
461 				rc = CUDBG_SYSTEM_ERROR;
462 
463 			entity_hdr->hdr_flags = rc;
464 			entity_hdr->sys_err = cudbg_err.sys_err;
465 			entity_hdr->sys_warn =	cudbg_err.sys_warn;
466 
467 			/* We don't want to include ext entity size in global
468 			 * header
469 			 */
470 			if (!flag_ext)
471 				total_size += entity_hdr->size;
472 
473 			cudbg_hdr->data_len = total_size;
474 			*outbuf_size = total_size;
475 
476 			/* consider the size of the ext entity header and
477 			 * data also
478 			 */
479 			if (flag_ext) {
480 				ext_size += (sizeof(struct cudbg_entity_hdr) +
481 						   entity_hdr->size);
482 				entity_hdr->start_offset -=
483 							cudbg_hdr->data_len;
484 				ext_entity_hdr->size = ext_size;
485 				entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
486 			}
487 
488 			if (cudbg_init->use_flash) {
489 				if (flag_ext)
490 					wr_entity_to_flash(handle,
491 							   &dbg_buff,
492 							   ext_entity_hdr->
493 							   start_offset,
494 							   entity_hdr->size,
495 							   CUDBG_EXT_ENTITY,
496 							   ext_size);
497 				else
498 					wr_entity_to_flash(handle,
499 							   &dbg_buff,
500 							   entity_hdr->
501 							   start_offset,
502 							   entity_hdr->
503 							   size,
504 							   large_entity_list[i].
505 							   entity_code,
506 							   ext_size);
507 			}
508 		}
509 	}
510 
511 	cudbg_hdr->data_len = total_size;
512 	*outbuf_size = total_size;
513 
514 	if (flag_ext)
515 		*outbuf_size += ext_size;
516 
517 	return 0;
518 err:
519 	return rc;
520 }
521 
522 void reset_skip_entity(void)
523 {
524 	int i;
525 
526 	for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
527 		large_entity_list[i].skip_flag = 0;
528 }
529 
530 void skip_entity(int entity_code)
531 {
532 	int i;
533 	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
534 	     i++) {
535 		if (large_entity_list[i].entity_code == entity_code)
536 			large_entity_list[i].skip_flag = 1;
537 	}
538 }
539 
540 int is_large_entity(int entity_code)
541 {
542 	int i;
543 
544 	for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
545 	     i++) {
546 		if (large_entity_list[i].entity_code == entity_code)
547 			return 1;
548 	}
549 	return 0;
550 }
551 
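/*
 * Return a pointer to the i-th entity header (1-based) in the fixed header
 * table that immediately follows the global cudbg_hdr.
 */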
552 int get_entity_hdr(void *outbuf, int i, u32 size,
553 		   struct cudbg_entity_hdr **entity_hdr)
554 {
555 	int rc = 0;
556 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
557 
558 	if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr)*i) > size)
559 		return CUDBG_STATUS_SMALL_BUFF;
560 
561 	*entity_hdr = (struct cudbg_entity_hdr *)
562 		      ((char *)outbuf+cudbg_hdr->hdr_len +
563 		       (sizeof(struct cudbg_entity_hdr)*(i-1)));
564 	return rc;
565 }
566 
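/*
 * The collect_*() routines below share a common pattern: reserve a scratch
 * buffer from the tail of the output buffer, read the relevant hardware
 * state into it, emit a compression header, and compress the scratch data
 * into the entity's slot in the dump.
 */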
567 static int collect_rss(struct cudbg_init *pdbg_init,
568 		       struct cudbg_buffer *dbg_buff,
569 		       struct cudbg_error *cudbg_err)
570 {
571 	struct adapter *padap = pdbg_init->adap;
572 	struct cudbg_buffer scratch_buff;
573 	u32 size;
574 	int rc = 0;
575 
576 	size = padap->chip_params->rss_nentries * sizeof(u16);
577 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
578 	if (rc)
579 		goto err;
580 
581 	rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
582 	if (rc) {
583 		if (pdbg_init->verbose)
584 			pdbg_init->print("%s(), t4_read_rss failed!, rc: %d\n",
585 				 __func__, rc);
586 		cudbg_err->sys_err = rc;
587 		goto err1;
588 	}
589 
590 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
591 	if (rc)
592 		goto err1;
593 
594 	rc = compress_buff(&scratch_buff, dbg_buff);
595 
596 err1:
597 	release_scratch_buff(&scratch_buff, dbg_buff);
598 err:
599 	return rc;
600 }
601 
602 static int collect_sw_state(struct cudbg_init *pdbg_init,
603 			    struct cudbg_buffer *dbg_buff,
604 			    struct cudbg_error *cudbg_err)
605 {
606 	struct adapter *padap = pdbg_init->adap;
607 	struct cudbg_buffer scratch_buff;
608 	struct sw_state *swstate;
609 	u32 size;
610 	int rc = 0;
611 
612 	size = sizeof(struct sw_state);
613 
614 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
615 	if (rc)
616 		goto err;
617 
618 	swstate = (struct sw_state *) scratch_buff.data;
619 
620 	swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
621 	snprintf(swstate->caller_string, sizeof(swstate->caller_string), "%s",
622 	    "FreeBSD");
623 	swstate->os_type = 0;
624 
625 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
626 	if (rc)
627 		goto err1;
628 
629 	rc = compress_buff(&scratch_buff, dbg_buff);
630 
631 err1:
632 	release_scratch_buff(&scratch_buff, dbg_buff);
633 err:
634 	return rc;
635 }
636 
637 static int collect_ddp_stats(struct cudbg_init *pdbg_init,
638 			     struct cudbg_buffer *dbg_buff,
639 			     struct cudbg_error *cudbg_err)
640 {
641 	struct adapter *padap = pdbg_init->adap;
642 	struct cudbg_buffer scratch_buff;
643 	struct tp_usm_stats  *tp_usm_stats_buff;
644 	u32 size;
645 	int rc = 0;
646 
647 	size = sizeof(struct tp_usm_stats);
648 
649 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
650 	if (rc)
651 		goto err;
652 
653 	tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;
654 
655 	/* spin_lock(&padap->stats_lock);	TODO*/
656 	t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
657 	/* spin_unlock(&padap->stats_lock);	TODO*/
658 
659 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
660 	if (rc)
661 		goto err1;
662 
663 	rc = compress_buff(&scratch_buff, dbg_buff);
664 
665 err1:
666 	release_scratch_buff(&scratch_buff, dbg_buff);
667 err:
668 	return rc;
669 }
670 
671 static int collect_ulptx_la(struct cudbg_init *pdbg_init,
672 			    struct cudbg_buffer *dbg_buff,
673 			    struct cudbg_error *cudbg_err)
674 {
675 	struct adapter *padap = pdbg_init->adap;
676 	struct cudbg_buffer scratch_buff;
677 	struct struct_ulptx_la *ulptx_la_buff;
678 	u32 size, i, j;
679 	int rc = 0;
680 
681 	size = sizeof(struct struct_ulptx_la);
682 
683 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
684 	if (rc)
685 		goto err;
686 
687 	ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;
688 
689 	for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
690 		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
691 						      A_ULP_TX_LA_RDPTR_0 +
692 						      0x10 * i);
693 		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
694 						      A_ULP_TX_LA_WRPTR_0 +
695 						      0x10 * i);
696 		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
697 						       A_ULP_TX_LA_RDDATA_0 +
698 						       0x10 * i);
699 		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
700 			ulptx_la_buff->rd_data[i][j] =
701 				t4_read_reg(padap,
702 					    A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
703 		}
704 	}
705 
706 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
707 	if (rc)
708 		goto err1;
709 
710 	rc = compress_buff(&scratch_buff, dbg_buff);
711 
712 err1:
713 	release_scratch_buff(&scratch_buff, dbg_buff);
714 err:
715 	return rc;
716 
717 }
718 
719 static int collect_ulprx_la(struct cudbg_init *pdbg_init,
720 			    struct cudbg_buffer *dbg_buff,
721 			    struct cudbg_error *cudbg_err)
722 {
723 	struct adapter *padap = pdbg_init->adap;
724 	struct cudbg_buffer scratch_buff;
725 	struct struct_ulprx_la *ulprx_la_buff;
726 	u32 size;
727 	int rc = 0;
728 
729 	size = sizeof(struct struct_ulprx_la);
730 
731 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
732 	if (rc)
733 		goto err;
734 
735 	ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
736 	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
737 	ulprx_la_buff->size = ULPRX_LA_SIZE;
738 
739 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
740 	if (rc)
741 		goto err1;
742 
743 	rc = compress_buff(&scratch_buff, dbg_buff);
744 
745 err1:
746 	release_scratch_buff(&scratch_buff, dbg_buff);
747 err:
748 	return rc;
749 }
750 
751 static int collect_cpl_stats(struct cudbg_init *pdbg_init,
752 			     struct cudbg_buffer *dbg_buff,
753 			     struct cudbg_error *cudbg_err)
754 {
755 	struct adapter *padap = pdbg_init->adap;
756 	struct cudbg_buffer scratch_buff;
757 	struct struct_tp_cpl_stats *tp_cpl_stats_buff;
758 	u32 size;
759 	int rc = 0;
760 
761 	size = sizeof(struct struct_tp_cpl_stats);
762 
763 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
764 	if (rc)
765 		goto err;
766 
767 	tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
768 	tp_cpl_stats_buff->nchan = padap->chip_params->nchan;
769 
770 	/* spin_lock(&padap->stats_lock);	TODO*/
771 	t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
772 	/* spin_unlock(&padap->stats_lock);	TODO*/
773 
774 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
775 	if (rc)
776 		goto err1;
777 
778 	rc = compress_buff(&scratch_buff, dbg_buff);
779 
780 err1:
781 	release_scratch_buff(&scratch_buff, dbg_buff);
782 err:
783 	return rc;
784 }
785 
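/*
 * Write-coalescing statistics are derived from the SGE total/match counters
 * and are only available on T5 and later; on T4 the counts are reported as
 * zero.
 */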
786 static int collect_wc_stats(struct cudbg_init *pdbg_init,
787 			    struct cudbg_buffer *dbg_buff,
788 			    struct cudbg_error *cudbg_err)
789 {
790 	struct adapter *padap = pdbg_init->adap;
791 	struct cudbg_buffer scratch_buff;
792 	struct struct_wc_stats *wc_stats_buff;
793 	u32 val1;
794 	u32 val2;
795 	u32 size;
796 
797 	int rc = 0;
798 
799 	size = sizeof(struct struct_wc_stats);
800 
801 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
802 	if (rc)
803 		goto err;
804 
805 	wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;
806 
807 	if (!is_t4(padap)) {
808 		val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
809 		val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
810 		wc_stats_buff->wr_cl_success = val1 - val2;
811 		wc_stats_buff->wr_cl_fail = val2;
812 	} else {
813 		wc_stats_buff->wr_cl_success = 0;
814 		wc_stats_buff->wr_cl_fail = 0;
815 	}
816 
817 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
818 	if (rc)
819 		goto err1;
820 
821 	rc = compress_buff(&scratch_buff, dbg_buff);
822 err1:
823 	release_scratch_buff(&scratch_buff, dbg_buff);
824 err:
825 	return rc;
826 }
827 
828 static int mem_desc_cmp(const void *a, const void *b)
829 {
830 	return ((const struct struct_mem_desc *)a)->base -
831 		((const struct struct_mem_desc *)b)->base;
832 }
833 
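/*
 * Build a sorted map of adapter memory: which memories (EDRAM0/1, external
 * memory) are enabled and their address ranges, followed by the hardware
 * regions (DBQ/IMSG contexts, TCB, PMTX/PMRX, LE hash, ULP regions, etc.)
 * carved out of them.
 */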
834 static int fill_meminfo(struct adapter *padap,
835 			struct struct_meminfo *meminfo_buff)
836 {
837 	struct struct_mem_desc *md;
838 	u32 size, lo, hi;
839 	u32 used, alloc;
840 	int n, i, rc = 0;
841 
842 	size = sizeof(struct struct_meminfo);
843 
844 	memset(meminfo_buff->avail, 0,
845 	       ARRAY_SIZE(meminfo_buff->avail) *
846 	       sizeof(struct struct_mem_desc));
847 	memset(meminfo_buff->mem, 0,
848 	       (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
849 	md  = meminfo_buff->mem;
850 
851 	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
852 		meminfo_buff->mem[i].limit = 0;
853 		meminfo_buff->mem[i].idx = i;
854 	}
855 
856 	i = 0;
857 
858 	lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
859 
860 	if (lo & F_EDRAM0_ENABLE) {
861 		hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
862 		meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
863 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
864 					       (G_EDRAM0_SIZE(hi) << 20);
865 		meminfo_buff->avail[i].idx = 0;
866 		i++;
867 	}
868 
869 	if (lo & F_EDRAM1_ENABLE) {
870 		hi =  t4_read_reg(padap, A_MA_EDRAM1_BAR);
871 		meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
872 		meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
873 					       (G_EDRAM1_SIZE(hi) << 20);
874 		meminfo_buff->avail[i].idx = 1;
875 		i++;
876 	}
877 
878 	if (is_t5(padap)) {
879 		if (lo & F_EXT_MEM0_ENABLE) {
880 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
881 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
882 			meminfo_buff->avail[i].limit =
883 				meminfo_buff->avail[i].base +
884 				(G_EXT_MEM_SIZE(hi) << 20);
885 			meminfo_buff->avail[i].idx = 3;
886 			i++;
887 		}
888 
889 		if (lo & F_EXT_MEM1_ENABLE) {
890 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
891 			meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
892 			meminfo_buff->avail[i].limit =
893 				meminfo_buff->avail[i].base +
894 				(G_EXT_MEM1_SIZE(hi) << 20);
895 			meminfo_buff->avail[i].idx = 4;
896 			i++;
897 		}
898 	} else if (is_t6(padap)) {
899 		if (lo & F_EXT_MEM_ENABLE) {
900 			hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
901 			meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
902 			meminfo_buff->avail[i].limit =
903 				meminfo_buff->avail[i].base +
904 				(G_EXT_MEM_SIZE(hi) << 20);
905 			meminfo_buff->avail[i].idx = 2;
906 			i++;
907 		}
908 	}
909 
910 	if (!i) {				   /* no memory available */
911 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
912 		goto err;
913 	}
914 
915 	meminfo_buff->avail_c = i;
916 	qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
917 	    mem_desc_cmp);
918 	(md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
919 	(md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
920 	(md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
921 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
922 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
923 	(md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
924 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
925 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
926 	(md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);
927 
928 	/* the next few have explicit upper bounds */
929 	md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
930 	md->limit = md->base - 1 +
931 		    t4_read_reg(padap,
932 				A_TP_PMM_TX_PAGE_SIZE) *
933 				G_PMTXMAXPAGE(t4_read_reg(padap,
934 							  A_TP_PMM_TX_MAX_PAGE)
935 					     );
936 	md++;
937 
938 	md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
939 	md->limit = md->base - 1 +
940 		    t4_read_reg(padap,
941 				A_TP_PMM_RX_PAGE_SIZE) *
942 				G_PMRXMAXPAGE(t4_read_reg(padap,
943 							  A_TP_PMM_RX_MAX_PAGE)
944 					      );
945 	md++;
946 	if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
947 		if (chip_id(padap) <= CHELSIO_T5) {
948 			hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
949 			md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
950 		} else {
951 			hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
952 			md->base = t4_read_reg(padap,
953 					       A_LE_DB_HASH_TBL_BASE_ADDR);
954 		}
955 		md->limit = 0;
956 	} else {
957 		md->base = 0;
958 		md->idx = ARRAY_SIZE(region);  /* hide it */
959 	}
960 	md++;
961 #define ulp_region(reg) \
962 	{\
963 		md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
964 		(md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
965 	}
966 
967 	ulp_region(RX_ISCSI);
968 	ulp_region(RX_TDDP);
969 	ulp_region(TX_TPT);
970 	ulp_region(RX_STAG);
971 	ulp_region(RX_RQ);
972 	ulp_region(RX_RQUDP);
973 	ulp_region(RX_PBL);
974 	ulp_region(TX_PBL);
975 #undef ulp_region
976 	md->base = 0;
977 	md->idx = ARRAY_SIZE(region);
978 	if (!is_t4(padap)) {
979 		u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
980 		u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);
981 		if (is_t5(padap)) {
982 			if (sge_ctrl & F_VFIFO_ENABLE)
983 				size = G_DBVFIFO_SIZE(fifo_size);
984 		} else
985 			size = G_T6_DBVFIFO_SIZE(fifo_size);
986 
987 		if (size) {
988 			md->base = G_BASEADDR(t4_read_reg(padap,
989 							  A_SGE_DBVFIFO_BADDR));
990 			md->limit = md->base + (size << 2) - 1;
991 		}
992 	}
993 
994 	md++;
995 
996 	md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
997 	md->limit = 0;
998 	md++;
999 	md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
1000 	md->limit = 0;
1001 	md++;
1002 #ifndef __NO_DRIVER_OCQ_SUPPORT__
1003 	/*md->base = padap->vres.ocq.start;*/
1004 	/*if (adap->vres.ocq.size)*/
1005 	/*	  md->limit = md->base + adap->vres.ocq.size - 1;*/
1006 	/*else*/
1007 	md->idx = ARRAY_SIZE(region);  /* hide it */
1008 	md++;
1009 #endif
1010 
1011 	/* add any address-space holes, there can be up to 3 */
1012 	for (n = 0; n < i - 1; n++)
1013 		if (meminfo_buff->avail[n].limit <
1014 		    meminfo_buff->avail[n + 1].base)
1015 			(md++)->base = meminfo_buff->avail[n].limit;
1016 
1017 	if (meminfo_buff->avail[n].limit)
1018 		(md++)->base = meminfo_buff->avail[n].limit;
1019 
1020 	n = (int) (md - meminfo_buff->mem);
1021 	meminfo_buff->mem_c = n;
1022 
1023 	qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
1024 	    mem_desc_cmp);
1025 
1026 	lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
1027 	hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
1028 	meminfo_buff->up_ram_lo = lo;
1029 	meminfo_buff->up_ram_hi = hi;
1030 
1031 	lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
1032 	hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
1033 	meminfo_buff->up_extmem2_lo = lo;
1034 	meminfo_buff->up_extmem2_hi = hi;
1035 
1036 	lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
1037 	meminfo_buff->rx_pages_data[0] =  G_PMRXMAXPAGE(lo);
1038 	meminfo_buff->rx_pages_data[1] =
1039 		t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
1040 	meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1;
1041 
1042 	lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
1043 	hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
1044 	meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
1045 	meminfo_buff->tx_pages_data[1] =
1046 		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
1047 	meminfo_buff->tx_pages_data[2] =
1048 		hi >= (1 << 20) ? 'M' : 'K';
1049 	meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);
1050 
1051 	for (i = 0; i < 4; i++) {
1052 		if (chip_id(padap) > CHELSIO_T5)
1053 			lo = t4_read_reg(padap,
1054 					 A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
1055 		else
1056 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
1057 		if (is_t5(padap)) {
1058 			used = G_T5_USED(lo);
1059 			alloc = G_T5_ALLOC(lo);
1060 		} else {
1061 			used = G_USED(lo);
1062 			alloc = G_ALLOC(lo);
1063 		}
1064 		meminfo_buff->port_used[i] = used;
1065 		meminfo_buff->port_alloc[i] = alloc;
1066 	}
1067 
1068 	for (i = 0; i < padap->chip_params->nchan; i++) {
1069 		if (chip_id(padap) > CHELSIO_T5)
1070 			lo = t4_read_reg(padap,
1071 					 A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
1072 		else
1073 			lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
1074 		if (is_t5(padap)) {
1075 			used = G_T5_USED(lo);
1076 			alloc = G_T5_ALLOC(lo);
1077 		} else {
1078 			used = G_USED(lo);
1079 			alloc = G_ALLOC(lo);
1080 		}
1081 		meminfo_buff->loopback_used[i] = used;
1082 		meminfo_buff->loopback_alloc[i] = alloc;
1083 	}
1084 err:
1085 	return rc;
1086 }
1087 
1088 static int collect_meminfo(struct cudbg_init *pdbg_init,
1089 			   struct cudbg_buffer *dbg_buff,
1090 			   struct cudbg_error *cudbg_err)
1091 {
1092 	struct adapter *padap = pdbg_init->adap;
1093 	struct cudbg_buffer scratch_buff;
1094 	struct struct_meminfo *meminfo_buff;
1095 	int rc = 0;
1096 	u32 size;
1097 
1098 	size = sizeof(struct struct_meminfo);
1099 
1100 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1101 	if (rc)
1102 		goto err;
1103 
1104 	meminfo_buff = (struct struct_meminfo *)scratch_buff.data;
1105 
1106 	rc = fill_meminfo(padap, meminfo_buff);
1107 	if (rc)
1108 		goto err;
1109 
1110 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1111 	if (rc)
1112 		goto err1;
1113 
1114 	rc = compress_buff(&scratch_buff, dbg_buff);
1115 err1:
1116 	release_scratch_buff(&scratch_buff, dbg_buff);
1117 err:
1118 	return rc;
1119 }
1120 
1121 static int collect_lb_stats(struct cudbg_init *pdbg_init,
1122 			    struct cudbg_buffer *dbg_buff,
1123 			    struct cudbg_error *cudbg_err)
1124 {
1125 	struct adapter *padap = pdbg_init->adap;
1126 	struct cudbg_buffer scratch_buff;
1127 	struct lb_port_stats *tmp_stats;
1128 	struct struct_lb_stats *lb_stats_buff;
1129 	u32 i, n, size;
1130 	int rc = 0;
1131 
1132 	rc = padap->params.nports;
1133 	if (rc < 0)
1134 		goto err;
1135 
1136 	n = rc;
1137 	size = sizeof(struct struct_lb_stats) +
1138 	       n * sizeof(struct lb_port_stats);
1139 
1140 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1141 	if (rc)
1142 		goto err;
1143 
1144 	lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;
1145 
1146 	lb_stats_buff->nchan = n;
1147 	tmp_stats = lb_stats_buff->s;
1148 
1149 	for (i = 0; i < n; i += 2, tmp_stats += 2) {
1150 		t4_get_lb_stats(padap, i, tmp_stats);
1151 		t4_get_lb_stats(padap, i + 1, tmp_stats+1);
1152 	}
1153 
1154 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1155 	if (rc)
1156 		goto err1;
1157 
1158 	rc = compress_buff(&scratch_buff, dbg_buff);
1159 err1:
1160 	release_scratch_buff(&scratch_buff, dbg_buff);
1161 err:
1162 	return rc;
1163 }
1164 
1165 static int collect_rdma_stats(struct cudbg_init *pdbg_init,
1166 			      struct cudbg_buffer *dbg_buff,
1167 			      struct cudbg_error *cudbg_err)
1168 {
1169 	struct adapter *padap = pdbg_init->adap;
1170 	struct cudbg_buffer scratch_buff;
1171 	struct tp_rdma_stats *rdma_stats_buff;
1172 	u32 size;
1173 	int rc = 0;
1174 
1175 	size = sizeof(struct tp_rdma_stats);
1176 
1177 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1178 	if (rc)
1179 		goto err;
1180 
1181 	rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;
1182 
1183 	/* spin_lock(&padap->stats_lock);	TODO*/
1184 	t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
1185 	/* spin_unlock(&padap->stats_lock);	TODO*/
1186 
1187 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1188 	if (rc)
1189 		goto err1;
1190 
1191 	rc = compress_buff(&scratch_buff, dbg_buff);
1192 err1:
1193 	release_scratch_buff(&scratch_buff, dbg_buff);
1194 err:
1195 	return rc;
1196 }
1197 
1198 static int collect_clk_info(struct cudbg_init *pdbg_init,
1199 			    struct cudbg_buffer *dbg_buff,
1200 			    struct cudbg_error *cudbg_err)
1201 {
1202 	struct cudbg_buffer scratch_buff;
1203 	struct adapter *padap = pdbg_init->adap;
1204 	struct struct_clk_info *clk_info_buff;
1205 	u64 tp_tick_us;
1206 	int size;
1207 	int rc = 0;
1208 
1209 	if (!padap->params.vpd.cclk) {
1210 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1211 		goto err;
1212 	}
1213 
1214 	size = sizeof(struct struct_clk_info);
1215 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1216 	if (rc)
1217 		goto err;
1218 
1219 	clk_info_buff = (struct struct_clk_info *) scratch_buff.data;
1220 
1221 	clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk;  /* in ps */
1222 
1223 	clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
1224 	clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
1225 	clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
1226 	tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1227 	/* in us */
1228 	clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
1229 				      clk_info_buff->dack_re) / 1000000) *
1230 				     t4_read_reg(padap, A_TP_DACK_TIMER);
1231 
1232 	clk_info_buff->retransmit_min =
1233 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
1234 	clk_info_buff->retransmit_max =
1235 		tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);
1236 
1237 	clk_info_buff->persist_timer_min =
1238 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
1239 	clk_info_buff->persist_timer_max =
1240 		tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);
1241 
1242 	clk_info_buff->keepalive_idle_timer =
1243 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
1244 	clk_info_buff->keepalive_interval =
1245 		tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);
1246 
1247 	clk_info_buff->initial_srtt =
1248 		tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
1249 	clk_info_buff->finwait2_timer =
1250 		tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);
1251 
1252 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1253 
1254 	if (rc)
1255 		goto err1;
1256 
1257 	rc = compress_buff(&scratch_buff, dbg_buff);
1258 err1:
1259 	release_scratch_buff(&scratch_buff, dbg_buff);
1260 err:
1261 	return rc;
1262 
1263 }
1264 
1265 static int collect_macstats(struct cudbg_init *pdbg_init,
1266 			    struct cudbg_buffer *dbg_buff,
1267 			    struct cudbg_error *cudbg_err)
1268 {
1269 	struct adapter *padap = pdbg_init->adap;
1270 	struct cudbg_buffer scratch_buff;
1271 	struct struct_mac_stats_rev1 *mac_stats_buff;
1272 	u32 i, n, size;
1273 	int rc = 0;
1274 
1275 	rc = padap->params.nports;
1276 	if (rc < 0)
1277 		goto err;
1278 
1279 	n = rc;
1280 	size = sizeof(struct struct_mac_stats_rev1);
1281 
1282 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1283 	if (rc)
1284 		goto err;
1285 
1286 	mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;
1287 
1288 	mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1289 	mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
1290 	mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
1291 				       sizeof(struct cudbg_ver_hdr);
1292 
1293 	mac_stats_buff->port_count = n;
1294 	for (i = 0; i <  mac_stats_buff->port_count; i++)
1295 		t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
1296 
1297 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1298 	if (rc)
1299 		goto err1;
1300 
1301 	rc = compress_buff(&scratch_buff, dbg_buff);
1302 err1:
1303 	release_scratch_buff(&scratch_buff, dbg_buff);
1304 err:
1305 	return rc;
1306 }
1307 
1308 static int collect_cim_pif_la(struct cudbg_init *pdbg_init,
1309 			      struct cudbg_buffer *dbg_buff,
1310 			      struct cudbg_error *cudbg_err)
1311 {
1312 	struct adapter *padap = pdbg_init->adap;
1313 	struct cudbg_buffer scratch_buff;
1314 	struct cim_pif_la *cim_pif_la_buff;
1315 	u32 size;
1316 	int rc = 0;
1317 
1318 	size = sizeof(struct cim_pif_la) +
1319 	       2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1320 
1321 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1322 	if (rc)
1323 		goto err;
1324 
1325 	cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
1326 	cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1327 
1328 	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1329 			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1330 			   NULL, NULL);
1331 
1332 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1333 	if (rc)
1334 		goto err1;
1335 
1336 	rc = compress_buff(&scratch_buff, dbg_buff);
1337 err1:
1338 	release_scratch_buff(&scratch_buff, dbg_buff);
1339 err:
1340 	return rc;
1341 }
1342 
1343 static int collect_tp_la(struct cudbg_init *pdbg_init,
1344 			 struct cudbg_buffer *dbg_buff,
1345 			 struct cudbg_error *cudbg_err)
1346 {
1347 	struct adapter *padap = pdbg_init->adap;
1348 	struct cudbg_buffer scratch_buff;
1349 	struct struct_tp_la *tp_la_buff;
1350 	u32 size;
1351 	int rc = 0;
1352 
1353 	size = sizeof(struct struct_tp_la) + TPLA_SIZE *  sizeof(u64);
1354 
1355 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1356 	if (rc)
1357 		goto err;
1358 
1359 	tp_la_buff = (struct struct_tp_la *) scratch_buff.data;
1360 
1361 	tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
1362 	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1363 
1364 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1365 	if (rc)
1366 		goto err1;
1367 
1368 	rc = compress_buff(&scratch_buff, dbg_buff);
1369 err1:
1370 	release_scratch_buff(&scratch_buff, dbg_buff);
1371 err:
1372 	return rc;
1373 }
1374 
1375 static int collect_fcoe_stats(struct cudbg_init *pdbg_init,
1376 			      struct cudbg_buffer *dbg_buff,
1377 			      struct cudbg_error *cudbg_err)
1378 {
1379 	struct adapter *padap = pdbg_init->adap;
1380 	struct cudbg_buffer scratch_buff;
1381 	struct struct_tp_fcoe_stats  *tp_fcoe_stats_buff;
1382 	u32 size;
1383 	int rc = 0;
1384 
1385 	size = sizeof(struct struct_tp_fcoe_stats);
1386 
1387 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1388 	if (rc)
1389 		goto err;
1390 
1391 	tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
1392 
1393 	/* spin_lock(&padap->stats_lock);	TODO*/
1394 	t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
1395 	t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
1396 	if (padap->chip_params->nchan == NCHAN) {
1397 		t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
1398 		t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
1399 	}
1400 	/* spin_unlock(&padap->stats_lock);	TODO*/
1401 
1402 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1403 	if (rc)
1404 		goto err1;
1405 
1406 	rc = compress_buff(&scratch_buff, dbg_buff);
1407 err1:
1408 	release_scratch_buff(&scratch_buff, dbg_buff);
1409 err:
1410 	return rc;
1411 }
1412 
1413 static int collect_tp_err_stats(struct cudbg_init *pdbg_init,
1414 				struct cudbg_buffer *dbg_buff,
1415 				struct cudbg_error *cudbg_err)
1416 {
1417 	struct adapter *padap = pdbg_init->adap;
1418 	struct cudbg_buffer scratch_buff;
1419 	struct struct_tp_err_stats *tp_err_stats_buff;
1420 	u32 size;
1421 	int rc = 0;
1422 
1423 	size = sizeof(struct struct_tp_err_stats);
1424 
1425 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1426 	if (rc)
1427 		goto err;
1428 
1429 	tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
1430 
1431 	/* spin_lock(&padap->stats_lock);	TODO*/
1432 	t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
1433 	/* spin_unlock(&padap->stats_lock);	TODO*/
1434 	tp_err_stats_buff->nchan = padap->chip_params->nchan;
1435 
1436 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1437 	if (rc)
1438 		goto err1;
1439 
1440 	rc = compress_buff(&scratch_buff, dbg_buff);
1441 err1:
1442 	release_scratch_buff(&scratch_buff, dbg_buff);
1443 err:
1444 	return rc;
1445 }
1446 
1447 static int collect_tcp_stats(struct cudbg_init *pdbg_init,
1448 			     struct cudbg_buffer *dbg_buff,
1449 			     struct cudbg_error *cudbg_err)
1450 {
1451 	struct adapter *padap = pdbg_init->adap;
1452 	struct cudbg_buffer scratch_buff;
1453 	struct struct_tcp_stats *tcp_stats_buff;
1454 	u32 size;
1455 	int rc = 0;
1456 
1457 	size = sizeof(struct struct_tcp_stats);
1458 
1459 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1460 	if (rc)
1461 		goto err;
1462 
1463 	tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
1464 
1465 	/* spin_lock(&padap->stats_lock);	TODO*/
1466 	t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
1467 	/* spin_unlock(&padap->stats_lock);	TODO*/
1468 
1469 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1470 	if (rc)
1471 		goto err1;
1472 
1473 	rc = compress_buff(&scratch_buff, dbg_buff);
1474 err1:
1475 	release_scratch_buff(&scratch_buff, dbg_buff);
1476 err:
1477 	return rc;
1478 }
1479 
1480 static int collect_hw_sched(struct cudbg_init *pdbg_init,
1481 			    struct cudbg_buffer *dbg_buff,
1482 			    struct cudbg_error *cudbg_err)
1483 {
1484 	struct adapter *padap = pdbg_init->adap;
1485 	struct cudbg_buffer scratch_buff;
1486 	struct struct_hw_sched *hw_sched_buff;
1487 	u32 size;
1488 	int i, rc = 0;
1489 
1490 	if (!padap->params.vpd.cclk) {
1491 		rc =  CUDBG_STATUS_CCLK_NOT_DEFINED;
1492 		goto err;
1493 	}
1494 
1495 	size = sizeof(struct struct_hw_sched);
1496 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1497 	if (rc)
1498 		goto err;
1499 
1500 	hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
1501 
1502 	hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
1503 	hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
1504 	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1505 
1506 	for (i = 0; i < NTX_SCHED; ++i) {
1507 		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1508 		    &hw_sched_buff->ipg[i], 1);
1509 	}
1510 
1511 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1512 	if (rc)
1513 		goto err1;
1514 
1515 	rc = compress_buff(&scratch_buff, dbg_buff);
1516 err1:
1517 	release_scratch_buff(&scratch_buff, dbg_buff);
1518 err:
1519 	return rc;
1520 }
1521 
1522 static int collect_pm_stats(struct cudbg_init *pdbg_init,
1523 			    struct cudbg_buffer *dbg_buff,
1524 			    struct cudbg_error *cudbg_err)
1525 {
1526 	struct adapter *padap = pdbg_init->adap;
1527 	struct cudbg_buffer scratch_buff;
1528 	struct struct_pm_stats *pm_stats_buff;
1529 	u32 size;
1530 	int rc = 0;
1531 
1532 	size = sizeof(struct struct_pm_stats);
1533 
1534 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1535 	if (rc)
1536 		goto err;
1537 
1538 	pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
1539 
1540 	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1541 	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1542 
1543 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1544 	if (rc)
1545 		goto err1;
1546 
1547 	rc = compress_buff(&scratch_buff, dbg_buff);
1548 err1:
1549 	release_scratch_buff(&scratch_buff, dbg_buff);
1550 err:
1551 	return rc;
1552 }
1553 
1554 static int collect_path_mtu(struct cudbg_init *pdbg_init,
1555 			    struct cudbg_buffer *dbg_buff,
1556 			    struct cudbg_error *cudbg_err)
1557 {
1558 	struct adapter *padap = pdbg_init->adap;
1559 	struct cudbg_buffer scratch_buff;
1560 	u32 size;
1561 	int rc = 0;
1562 
1563 	size = NMTUS  * sizeof(u16);
1564 
1565 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1566 	if (rc)
1567 		goto err;
1568 
1569 	t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
1570 
1571 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1572 	if (rc)
1573 		goto err1;
1574 
1575 	rc = compress_buff(&scratch_buff, dbg_buff);
1576 err1:
1577 	release_scratch_buff(&scratch_buff, dbg_buff);
1578 err:
1579 	return rc;
1580 }
1581 
1582 static int collect_rss_key(struct cudbg_init *pdbg_init,
1583 			   struct cudbg_buffer *dbg_buff,
1584 			   struct cudbg_error *cudbg_err)
1585 {
1586 	struct adapter *padap = pdbg_init->adap;
1587 	struct cudbg_buffer scratch_buff;
1588 	u32 size;
1589 
1590 	int rc = 0;
1591 
1592 	size = 10  * sizeof(u32);
1593 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1594 	if (rc)
1595 		goto err;
1596 
1597 	t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
1598 
1599 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1600 	if (rc)
1601 		goto err1;
1602 
1603 	rc = compress_buff(&scratch_buff, dbg_buff);
1604 err1:
1605 	release_scratch_buff(&scratch_buff, dbg_buff);
1606 err:
1607 	return rc;
1608 }
1609 
1610 static int collect_rss_config(struct cudbg_init *pdbg_init,
1611 			      struct cudbg_buffer *dbg_buff,
1612 			      struct cudbg_error *cudbg_err)
1613 {
1614 	struct adapter *padap = pdbg_init->adap;
1615 	struct cudbg_buffer scratch_buff;
1616 	struct rss_config *rss_conf;
1617 	int rc;
1618 	u32 size;
1619 
1620 	size = sizeof(struct rss_config);
1621 
1622 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1623 	if (rc)
1624 		goto err;
1625 
1626 	rss_conf =  (struct rss_config *)scratch_buff.data;
1627 
1628 	rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
1629 	rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
1630 	rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
1631 	rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
1632 	rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
1633 	rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
1634 	rss_conf->chip = padap->params.chipid;
1635 
1636 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1637 	if (rc)
1638 		goto err1;
1639 
1640 	rc = compress_buff(&scratch_buff, dbg_buff);
1641 
1642 err1:
1643 	release_scratch_buff(&scratch_buff, dbg_buff);
1644 err:
1645 	return rc;
1646 }
1647 
1648 static int collect_rss_vf_config(struct cudbg_init *pdbg_init,
1649 				 struct cudbg_buffer *dbg_buff,
1650 				 struct cudbg_error *cudbg_err)
1651 {
1652 	struct adapter *padap = pdbg_init->adap;
1653 	struct cudbg_buffer scratch_buff;
1654 	struct rss_vf_conf *vfconf;
1655 	int vf, rc, vf_count;
1656 	u32 size;
1657 
1658 	vf_count = padap->chip_params->vfcount;
1659 	size = vf_count * sizeof(*vfconf);
1660 
1661 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1662 	if (rc)
1663 		goto err;
1664 
1665 	vfconf =  (struct rss_vf_conf *)scratch_buff.data;
1666 
1667 	for (vf = 0; vf < vf_count; vf++) {
1668 		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1669 				      &vfconf[vf].rss_vf_vfh, 1);
1670 	}
1671 
1672 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1673 	if (rc)
1674 		goto err1;
1675 
1676 	rc = compress_buff(&scratch_buff, dbg_buff);
1677 
1678 err1:
1679 	release_scratch_buff(&scratch_buff, dbg_buff);
1680 err:
1681 	return rc;
1682 }
1683 
1684 static int collect_rss_pf_config(struct cudbg_init *pdbg_init,
1685 				 struct cudbg_buffer *dbg_buff,
1686 				 struct cudbg_error *cudbg_err)
1687 {
1688 	struct cudbg_buffer scratch_buff;
1689 	struct rss_pf_conf *pfconf;
1690 	struct adapter *padap = pdbg_init->adap;
1691 	u32 rss_pf_map, rss_pf_mask, size;
1692 	int pf, rc;
1693 
1694 	size = 8  * sizeof(*pfconf);
1695 
1696 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1697 	if (rc)
1698 		goto err;
1699 
1700 	pfconf =  (struct rss_pf_conf *)scratch_buff.data;
1701 
1702 	rss_pf_map = t4_read_rss_pf_map(padap, 1);
1703 	rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
1704 
1705 	for (pf = 0; pf < 8; pf++) {
1706 		pfconf[pf].rss_pf_map = rss_pf_map;
1707 		pfconf[pf].rss_pf_mask = rss_pf_mask;
1708 		/* no return val */
1709 		t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
1710 	}
1711 
1712 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1713 	if (rc)
1714 		goto err1;
1715 
1716 	rc = compress_buff(&scratch_buff, dbg_buff);
1717 err1:
1718 	release_scratch_buff(&scratch_buff, dbg_buff);
1719 err:
1720 	return rc;
1721 }
1722 
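/*
 * Test the "valid" bit inside a raw SGE context image; the bit position
 * depends on the context type.  buf points to the 32-bit words returned by
 * a context read.
 */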
1723 static int check_valid(u32 *buf, int type)
1724 {
1725 	int index;
1726 	int bit;
1727 	int bit_pos = 0;
1728 
1729 	switch (type) {
1730 	case CTXT_EGRESS:
1731 		bit_pos = 176;
1732 		break;
1733 	case CTXT_INGRESS:
1734 		bit_pos = 141;
1735 		break;
1736 	case CTXT_FLM:
1737 		bit_pos = 89;
1738 		break;
1739 	}
1740 	index = bit_pos / 32;
1741 	bit =  bit_pos % 32;
1742 
1743 	return buf[index] & (1U << bit);
1744 }
1745 
1746 /**
1747  * Get EGRESS, INGRESS, FLM, and CNM max qid.
1748  *
1749  * For EGRESS and INGRESS, do the following calculation.
1750  * max_qid = (DBQ/IMSG context region size in bytes) /
1751  *	     (size of context in bytes).
1752  *
1753  * For FLM, do the following calculation.
1754  * max_qid = (FLM cache region size in bytes) /
1755  *	     ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
1756  *
1757  * There's a 1-to-1 mapping between FLM and CNM if there's no header splitting
1758  * enabled; i.e., max CNM qid is equal to max FLM qid. However, if header
1759  * splitting is enabled, then max CNM qid is half of max FLM qid.
1760  */
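/*
 * Illustrative example of the FLM calculation above: a 1 MB FLM cache
 * region with EDRAMPTRCNT == 0 (32 pointers cached per qid) gives
 * max_qid = 1048576 / (32 * 8) = 4096.
 */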
1761 static int get_max_ctxt_qid(struct adapter *padap,
1762 			    struct struct_meminfo *meminfo,
1763 			    u32 *max_ctx_qid, u8 nelem)
1764 {
1765 	u32 i, idx, found = 0;
1766 
1767 	if (nelem != (CTXT_CNM + 1))
1768 		return -EINVAL;
1769 
1770 	for (i = 0; i < meminfo->mem_c; i++) {
1771 		if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
1772 			continue;                        /* skip holes */
1773 
1774 		idx = meminfo->mem[i].idx;
1775 		/* Get DBQ, IMSG, and FLM context region size */
1776 		if (idx <= CTXT_FLM) {
1777 			if (!(meminfo->mem[i].limit))
1778 				meminfo->mem[i].limit =
1779 					i < meminfo->mem_c - 1 ?
1780 					meminfo->mem[i + 1].base - 1 : ~0;
1781 
1782 			if (idx < CTXT_FLM) {
1783 				/* Get EGRESS and INGRESS max qid. */
1784 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1785 						    meminfo->mem[i].base + 1) /
1786 						   CUDBG_CTXT_SIZE_BYTES;
1787 				found++;
1788 			} else {
1789 				/* Get FLM and CNM max qid. */
1790 				u32 value, edram_ptr_count;
1791 				u8 bytes_per_ptr = 8;
1792 				u8 nohdr;
1793 
1794 				value = t4_read_reg(padap, A_SGE_FLM_CFG);
1795 
1796 				/* Check if header splitting is enabled. */
1797 				nohdr = (value >> S_NOHDR) & 1U;
1798 
1799 				/* Get the number of pointers in EDRAM per
1800 				 * qid in units of 32.
1801 				 */
1802 				edram_ptr_count = 32 *
1803 						  (1U << G_EDRAMPTRCNT(value));
1804 
1805 				/* EDRAMPTRCNT value of 3 is reserved.
1806 				 * So don't exceed 128.
1807 				 */
1808 				if (edram_ptr_count > 128)
1809 					edram_ptr_count = 128;
1810 
1811 				max_ctx_qid[idx] = (meminfo->mem[i].limit -
1812 						    meminfo->mem[i].base + 1) /
1813 						   (edram_ptr_count *
1814 						    bytes_per_ptr);
1815 				found++;
1816 
1817 				/* CNM has 1-to-1 mapping with FLM.
1818 				 * However, if header splitting is enabled,
1819 				 * then max CNM qid is half of max FLM qid.
1820 				 */
1821 				max_ctx_qid[CTXT_CNM] = nohdr ?
1822 							max_ctx_qid[idx] :
1823 							max_ctx_qid[idx] >> 1;
1824 
1825 				/* One more increment for CNM */
1826 				found++;
1827 			}
1828 		}
1829 		if (found == nelem)
1830 			break;
1831 	}
1832 
1833 	/* Sanity check. Ensure the values are within known max. */
1834 	max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
1835 					 M_CTXTQID);
1836 	max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
1837 					  CUDBG_MAX_INGRESS_QIDS);
1838 	max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
1839 				      CUDBG_MAX_FL_QIDS);
1840 	max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
1841 				      CUDBG_MAX_CNM_QIDS);
1842 	return 0;
1843 }
1844 
1845 static int collect_dump_context(struct cudbg_init *pdbg_init,
1846 				struct cudbg_buffer *dbg_buff,
1847 				struct cudbg_error *cudbg_err)
1848 {
1849 	struct cudbg_buffer scratch_buff;
1850 	struct cudbg_buffer temp_buff;
1851 	struct adapter *padap = pdbg_init->adap;
1852 	u32 size = 0, next_offset = 0, total_size = 0;
1853 	struct cudbg_ch_cntxt *buff = NULL;
1854 	struct struct_meminfo meminfo;
1855 	int bytes = 0;
1856 	int rc = 0;
1857 	u32 i, j;
1858 	u32 max_ctx_qid[CTXT_CNM + 1];
1859 	bool limit_qid = false;
1860 	u32 qid_count = 0;
1861 
1862 	rc = fill_meminfo(padap, &meminfo);
1863 	if (rc)
1864 		goto err;
1865 
1866 	/* Get max valid qid for each type of queue */
1867 	rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
1868 	if (rc)
1869 		goto err;
1870 
1871 	/* There are four types of queues. Collect context up to the max
1872 	 * qid of each queue type.
1873 	 */
1874 	for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1875 		size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];
1876 
1877 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1878 	if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
1879 		/* Not enough scratch memory available.
1880 		 * Collect context of at least CUDBG_LOWMEM_MAX_CTXT_QIDS
1881 		 * for each queue type.
1882 		 */
1883 		size = 0;
1884 		for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1885 			size += sizeof(struct cudbg_ch_cntxt) *
1886 				CUDBG_LOWMEM_MAX_CTXT_QIDS;
1887 
1888 		limit_qid = true;
1889 		rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1890 		if (rc)
1891 			goto err;
1892 	}
1893 
1894 	buff = (struct cudbg_ch_cntxt *)scratch_buff.data;
1895 
1896 	/* Collect context data */
1897 	for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
1898 		qid_count = 0;
1899 		for (j = 0; j < max_ctx_qid[i]; j++) {
1900 			read_sge_ctxt(pdbg_init, j, i, buff->data);
1901 
1902 			rc = check_valid(buff->data, i);
1903 			if (rc) {
1904 				buff->cntxt_type = i;
1905 				buff->cntxt_id = j;
1906 				buff++;
1907 				total_size += sizeof(struct cudbg_ch_cntxt);
1908 
1909 				if (i == CTXT_FLM) {
1910 					read_sge_ctxt(pdbg_init, j, CTXT_CNM,
1911 						      buff->data);
1912 					buff->cntxt_type = CTXT_CNM;
1913 					buff->cntxt_id = j;
1914 					buff++;
1915 					total_size +=
1916 						sizeof(struct cudbg_ch_cntxt);
1917 				}
1918 				qid_count++;
1919 			}
1920 
1921 			/* If there's not enough space to collect more qids,
1922 			 * then bail and move on to next queue type.
1923 			 */
1924 			if (limit_qid &&
1925 			    qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
1926 				break;
1927 		}
1928 	}
1929 
1930 	scratch_buff.size = total_size;
1931 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1932 	if (rc)
1933 		goto err1;
1934 
1935 	/* Splitting buffer and writing in terms of CUDBG_CHUNK_SIZE */
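	/* For example, with a hypothetical 2 MB CUDBG_CHUNK_SIZE, a 5 MB
	 * context dump is emitted as three compressed chunks of 2 MB, 2 MB,
	 * and 1 MB.
	 */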
1936 	while (total_size > 0) {
1937 		bytes = min_t(unsigned long, (unsigned long)total_size,
1938 			      (unsigned long)CUDBG_CHUNK_SIZE);
1939 		temp_buff.size = bytes;
1940 		temp_buff.data = (void *)((char *)scratch_buff.data +
1941 					  next_offset);
1942 
1943 		rc = compress_buff(&temp_buff, dbg_buff);
1944 		if (rc)
1945 			goto err1;
1946 
1947 		total_size -= bytes;
1948 		next_offset += bytes;
1949 	}
1950 
1951 err1:
1952 	scratch_buff.size = size;
1953 	release_scratch_buff(&scratch_buff, dbg_buff);
1954 err:
1955 	return rc;
1956 }
1957 
1958 static int collect_fw_devlog(struct cudbg_init *pdbg_init,
1959 			     struct cudbg_buffer *dbg_buff,
1960 			     struct cudbg_error *cudbg_err)
1961 {
1962 #ifdef notyet
1963 	struct adapter *padap = pdbg_init->adap;
1964 	struct devlog_params *dparams = &padap->params.devlog;
1965 	struct cudbg_param *params = NULL;
1966 	struct cudbg_buffer scratch_buff;
1967 	u32 offset;
1968 	int rc = 0, i;
1969 
1970 	rc = t4_init_devlog_params(padap, 1);
1971 
1972 	if (rc < 0) {
1973 		pdbg_init->print("%s(), t4_init_devlog_params failed!, rc: "\
1974 				 "%d\n", __func__, rc);
1975 		for (i = 0; i < pdbg_init->dbg_params_cnt; i++) {
1976 			if (pdbg_init->dbg_params[i].param_type ==
1977 			    CUDBG_DEVLOG_PARAM) {
1978 				params = &pdbg_init->dbg_params[i];
1979 				break;
1980 			}
1981 		}
1982 
1983 		if (params) {
1984 			dparams->memtype = params->u.devlog_param.memtype;
1985 			dparams->start = params->u.devlog_param.start;
1986 			dparams->size = params->u.devlog_param.size;
1987 		} else {
1988 			cudbg_err->sys_err = rc;
1989 			goto err;
1990 		}
1991 	}
1992 
1993 	rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
1994 
1995 	if (rc)
1996 		goto err;
1997 
1998 	/* Collect FW devlog */
1999 	if (dparams->start != 0) {
2000 		offset = scratch_buff.offset;
2001 		rc = t4_memory_rw(padap, padap->params.drv_memwin,
2002 				  dparams->memtype, dparams->start,
2003 				  dparams->size,
2004 				  (__be32 *)((char *)scratch_buff.data +
2005 					     offset), 1);
2006 
2007 		if (rc) {
2008 			pdbg_init->print("%s(), t4_memory_rw failed!, rc: "\
2009 					 "%d\n", __func__, rc);
2010 			cudbg_err->sys_err = rc;
2011 			goto err1;
2012 		}
2013 	}
2014 
2015 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2016 
2017 	if (rc)
2018 		goto err1;
2019 
2020 	rc = compress_buff(&scratch_buff, dbg_buff);
2021 
2022 err1:
2023 	release_scratch_buff(&scratch_buff, dbg_buff);
2024 err:
2025 	return rc;
2026 #endif
2027 	return (CUDBG_STATUS_NOT_IMPLEMENTED);
2028 }
2029 /* CIM OBQ */
2030 
2031 static int collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
2032 				struct cudbg_buffer *dbg_buff,
2033 				struct cudbg_error *cudbg_err)
2034 {
2035 	int rc = 0, qid = 0;
2036 
2037 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2038 
2039 	return rc;
2040 }
2041 
2042 static int collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
2043 				struct cudbg_buffer *dbg_buff,
2044 				struct cudbg_error *cudbg_err)
2045 {
2046 	int rc = 0, qid = 1;
2047 
2048 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2049 
2050 	return rc;
2051 }
2052 
2053 static int collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
2054 				struct cudbg_buffer *dbg_buff,
2055 				struct cudbg_error *cudbg_err)
2056 {
2057 	int rc = 0, qid = 2;
2058 
2059 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2060 
2061 	return rc;
2062 }
2063 
2064 static int collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
2065 				struct cudbg_buffer *dbg_buff,
2066 				struct cudbg_error *cudbg_err)
2067 {
2068 	int rc = 0, qid = 3;
2069 
2070 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2071 
2072 	return rc;
2073 }
2074 
2075 static int collect_cim_obq_sge(struct cudbg_init *pdbg_init,
2076 			       struct cudbg_buffer *dbg_buff,
2077 			       struct cudbg_error *cudbg_err)
2078 {
2079 	int rc = 0, qid = 4;
2080 
2081 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2082 
2083 	return rc;
2084 }
2085 
2086 static int collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
2087 				struct cudbg_buffer *dbg_buff,
2088 				struct cudbg_error *cudbg_err)
2089 {
2090 	int rc = 0, qid = 5;
2091 
2092 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2093 
2094 	return rc;
2095 }
2096 
2097 static int collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
2098 				 struct cudbg_buffer *dbg_buff,
2099 				 struct cudbg_error *cudbg_err)
2100 {
2101 	int rc = 0, qid = 6;
2102 
2103 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2104 
2105 	return rc;
2106 }
2107 
2108 static int collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
2109 				 struct cudbg_buffer *dbg_buff,
2110 				 struct cudbg_error *cudbg_err)
2111 {
2112 	int rc = 0, qid = 7;
2113 
2114 	rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2115 
2116 	return rc;
2117 }
2118 
2119 static int read_cim_obq(struct cudbg_init *pdbg_init,
2120 			struct cudbg_buffer *dbg_buff,
2121 			struct cudbg_error *cudbg_err, int qid)
2122 {
2123 	struct cudbg_buffer scratch_buff;
2124 	struct adapter *padap = pdbg_init->adap;
2125 	u32 qsize;
2126 	int rc;
2127 	int no_of_read_words;
2128 
2129 	/* collect CIM OBQ */
2130 	qsize =  6 * CIM_OBQ_SIZE * 4 *  sizeof(u32);
2131 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2132 	if (rc)
2133 		goto err;
2134 
2135 	/* t4_read_cim_obq will return no. of read words or error */
2136 	no_of_read_words = t4_read_cim_obq(padap, qid,
2137 					   (u32 *)((u32 *)scratch_buff.data +
2138 					   scratch_buff.offset), qsize);
2139 
2140 	/* A no_of_read_words value of zero or less indicates an error. */
2141 	if (no_of_read_words <= 0) {
2142 		if (no_of_read_words == 0)
2143 			rc = CUDBG_SYSTEM_ERROR;
2144 		else
2145 			rc = no_of_read_words;
2146 		if (pdbg_init->verbose)
2147 			pdbg_init->print("%s: t4_read_cim_obq failed (%d)\n",
2148 				 __func__, rc);
2149 		cudbg_err->sys_err = rc;
2150 		goto err1;
2151 	}
2152 
2153 	scratch_buff.size = no_of_read_words * 4;
2154 
2155 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2156 
2157 	if (rc)
2158 		goto err1;
2159 
2160 	rc = compress_buff(&scratch_buff, dbg_buff);
2161 
2162 	if (rc)
2163 		goto err1;
2164 
2165 err1:
2166 	release_scratch_buff(&scratch_buff, dbg_buff);
2167 err:
2168 	return rc;
2169 }
2170 
2171 /* CIM IBQ */
2172 
2173 static int collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
2174 			       struct cudbg_buffer *dbg_buff,
2175 			       struct cudbg_error *cudbg_err)
2176 {
2177 	int rc = 0, qid = 0;
2178 
2179 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2180 	return rc;
2181 }
2182 
2183 static int collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
2184 			       struct cudbg_buffer *dbg_buff,
2185 			       struct cudbg_error *cudbg_err)
2186 {
2187 	int rc = 0, qid = 1;
2188 
2189 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2190 	return rc;
2191 }
2192 
2193 static int collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
2194 			       struct cudbg_buffer *dbg_buff,
2195 			       struct cudbg_error *cudbg_err)
2196 {
2197 	int rc = 0, qid = 2;
2198 
2199 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2200 	return rc;
2201 }
2202 
2203 static int collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
2204 				struct cudbg_buffer *dbg_buff,
2205 				struct cudbg_error *cudbg_err)
2206 {
2207 	int rc = 0, qid = 3;
2208 
2209 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2210 	return rc;
2211 }
2212 
2213 static int collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
2214 				struct cudbg_buffer *dbg_buff,
2215 				struct cudbg_error *cudbg_err)
2216 {
2217 	int rc = 0, qid = 4;
2218 
2219 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2220 	return rc;
2221 }
2222 
2223 static int collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
2224 				struct cudbg_buffer *dbg_buff,
2225 				struct cudbg_error *cudbg_err)
2226 {
2227 	int rc, qid = 5;
2228 
2229 	rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2230 	return rc;
2231 }
2232 
2233 static int read_cim_ibq(struct cudbg_init *pdbg_init,
2234 			struct cudbg_buffer *dbg_buff,
2235 			struct cudbg_error *cudbg_err, int qid)
2236 {
2237 	struct adapter *padap = pdbg_init->adap;
2238 	struct cudbg_buffer scratch_buff;
2239 	u32 qsize;
2240 	int rc;
2241 	int no_of_read_words;
2242 
2243 	/* collect CIM IBQ */
2244 	qsize = CIM_IBQ_SIZE * 4 *  sizeof(u32);
2245 	rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2246 
2247 	if (rc)
2248 		goto err;
2249 
2250 	/* t4_read_cim_ibq will return no. of read words or error */
2251 	no_of_read_words = t4_read_cim_ibq(padap, qid,
2252 					   (u32 *)((u32 *)scratch_buff.data +
2253 					   scratch_buff.offset), qsize);
2254 	/* A no_of_read_words value of zero or less indicates an error. */
2255 	if (no_of_read_words <= 0) {
2256 		if (no_of_read_words == 0)
2257 			rc = CUDBG_SYSTEM_ERROR;
2258 		else
2259 			rc = no_of_read_words;
2260 		if (pdbg_init->verbose)
2261 			pdbg_init->print("%s: t4_read_cim_ibq failed (%d)\n",
2262 				 __func__, rc);
2263 		cudbg_err->sys_err = rc;
2264 		goto err1;
2265 	}
2266 
2267 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2268 	if (rc)
2269 		goto err1;
2270 
2271 	rc = compress_buff(&scratch_buff, dbg_buff);
2272 	if (rc)
2273 		goto err1;
2274 
2275 err1:
2276 	release_scratch_buff(&scratch_buff, dbg_buff);
2277 
2278 err:
2279 	return rc;
2280 }
2281 
2282 static int collect_cim_ma_la(struct cudbg_init *pdbg_init,
2283 			     struct cudbg_buffer *dbg_buff,
2284 			     struct cudbg_error *cudbg_err)
2285 {
2286 	struct cudbg_buffer scratch_buff;
2287 	struct adapter *padap = pdbg_init->adap;
2288 	u32 rc = 0;
2289 
2290 	/* collect CIM MA LA */
2291 	scratch_buff.size =  2 * CIM_MALA_SIZE * 5 * sizeof(u32);
2292 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2293 	if (rc)
2294 		goto err;
2295 
2296 	/* no return */
2297 	t4_cim_read_ma_la(padap,
2298 			  (u32 *) ((char *)scratch_buff.data +
2299 				   scratch_buff.offset),
2300 			  (u32 *) ((char *)scratch_buff.data +
2301 				   scratch_buff.offset + 5 * CIM_MALA_SIZE));
2302 
2303 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2304 	if (rc)
2305 		goto err1;
2306 
2307 	rc = compress_buff(&scratch_buff, dbg_buff);
2308 
2309 err1:
2310 	release_scratch_buff(&scratch_buff, dbg_buff);
2311 err:
2312 	return rc;
2313 }
2314 
2315 static int collect_cim_la(struct cudbg_init *pdbg_init,
2316 			  struct cudbg_buffer *dbg_buff,
2317 			  struct cudbg_error *cudbg_err)
2318 {
2319 	struct cudbg_buffer scratch_buff;
2320 	struct adapter *padap = pdbg_init->adap;
2321 
2322 	int rc;
2323 	u32 cfg = 0;
2324 	int size;
2325 
2326 	/* collect CIM LA */
2327 	if (is_t6(padap)) {
2328 		size = padap->params.cim_la_size / 10 + 1;
2329 		size *= 11 * sizeof(u32);
2330 	} else {
2331 		size = padap->params.cim_la_size / 8;
2332 		size *= 8 * sizeof(u32);
2333 	}
2334 
2335 	size += sizeof(cfg);
2336 
2337 	rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
2338 	if (rc)
2339 		goto err;
2340 
2341 	rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
2342 
2343 	if (rc) {
2344 		if (pdbg_init->verbose)
2345 			pdbg_init->print("%s: t4_cim_read failed (%d)\n",
2346 				 __func__, rc);
2347 		cudbg_err->sys_err = rc;
2348 		goto err1;
2349 	}
2350 
2351 	memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
2352 	       sizeof(cfg));
2353 
2354 	rc = t4_cim_read_la(padap,
2355 			    (u32 *) ((char *)scratch_buff.data +
2356 				     scratch_buff.offset + sizeof(cfg)), NULL);
2357 	if (rc < 0) {
2358 		if (pdbg_init->verbose)
2359 			pdbg_init->print("%s: t4_cim_read_la failed (%d)\n",
2360 				 __func__, rc);
2361 		cudbg_err->sys_err = rc;
2362 		goto err1;
2363 	}
2364 
2365 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2366 	if (rc)
2367 		goto err1;
2368 
2369 	rc = compress_buff(&scratch_buff, dbg_buff);
2370 	if (rc)
2371 		goto err1;
2372 
2373 err1:
2374 	release_scratch_buff(&scratch_buff, dbg_buff);
2375 err:
2376 	return rc;
2377 }
2378 
2379 static int collect_cim_qcfg(struct cudbg_init *pdbg_init,
2380 			    struct cudbg_buffer *dbg_buff,
2381 			    struct cudbg_error *cudbg_err)
2382 {
2383 	struct cudbg_buffer scratch_buff;
2384 	struct adapter *padap = pdbg_init->adap;
2385 	u32 offset;
2386 	int rc = 0;
2387 
2388 	struct struct_cim_qcfg *cim_qcfg_data = NULL;
2389 
2390 	rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
2391 			      &scratch_buff);
2392 
2393 	if (rc)
2394 		goto err;
2395 
2396 	offset = scratch_buff.offset;
2397 
2398 	cim_qcfg_data =
2399 		(struct struct_cim_qcfg *)((u8 *)((char *)scratch_buff.data +
2400 					   offset));
2401 
2402 	rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
2403 			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
2404 
2405 	if (rc) {
2406 		if (pdbg_init->verbose)
2407 			pdbg_init->print("%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
2408 			    __func__, rc);
2409 		cudbg_err->sys_err = rc;
2410 		goto err1;
2411 	}
2412 
2413 	rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
2414 			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
2415 			 cim_qcfg_data->obq_wr);
2416 
2417 	if (rc) {
2418 		if (pdbg_init->verbose)
2419 			pdbg_init->print("%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
2420 			    __func__, rc);
2421 		cudbg_err->sys_err = rc;
2422 		goto err1;
2423 	}
2424 
2425 	/* no return val */
2426 	t4_read_cimq_cfg(padap,
2427 			cim_qcfg_data->base,
2428 			cim_qcfg_data->size,
2429 			cim_qcfg_data->thres);
2430 
2431 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2432 	if (rc)
2433 		goto err1;
2434 
2435 	rc = compress_buff(&scratch_buff, dbg_buff);
2436 	if (rc)
2437 		goto err1;
2438 
2439 err1:
2440 	release_scratch_buff(&scratch_buff, dbg_buff);
2441 err:
2442 	return rc;
2443 }
2444 
2445 /**
2446  * Fetch the start and end of the TX/RX payload regions.
2447  *
2448  * @padap (IN): adapter handle.
2449  * @mem_type (IN): EDC0, EDC1, MC/MC0/MC1.
2450  * @mem_tot_len (IN): total length of @mem_type memory region to read.
2451  * @payload_type (IN): TX or RX Payload.
2452  * @reg_info (OUT): store the payload region info.
2453  *
2454  * Fetch the TX/RX payload region information from meminfo.
2455  * However, reading from the @mem_type region starts at 0 and not
2456  * from whatever base info is stored in meminfo.  Hence, if the
2457  * payload region exists, then calculate the payload region
2458  * start and end wrt 0 and @mem_tot_len, respectively, and set
2459  * @reg_info->exist to true. Otherwise, set @reg_info->exist to false.
2460  */
2461 #ifdef notyet
2462 static int get_payload_range(struct adapter *padap, u8 mem_type,
2463 			     unsigned long mem_tot_len, u8 payload_type,
2464 			     struct struct_region_info *reg_info)
2465 {
2466 	struct struct_meminfo meminfo;
2467 	struct struct_mem_desc mem_region;
2468 	struct struct_mem_desc payload;
2469 	u32 i, idx, found = 0;
2470 	u8 mc_type;
2471 	int rc;
2472 
2473 	/* Get meminfo of all regions */
2474 	rc = fill_meminfo(padap, &meminfo);
2475 	if (rc)
2476 		return rc;
2477 
2478 	/* Extract the specified TX or RX Payload region range */
2479 	memset(&payload, 0, sizeof(struct struct_mem_desc));
2480 	for (i = 0; i < meminfo.mem_c; i++) {
2481 		if (meminfo.mem[i].idx >= ARRAY_SIZE(region))
2482 			continue;                        /* skip holes */
2483 
2484 		idx = meminfo.mem[i].idx;
2485 		/* Get TX or RX Payload region start and end */
2486 		if (idx == payload_type) {
2487 			if (!(meminfo.mem[i].limit))
2488 				meminfo.mem[i].limit =
2489 					i < meminfo.mem_c - 1 ?
2490 					meminfo.mem[i + 1].base - 1 : ~0;
2491 
2492 			memcpy(&payload, &meminfo.mem[i], sizeof(payload));
2493 			found = 1;
2494 			break;
2495 		}
2496 	}
2497 
2498 	/* If TX or RX Payload region is not found return error. */
2499 	if (!found)
2500 		return -EINVAL;
2501 
2502 	if (mem_type < MEM_MC) {
2503 		memcpy(&mem_region, &meminfo.avail[mem_type],
2504 		       sizeof(mem_region));
2505 	} else {
2506 		/* Check if both MC0 and MC1 exist by checking if a
2507 		 * base address for the specified @mem_type exists.
2508 		 * If a base address exists, then there is MC1 and
2509 		 * hence use the base address stored at index 3.
2510 		 * Otherwise, use the base address stored at index 2.
2511 		 */
2512 		mc_type = meminfo.avail[mem_type].base ?
2513 			  mem_type : mem_type - 1;
2514 		memcpy(&mem_region, &meminfo.avail[mc_type],
2515 		       sizeof(mem_region));
2516 	}
2517 
2518 	/* Check if payload region exists in current memory */
2519 	if (payload.base < mem_region.base && payload.limit < mem_region.base) {
2520 		reg_info->exist = false;
2521 		return 0;
2522 	}
2523 
2524 	/* Get Payload region start and end with respect to 0 and
2525 	 * mem_tot_len, respectively.  This is because reading from the
2526 	 * memory region starts at 0 and not at base info stored in meminfo.
2527 	 */
2528 	if (payload.base < mem_region.limit) {
2529 		reg_info->exist = true;
2530 		if (payload.base >= mem_region.base)
2531 			reg_info->start = payload.base - mem_region.base;
2532 		else
2533 			reg_info->start = 0;
2534 
2535 		if (payload.limit < mem_region.limit)
2536 			reg_info->end = payload.limit - mem_region.base;
2537 		else
2538 			reg_info->end = mem_tot_len;
2539 	}
2540 
2541 	return 0;
2542 }
2543 #endif
2544 
2545 static int read_fw_mem(struct cudbg_init *pdbg_init,
2546 			struct cudbg_buffer *dbg_buff, u8 mem_type,
2547 			unsigned long tot_len, struct cudbg_error *cudbg_err)
2548 {
2549 #ifdef notyet
2550 	struct cudbg_buffer scratch_buff;
2551 	struct adapter *padap = pdbg_init->adap;
2552 	unsigned long bytes_read = 0;
2553 	unsigned long bytes_left;
2554 	unsigned long bytes;
2555 	int	      rc;
2556 	struct struct_region_info payload[2]; /* TX and RX Payload Region */
2557 	u16 get_payload_flag;
2558 	u8 i;
2559 
2560 	get_payload_flag =
2561 		pdbg_init->dbg_params[CUDBG_GET_PAYLOAD_PARAM].param_type;
2562 
2563 	/* If explicitly asked to get TX/RX Payload data,
2564 	 * then don't zero out the payload data. Otherwise,
2565 	 * zero out the payload data.
2566 	 */
2567 	if (!get_payload_flag) {
2568 		u8 region_index[2];
2569 		u8 j = 0;
2570 
2571 		/* Find the index of TX and RX Payload regions in meminfo */
2572 		for (i = 0; i < ARRAY_SIZE(region); i++) {
2573 			if (!strcmp(region[i], "Tx payload:") ||
2574 			    !strcmp(region[i], "Rx payload:")) {
2575 				region_index[j] = i;
2576 				j++;
2577 				if (j == 2)
2578 					break;
2579 			}
2580 		}
2581 
2582 		/* Get TX/RX Payload region range if they exist */
2583 		memset(payload, 0, ARRAY_SIZE(payload) * sizeof(payload[0]));
2584 		for (i = 0; i < ARRAY_SIZE(payload); i++) {
2585 			rc = get_payload_range(padap, mem_type, tot_len,
2586 					       region_index[i],
2587 					       &payload[i]);
2588 			if (rc)
2589 				goto err;
2590 
2591 			if (payload[i].exist) {
2592 				/* Align start and end to avoid wrap around */
2593 				payload[i].start =
2594 					roundup(payload[i].start,
2595 					    CUDBG_CHUNK_SIZE);
2596 				payload[i].end =
2597 					rounddown(payload[i].end,
2598 					    CUDBG_CHUNK_SIZE);
2599 			}
2600 		}
2601 	}
2602 
2603 	bytes_left = tot_len;
2604 	scratch_buff.size = tot_len;
2605 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2606 	if (rc)
2607 		goto err;
2608 
2609 	while (bytes_left > 0) {
2610 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2611 		rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
2612 
2613 		if (rc) {
2614 			rc = CUDBG_STATUS_NO_SCRATCH_MEM;
2615 			goto err;
2616 		}
2617 
2618 		if (!get_payload_flag) {
2619 			for (i = 0; i < ARRAY_SIZE(payload); i++) {
2620 				if (payload[i].exist &&
2621 				    bytes_read >= payload[i].start &&
2622 				    (bytes_read + bytes) <= payload[i].end) {
2623 					memset(scratch_buff.data, 0, bytes);
2624 					/* TX and RX Payload regions
2625 					 * can't overlap.
2626 					 */
2627 					goto skip_read;
2628 				}
2629 			}
2630 		}
2631 
2632 		/* Read from file */
2633 		/*fread(scratch_buff.data, 1, Bytes, in);*/
2634 		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
2635 				  bytes, (__be32 *)(scratch_buff.data), 1);
2636 
2637 		if (rc) {
2638 			if (pdbg_init->verbose)
2639 				pdbg_init->print("%s: t4_memory_rw failed (%d)",
2640 				    __func__, rc);
2641 			cudbg_err->sys_err = rc;
2642 			goto err1;
2643 		}
2644 
2645 skip_read:
2646 		rc = compress_buff(&scratch_buff, dbg_buff);
2647 		if (rc)
2648 			goto err1;
2649 
2650 		bytes_left -= bytes;
2651 		bytes_read += bytes;
2652 		release_scratch_buff(&scratch_buff, dbg_buff);
2653 	}
2654 
2655 err1:
2656 	if (rc)
2657 		release_scratch_buff(&scratch_buff, dbg_buff);
2658 
2659 err:
2660 	return rc;
2661 #endif
2662 	return (CUDBG_STATUS_NOT_IMPLEMENTED);
2663 }
2664 
2665 static void collect_mem_info(struct cudbg_init *pdbg_init,
2666 			     struct card_mem *mem_info)
2667 {
2668 	struct adapter *padap = pdbg_init->adap;
2669 	u32 value;
2670 	int t4 = 0;
2671 
2672 	if (is_t4(padap))
2673 		t4 = 1;
2674 
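	/*
	 * T4 has a single external memory controller (recorded as MC0 here);
	 * T5 and later split external memory into MC0 and MC1.
	 */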
2675 	if (t4) {
2676 		value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
2677 		value = G_EXT_MEM_SIZE(value);
2678 		mem_info->size_mc0 = (u16)value;  /* size in MB */
2679 
2680 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2681 		if (value & F_EXT_MEM_ENABLE)
2682 			mem_info->mem_flag |= (1 << MC0_FLAG); /* set mc0 flag
2683 								  bit */
2684 	} else {
2685 		value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
2686 		value = G_EXT_MEM0_SIZE(value);
2687 		mem_info->size_mc0 = (u16)value;
2688 
2689 		value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
2690 		value = G_EXT_MEM1_SIZE(value);
2691 		mem_info->size_mc1 = (u16)value;
2692 
2693 		value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2694 		if (value & F_EXT_MEM0_ENABLE)
2695 			mem_info->mem_flag |= (1 << MC0_FLAG);
2696 		if (value & F_EXT_MEM1_ENABLE)
2697 			mem_info->mem_flag |= (1 << MC1_FLAG);
2698 	}
2699 
2700 	value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
2701 	value = G_EDRAM0_SIZE(value);
2702 	mem_info->size_edc0 = (u16)value;
2703 
2704 	value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
2705 	value = G_EDRAM1_SIZE(value);
2706 	mem_info->size_edc1 = (u16)value;
2707 
2708 	value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2709 	if (value & F_EDRAM0_ENABLE)
2710 		mem_info->mem_flag |= (1 << EDC0_FLAG);
2711 	if (value & F_EDRAM1_ENABLE)
2712 		mem_info->mem_flag |= (1 << EDC1_FLAG);
2713 
2714 }
2715 
2716 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
2717 				struct cudbg_error *cudbg_err)
2718 {
2719 	struct adapter *padap = pdbg_init->adap;
2720 	int rc;
2721 
2722 	if (is_fw_attached(pdbg_init)) {
2723 
2724 		/* Flush uP dcache before reading edcX/mcX  */
2725 		rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK,
2726 		    "t4cudl");
2727 		if (rc == 0) {
2728 			rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
2729 			end_synchronized_op(padap, 0);
2730 		}
2731 
2732 		if (rc) {
2733 			if (pdbg_init->verbose)
2734 				pdbg_init->print("%s: t4_fwcache failed (%d)\n",
2735 				 __func__, rc);
2736 			cudbg_err->sys_warn = rc;
2737 		}
2738 	}
2739 }
2740 
2741 static int collect_edc0_meminfo(struct cudbg_init *pdbg_init,
2742 				struct cudbg_buffer *dbg_buff,
2743 				struct cudbg_error *cudbg_err)
2744 {
2745 	struct card_mem mem_info = {0};
2746 	unsigned long edc0_size;
2747 	int rc;
2748 
2749 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2750 
2751 	collect_mem_info(pdbg_init, &mem_info);
2752 
2753 	if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
2754 		edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
2755 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
2756 				 edc0_size, cudbg_err);
2757 		if (rc)
2758 			goto err;
2759 
2760 	} else {
2761 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2762 		if (pdbg_init->verbose)
2763 			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2764 				 __func__, err_msg[-rc]);
2765 		goto err;
2766 
2767 	}
2768 err:
2769 	return rc;
2770 }
2771 
2772 static int collect_edc1_meminfo(struct cudbg_init *pdbg_init,
2773 				struct cudbg_buffer *dbg_buff,
2774 				struct cudbg_error *cudbg_err)
2775 {
2776 	struct card_mem mem_info = {0};
2777 	unsigned long edc1_size;
2778 	int rc;
2779 
2780 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2781 
2782 	collect_mem_info(pdbg_init, &mem_info);
2783 
2784 	if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
2785 		edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
2786 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
2787 				 edc1_size, cudbg_err);
2788 		if (rc)
2789 			goto err;
2790 	} else {
2791 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2792 		if (pdbg_init->verbose)
2793 			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2794 				 __func__, err_msg[-rc]);
2795 		goto err;
2796 	}
2797 
2798 err:
2799 
2800 	return rc;
2801 }
2802 
2803 static int collect_mc0_meminfo(struct cudbg_init *pdbg_init,
2804 			       struct cudbg_buffer *dbg_buff,
2805 			       struct cudbg_error *cudbg_err)
2806 {
2807 	struct card_mem mem_info = {0};
2808 	unsigned long mc0_size;
2809 	int rc;
2810 
2811 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2812 
2813 	collect_mem_info(pdbg_init, &mem_info);
2814 
2815 	if (mem_info.mem_flag & (1 << MC0_FLAG)) {
2816 		mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
2817 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
2818 				 mc0_size, cudbg_err);
2819 		if (rc)
2820 			goto err;
2821 	} else {
2822 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2823 		if (pdbg_init->verbose)
2824 			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2825 				 __func__, err_msg[-rc]);
2826 		goto err;
2827 	}
2828 
2829 err:
2830 	return rc;
2831 }
2832 
2833 static int collect_mc1_meminfo(struct cudbg_init *pdbg_init,
2834 			       struct cudbg_buffer *dbg_buff,
2835 			       struct cudbg_error *cudbg_err)
2836 {
2837 	struct card_mem mem_info = {0};
2838 	unsigned long mc1_size;
2839 	int rc;
2840 
2841 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
2842 
2843 	collect_mem_info(pdbg_init, &mem_info);
2844 
2845 	if (mem_info.mem_flag & (1 << MC1_FLAG)) {
2846 		mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
2847 		rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
2848 				 mc1_size, cudbg_err);
2849 		if (rc)
2850 			goto err;
2851 	} else {
2852 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2853 
2854 		if (pdbg_init->verbose)
2855 			pdbg_init->print("%s(), collect_mem_info failed!, %s\n",
2856 				 __func__, err_msg[-rc]);
2857 		goto err;
2858 	}
2859 err:
2860 	return rc;
2861 }
2862 
2863 static int collect_reg_dump(struct cudbg_init *pdbg_init,
2864 			    struct cudbg_buffer *dbg_buff,
2865 			    struct cudbg_error *cudbg_err)
2866 {
2867 	struct cudbg_buffer scratch_buff;
2868 	struct cudbg_buffer tmp_scratch_buff;
2869 	struct adapter *padap = pdbg_init->adap;
2870 	unsigned long	     bytes_read = 0;
2871 	unsigned long	     bytes_left;
2872 	u32		     buf_size = 0, bytes = 0;
2873 	int		     rc = 0;
2874 
2875 	if (is_t4(padap))
2876 		buf_size = T4_REGMAP_SIZE; /* + sizeof(unsigned int); */
2877 	else if (is_t5(padap) || is_t6(padap))
2878 		buf_size = T5_REGMAP_SIZE;
2879 
2880 	scratch_buff.size = buf_size;
2881 
2882 	tmp_scratch_buff = scratch_buff;
2883 
2884 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2885 	if (rc)
2886 		goto err;
2887 
2888 	/* no return */
2889 	t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
2890 	bytes_left =   scratch_buff.size;
2891 
2892 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2893 	if (rc)
2894 		goto err1;
2895 
2896 	while (bytes_left > 0) {
2897 		tmp_scratch_buff.data =
2898 			((char *)scratch_buff.data) + bytes_read;
2899 		bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2900 		tmp_scratch_buff.size = bytes;
2901 		compress_buff(&tmp_scratch_buff, dbg_buff);
2902 		bytes_left -= bytes;
2903 		bytes_read += bytes;
2904 	}
2905 
2906 err1:
2907 	release_scratch_buff(&scratch_buff, dbg_buff);
2908 err:
2909 	return rc;
2910 }
2911 
2912 static int collect_cctrl(struct cudbg_init *pdbg_init,
2913 			 struct cudbg_buffer *dbg_buff,
2914 			 struct cudbg_error *cudbg_err)
2915 {
2916 	struct cudbg_buffer scratch_buff;
2917 	struct adapter *padap = pdbg_init->adap;
2918 	u32 size;
2919 	int rc;
2920 
2921 	size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2922 	scratch_buff.size = size;
2923 
2924 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2925 	if (rc)
2926 		goto err;
2927 
2928 	t4_read_cong_tbl(padap, (void *)scratch_buff.data);
2929 
2930 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
2931 	if (rc)
2932 		goto err1;
2933 
2934 	rc = compress_buff(&scratch_buff, dbg_buff);
2935 
2936 err1:
2937 	release_scratch_buff(&scratch_buff, dbg_buff);
2938 err:
2939 	return rc;
2940 }
2941 
2942 static int check_busy_bit(struct adapter *padap)
2943 {
2944 	u32 val;
2945 	u32 busy = 1;
2946 	int i = 0;
2947 	int retry = 10;
2948 	int status = 0;
2949 
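	/* Re-read A_CIM_HOST_ACC_CTRL up to 'retry' times until the busy bit
	 * clears; report failure if it is still set afterwards.
	 */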
2950 	while (busy && i < retry) {
2951 		val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
2952 		busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
2953 		i++;
2954 	}
2955 
2956 	if (busy)
2957 		status = -1;
2958 
2959 	return status;
2960 }
2961 
2962 static int cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
2963 {
2964 	int rc = 0;
2965 
2966 	/* write register address into the A_CIM_HOST_ACC_CTRL */
2967 	t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
2968 
2969 	/* Poll HOSTBUSY */
2970 	rc = check_busy_bit(padap);
2971 	if (rc)
2972 		goto err;
2973 
2974 	/* Read value from A_CIM_HOST_ACC_DATA */
2975 	*val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
2976 
2977 err:
2978 	return rc;
2979 }
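
/*
 * Example use (see collect_pbt_tables() below): cim_ha_rreg(padap,
 * CUDBG_CHAC_PBT_ADDR, &val) fetches the first dynamic PBT entry through
 * the CIM host-access window.
 */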
2980 
2981 static int dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
2982 		       struct ireg_field *up_cim_reg, u32 *buff)
2983 {
2984 	u32 i;
2985 	int rc = 0;
2986 
2987 	for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
2988 		rc = cim_ha_rreg(padap,
2989 				 up_cim_reg->ireg_local_offset + (i * 4),
2990 				buff);
2991 		if (rc) {
2992 			if (pdbg_init->verbose)
2993 				pdbg_init->print("BUSY timeout reading"
2994 					 "CIM_HOST_ACC_CTRL\n");
2995 			goto err;
2996 		}
2997 
2998 		buff++;
2999 	}
3000 
3001 err:
3002 	return rc;
3003 }
3004 
3005 static int collect_up_cim_indirect(struct cudbg_init *pdbg_init,
3006 				   struct cudbg_buffer *dbg_buff,
3007 				   struct cudbg_error *cudbg_err)
3008 {
3009 	struct cudbg_buffer scratch_buff;
3010 	struct adapter *padap = pdbg_init->adap;
3011 	struct ireg_buf *up_cim;
3012 	u32 size;
3013 	int i, rc, n;
3014 
3015 	n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
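	/* Each row of t5/t6_up_cim_reg_array holds four u32s: control
	 * address, data address, local offset, and offset range.
	 */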
3016 	size = sizeof(struct ireg_buf) * n;
3017 	scratch_buff.size = size;
3018 
3019 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3020 	if (rc)
3021 		goto err;
3022 
3023 	up_cim = (struct ireg_buf *)scratch_buff.data;
3024 
3025 	for (i = 0; i < n; i++) {
3026 		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
3027 		u32 *buff = up_cim->outbuf;
3028 
3029 		if (is_t5(padap)) {
3030 			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
3031 			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
3032 			up_cim_reg->ireg_local_offset =
3033 						t5_up_cim_reg_array[i][2];
3034 			up_cim_reg->ireg_offset_range =
3035 						t5_up_cim_reg_array[i][3];
3036 		} else if (is_t6(padap)) {
3037 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
3038 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
3039 			up_cim_reg->ireg_local_offset =
3040 						t6_up_cim_reg_array[i][2];
3041 			up_cim_reg->ireg_offset_range =
3042 						t6_up_cim_reg_array[i][3];
3043 		}
3044 
3045 		rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
3046 
3047 		up_cim++;
3048 	}
3049 
3050 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3051 	if (rc)
3052 		goto err1;
3053 
3054 	rc = compress_buff(&scratch_buff, dbg_buff);
3055 
3056 err1:
3057 	release_scratch_buff(&scratch_buff, dbg_buff);
3058 err:
3059 	return rc;
3060 }
3061 
3062 static int collect_mbox_log(struct cudbg_init *pdbg_init,
3063 			    struct cudbg_buffer *dbg_buff,
3064 			    struct cudbg_error *cudbg_err)
3065 {
3066 #ifdef notyet
3067 	struct cudbg_buffer scratch_buff;
3068 	struct cudbg_mbox_log *mboxlog = NULL;
3069 	struct mbox_cmd_log *log = NULL;
3070 	struct mbox_cmd *entry;
3071 	u64 flit;
3072 	u32 size;
3073 	unsigned int entry_idx;
3074 	int i, k, rc;
3075 	u16 mbox_cmds;
3076 
3077 	if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
3078 		log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3079 			mboxlog_param.log;
3080 		mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3081 				mboxlog_param.mbox_cmds;
3082 	} else {
3083 		if (pdbg_init->verbose)
3084 			pdbg_init->print("Mbox log is not requested\n");
3085 		return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
3086 	}
3087 
3088 	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
3089 	scratch_buff.size = size;
3090 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3091 	if (rc)
3092 		goto err;
3093 
3094 	mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;
3095 
3096 	for (k = 0; k < mbox_cmds; k++) {
3097 		entry_idx = log->cursor + k;
3098 		if (entry_idx >= log->size)
3099 			entry_idx -= log->size;
3100 		entry = mbox_cmd_log_entry(log, entry_idx);
3101 
3102 		/* skip over unused entries */
3103 		if (entry->timestamp == 0)
3104 			continue;
3105 
3106 		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
3107 
3108 		for (i = 0; i < MBOX_LEN / 8; i++) {
3109 			flit = entry->cmd[i];
3110 			mboxlog->hi[i] = (u32)(flit >> 32);
3111 			mboxlog->lo[i] = (u32)flit;
3112 		}
3113 
3114 		mboxlog++;
3115 	}
3116 
3117 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3118 	if (rc)
3119 		goto err1;
3120 
3121 	rc = compress_buff(&scratch_buff, dbg_buff);
3122 
3123 err1:
3124 	release_scratch_buff(&scratch_buff, dbg_buff);
3125 err:
3126 	return rc;
3127 #endif
3128 	return (CUDBG_STATUS_NOT_IMPLEMENTED);
3129 }
3130 
3131 static int collect_pbt_tables(struct cudbg_init *pdbg_init,
3132 			      struct cudbg_buffer *dbg_buff,
3133 			      struct cudbg_error *cudbg_err)
3134 {
3135 	struct cudbg_buffer scratch_buff;
3136 	struct adapter *padap = pdbg_init->adap;
3137 	struct cudbg_pbt_tables *pbt = NULL;
3138 	u32 size;
3139 	u32 addr;
3140 	int i, rc;
3141 
3142 	size = sizeof(struct cudbg_pbt_tables);
3143 	scratch_buff.size = size;
3144 
3145 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3146 	if (rc)
3147 		goto err;
3148 
3149 	pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
3150 
3151 	/* PBT dynamic entries */
3152 	addr = CUDBG_CHAC_PBT_ADDR;
3153 	for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
3154 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
3155 		if (rc) {
3156 			if (pdbg_init->verbose)
3157 				pdbg_init->print("BUSY timeout reading"
3158 					 "CIM_HOST_ACC_CTRL\n");
3159 			goto err1;
3160 		}
3161 	}
3162 
3163 	/* PBT static entries */
3164 
3165 	/* static entries start when bit 6 is set */
3166 	addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
3167 	for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
3168 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
3169 		if (rc) {
3170 			if (pdbg_init->verbose)
3171 				pdbg_init->print("BUSY timeout reading"
3172 					 "CIM_HOST_ACC_CTRL\n");
3173 			goto err1;
3174 		}
3175 	}
3176 
3177 	/* LRF entries */
3178 	addr = CUDBG_CHAC_PBT_LRF;
3179 	for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
3180 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
3181 		if (rc) {
3182 			if (pdbg_init->verbose)
3183 				pdbg_init->print("BUSY timeout reading"
3184 					 "CIM_HOST_ACC_CTRL\n");
3185 			goto err1;
3186 		}
3187 	}
3188 
3189 	/* PBT data entries */
3190 	addr = CUDBG_CHAC_PBT_DATA;
3191 	for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
3192 		rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
3193 		if (rc) {
3194 			if (pdbg_init->verbose)
3195 				pdbg_init->print("BUSY timeout reading"
3196 					 "CIM_HOST_ACC_CTRL\n");
3197 			goto err1;
3198 		}
3199 	}
3200 
3201 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3202 	if (rc)
3203 		goto err1;
3204 
3205 	rc = compress_buff(&scratch_buff, dbg_buff);
3206 
3207 err1:
3208 	release_scratch_buff(&scratch_buff, dbg_buff);
3209 err:
3210 	return rc;
3211 }
3212 
3213 static int collect_pm_indirect(struct cudbg_init *pdbg_init,
3214 			       struct cudbg_buffer *dbg_buff,
3215 			       struct cudbg_error *cudbg_err)
3216 {
3217 	struct cudbg_buffer scratch_buff;
3218 	struct adapter *padap = pdbg_init->adap;
3219 	struct ireg_buf *ch_pm;
3220 	u32 size;
3221 	int i, rc, n;
3222 
3223 	n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
3224 	size = sizeof(struct ireg_buf) * n * 2;
3225 	scratch_buff.size = size;
3226 
3227 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3228 	if (rc)
3229 		goto err;
3230 
3231 	ch_pm = (struct ireg_buf *)scratch_buff.data;
3232 
3233 	/*PM_RX*/
3234 	for (i = 0; i < n; i++) {
3235 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3236 		u32 *buff = ch_pm->outbuf;
3237 
3238 		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
3239 		pm_pio->ireg_data = t5_pm_rx_array[i][1];
3240 		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
3241 		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
3242 
3243 		t4_read_indirect(padap,
3244 				pm_pio->ireg_addr,
3245 				pm_pio->ireg_data,
3246 				buff,
3247 				pm_pio->ireg_offset_range,
3248 				pm_pio->ireg_local_offset);
3249 
3250 		ch_pm++;
3251 	}
3252 
3253 	/*PM_Tx*/
3254 	n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
3255 	for (i = 0; i < n; i++) {
3256 		struct ireg_field *pm_pio = &ch_pm->tp_pio;
3257 		u32 *buff = ch_pm->outbuf;
3258 
3259 		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
3260 		pm_pio->ireg_data = t5_pm_tx_array[i][1];
3261 		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
3262 		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
3263 
3264 		t4_read_indirect(padap,
3265 				pm_pio->ireg_addr,
3266 				pm_pio->ireg_data,
3267 				buff,
3268 				pm_pio->ireg_offset_range,
3269 				pm_pio->ireg_local_offset);
3270 
3271 		ch_pm++;
3272 	}
3273 
3274 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3275 	if (rc)
3276 		goto err1;
3277 
3278 	rc = compress_buff(&scratch_buff, dbg_buff);
3279 
3280 err1:
3281 	release_scratch_buff(&scratch_buff, dbg_buff);
3282 err:
3283 	return rc;
3284 
3285 }
3286 
3287 static int collect_tid(struct cudbg_init *pdbg_init,
3288 		       struct cudbg_buffer *dbg_buff,
3289 		       struct cudbg_error *cudbg_err)
3290 {
3291 
3292 	struct cudbg_buffer scratch_buff;
3293 	struct adapter *padap = pdbg_init->adap;
3294 	struct tid_info_region *tid;
3295 	struct tid_info_region_rev1 *tid1;
3296 	u32 para[7], val[7];
3297 	u32 mbox, pf;
3298 	int rc;
3299 
3300 	scratch_buff.size = sizeof(struct tid_info_region_rev1);
3301 
3302 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3303 	if (rc)
3304 		goto err;
3305 
3306 #define FW_PARAM_DEV_A(param) \
3307 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3308 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3309 #define FW_PARAM_PFVF_A(param) \
3310 	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3311 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)|  \
3312 	 V_FW_PARAMS_PARAM_Y(0) | \
3313 	 V_FW_PARAMS_PARAM_Z(0))
3314 #define MAX_ATIDS_A 8192U
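
	/* FW_PARAM_PFVF_A(FILTER_START), for example, builds the query key
	 * for the PF/VF FILTER_START parameter; the seven keys assembled
	 * below are resolved with a single t4_query_params() call.
	 */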
3315 
3316 	tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
3317 	tid = &(tid1->tid);
3318 	tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
3319 	tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
3320 	tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
3321 			     sizeof(struct cudbg_ver_hdr);
3322 
3323 	if (is_t5(padap)) {
3324 		tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3325 		tid1->tid_start = 0;
3326 	} else if (is_t6(padap)) {
3327 		tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
3328 		tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
3329 	}
3330 
3331 	tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
3332 
3333 	para[0] = FW_PARAM_PFVF_A(FILTER_START);
3334 	para[1] = FW_PARAM_PFVF_A(FILTER_END);
3335 	para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
3336 	para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
3337 	para[4] = FW_PARAM_DEV_A(NTID);
3338 	para[5] = FW_PARAM_PFVF_A(SERVER_START);
3339 	para[6] = FW_PARAM_PFVF_A(SERVER_END);
3340 
3341 	rc = begin_synchronized_op(padap, NULL, SLEEP_OK | INTR_OK, "t4cudq");
3342 	if (rc)
3343 		goto err;
3344 	mbox = padap->mbox;
3345 	pf = padap->pf;
3346 	rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3347 	if (rc <  0) {
3348 		if (rc == -FW_EPERM) {
3349 			/* It looks like we don't have permission to use
3350 			 * padap->mbox.
3351 			 *
3352 			 * Try mbox 4.  If it works, we'll continue to
3353 			 * collect the rest of tid info from mbox 4.
3354 			 * Else, quit trying to collect tid info.
3355 			 */
3356 			mbox = 4;
3357 			pf = 4;
3358 			rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3359 			if (rc < 0) {
3360 				cudbg_err->sys_err = rc;
3361 				goto err1;
3362 			}
3363 		} else {
3364 			cudbg_err->sys_err = rc;
3365 			goto err1;
3366 		}
3367 	}
3368 
3369 	tid->ftid_base = val[0];
3370 	tid->nftids = val[1] - val[0] + 1;
3371 	/*active filter region*/
3372 	if (val[2] != val[3]) {
3373 #ifdef notyet
3374 		tid->flags |= FW_OFLD_CONN;
3375 #endif
3376 		tid->aftid_base = val[2];
3377 		tid->aftid_end = val[3];
3378 	}
3379 	tid->ntids = val[4];
3380 	tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
3381 	tid->stid_base = val[5];
3382 	tid->nstids = val[6] - val[5] + 1;
3383 
3384 	if (chip_id(padap) >= CHELSIO_T6) {
3385 		para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
3386 		para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
3387 		rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3388 		if (rc < 0) {
3389 			cudbg_err->sys_err = rc;
3390 			goto err1;
3391 		}
3392 
3393 		tid->hpftid_base = val[0];
3394 		tid->nhpftids = val[1] - val[0] + 1;
3395 	}
3396 
3397 	if (chip_id(padap) <= CHELSIO_T5) {
3398 		tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
3399 		tid->hash_base /= 4;
3400 	} else
3401 		tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
3402 
3403 	/*UO context range*/
3404 	para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
3405 	para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
3406 
3407 	rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3408 	if (rc <  0) {
3409 		cudbg_err->sys_err = rc;
3410 		goto err1;
3411 	}
3412 
3413 	if (val[0] != val[1]) {
3414 		tid->uotid_base = val[0];
3415 		tid->nuotids = val[1] - val[0] + 1;
3416 	}
3417 	tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
3418 	tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
3419 
3420 #undef FW_PARAM_PFVF_A
3421 #undef FW_PARAM_DEV_A
3422 #undef MAX_ATIDS_A
3423 
3424 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3425 	if (rc)
3426 		goto err1;
3427 	rc = compress_buff(&scratch_buff, dbg_buff);
3428 
3429 err1:
3430 	end_synchronized_op(padap, 0);
3431 	release_scratch_buff(&scratch_buff, dbg_buff);
3432 err:
3433 	return rc;
3434 }
3435 
3436 static int collect_tx_rate(struct cudbg_init *pdbg_init,
3437 			   struct cudbg_buffer *dbg_buff,
3438 			   struct cudbg_error *cudbg_err)
3439 {
3440 	struct cudbg_buffer scratch_buff;
3441 	struct adapter *padap = pdbg_init->adap;
3442 	struct tx_rate *tx_rate;
3443 	u32 size;
3444 	int rc;
3445 
3446 	size = sizeof(struct tx_rate);
3447 	scratch_buff.size = size;
3448 
3449 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3450 	if (rc)
3451 		goto err;
3452 
3453 	tx_rate = (struct tx_rate *)scratch_buff.data;
3454 	t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
3455 	tx_rate->nchan = padap->chip_params->nchan;
3456 
3457 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3458 	if (rc)
3459 		goto err1;
3460 
3461 	rc = compress_buff(&scratch_buff, dbg_buff);
3462 
3463 err1:
3464 	release_scratch_buff(&scratch_buff, dbg_buff);
3465 err:
3466 	return rc;
3467 }
3468 
3469 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
3470 {
3471 	*mask = x | y;
3472 	y = (__force u64)cpu_to_be64(y);
3473 	memcpy(addr, (char *)&y + 2, ETH_ALEN);
3474 }
3475 
3476 static void mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
3477 {
3478 	if (is_t5(padap)) {
3479 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3480 							  A_MPS_VF_RPLCT_MAP3));
3481 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3482 							  A_MPS_VF_RPLCT_MAP2));
3483 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3484 							  A_MPS_VF_RPLCT_MAP1));
3485 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3486 							  A_MPS_VF_RPLCT_MAP0));
3487 	} else {
3488 		mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3489 							  A_MPS_VF_RPLCT_MAP7));
3490 		mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3491 							  A_MPS_VF_RPLCT_MAP6));
3492 		mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3493 							  A_MPS_VF_RPLCT_MAP5));
3494 		mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3495 							  A_MPS_VF_RPLCT_MAP4));
3496 	}
3497 	mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
3498 	mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
3499 	mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
3500 	mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
3501 }
3502 
3503 static int collect_mps_tcam(struct cudbg_init *pdbg_init,
3504 			    struct cudbg_buffer *dbg_buff,
3505 			    struct cudbg_error *cudbg_err)
3506 {
3507 	struct cudbg_buffer scratch_buff;
3508 	struct adapter *padap = pdbg_init->adap;
3509 	struct cudbg_mps_tcam *tcam = NULL;
3510 	u32 size = 0, i, n, total_size = 0;
3511 	u32 ctl, data2;
3512 	u64 tcamy, tcamx, val;
3513 	int rc;
3514 
3515 	n = padap->chip_params->mps_tcam_size;
3516 	size = sizeof(struct cudbg_mps_tcam) * n;
3517 	scratch_buff.size = size;
3518 
3519 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3520 	if (rc)
3521 		goto err;
3522 	memset(scratch_buff.data, 0, size);
3523 
3524 	tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
3525 	for (i = 0; i < n; i++) {
3526 		if (chip_id(padap) >= CHELSIO_T6) {
3527 			/* CtlReqID   - 1: use Host Driver Requester ID
3528 			 * CtlCmdType - 0: Read, 1: Write
3529 			 * CtlTcamSel - 0: TCAM0, 1: TCAM1
3530 			 * CtlXYBitSel- 0: Y bit, 1: X bit
3531 			 */
3532 
3533 			/* Read tcamy */
3534 			ctl = (V_CTLREQID(1) |
3535 			       V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
3536 			if (i < 256)
3537 				ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
3538 			else
3539 				ctl |= V_CTLTCAMINDEX(i - 256) |
3540 				       V_CTLTCAMSEL(1);
3541 
3542 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3543 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3544 			tcamy = G_DMACH(val) << 32;
3545 			tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3546 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3547 			tcam->lookup_type = G_DATALKPTYPE(data2);
3548 
3549 			/* 0 - Outer header, 1 - Inner header
3550 			 * [71:48] bit locations are overloaded for
3551 			 * outer vs. inner lookup types.
3552 			 */
3553 
3554 			if (tcam->lookup_type &&
3555 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3556 				/* Inner header VNI */
3557 				tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
3558 					     (G_DATAVIDH1(data2) << 16) |
3559 					     G_VIDL(val);
3560 				tcam->dip_hit = data2 & F_DATADIPHIT;
3561 			} else {
3562 				tcam->vlan_vld = data2 & F_DATAVIDH2;
3563 				tcam->ivlan = G_VIDL(val);
3564 			}
3565 
3566 			tcam->port_num = G_DATAPORTNUM(data2);
3567 
3568 			/* Read tcamx. Change the control param */
3569 			ctl |= V_CTLXYBITSEL(1);
3570 			t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3571 			val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3572 			tcamx = G_DMACH(val) << 32;
3573 			tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3574 			data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3575 			if (tcam->lookup_type &&
3576 			    (tcam->lookup_type != M_DATALKPTYPE)) {
3577 				/* Inner header VNI mask */
3578 				tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
3579 					     (G_DATAVIDH1(data2) << 16) |
3580 					     G_VIDL(val);
3581 			}
3582 		} else {
3583 			tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
3584 			tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
3585 		}
3586 
3587 		if (tcamx & tcamy)
3588 			continue;
3589 
3590 		tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
3591 		tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));
3592 
3593 		if (is_t5(padap))
3594 			tcam->repli = (tcam->cls_lo & F_REPLICATE);
3595 		else if (is_t6(padap))
3596 			tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);
3597 
3598 		if (tcam->repli) {
3599 			struct fw_ldst_cmd ldst_cmd;
3600 			struct fw_ldst_mps_rplc mps_rplc;
3601 
3602 			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
3603 			ldst_cmd.op_to_addrspace =
3604 				htonl(V_FW_CMD_OP(FW_LDST_CMD) |
3605 				      F_FW_CMD_REQUEST |
3606 				      F_FW_CMD_READ |
3607 				      V_FW_LDST_CMD_ADDRSPACE(
3608 					      FW_LDST_ADDRSPC_MPS));
3609 
3610 			ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
3611 
3612 			ldst_cmd.u.mps.rplc.fid_idx =
3613 				htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
3614 				      V_FW_LDST_CMD_IDX(i));
3615 
3616 			rc = begin_synchronized_op(padap, NULL,
3617 			    SLEEP_OK | INTR_OK, "t4cudm");
3618 			if (rc == 0) {
3619 				rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
3620 						sizeof(ldst_cmd), &ldst_cmd);
3621 				end_synchronized_op(padap, 0);
3622 			}
3623 
3624 			if (rc)
3625 				mps_rpl_backdoor(padap, &mps_rplc);
3626 			else
3627 				mps_rplc = ldst_cmd.u.mps.rplc;
3628 
3629 			tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
3630 			tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
3631 			tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
3632 			tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
3633 			if (padap->chip_params->mps_rplc_size >
3634 					CUDBG_MAX_RPLC_SIZE) {
3635 				tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
3636 				tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
3637 				tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
3638 				tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
3639 			}
3640 		}
3641 		cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
3642 
3643 		tcam->idx = i;
3644 		tcam->rplc_size = padap->chip_params->mps_rplc_size;
3645 
3646 		total_size += sizeof(struct cudbg_mps_tcam);
3647 
3648 		tcam++;
3649 	}
3650 
3651 	if (total_size == 0) {
3652 		rc = CUDBG_SYSTEM_ERROR;
3653 		goto err1;
3654 	}
3655 
3656 	scratch_buff.size = total_size;
3657 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3658 	if (rc)
3659 		goto err1;
3660 
3661 	rc = compress_buff(&scratch_buff, dbg_buff);
3662 
3663 err1:
3664 	scratch_buff.size = size;
3665 	release_scratch_buff(&scratch_buff, dbg_buff);
3666 err:
3667 	return rc;
3668 }
3669 
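/* Dump the PCIe configuration space registers listed in
 * t5_pcie_config_array as raw 32-bit values.
 */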
3670 static int collect_pcie_config(struct cudbg_init *pdbg_init,
3671 			       struct cudbg_buffer *dbg_buff,
3672 			       struct cudbg_error *cudbg_err)
3673 {
3674 	struct cudbg_buffer scratch_buff;
3675 	struct adapter *padap = pdbg_init->adap;
3676 	u32 size, *value, j;
3677 	int i, rc, n;
3678 
3679 	size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
3680 	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
3681 	scratch_buff.size = size;
3682 
3683 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3684 	if (rc)
3685 		goto err;
3686 
3687 	value = (u32 *)scratch_buff.data;
3688 	for (i = 0; i < n; i++) {
3689 		for (j = t5_pcie_config_array[i][0];
3690 		     j <= t5_pcie_config_array[i][1]; j += 4) {
3691 			*value++ = t4_hw_pci_read_cfg4(padap, j);
3692 		}
3693 	}
3694 
3695 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3696 	if (rc)
3697 		goto err1;
3698 
3699 	rc = compress_buff(&scratch_buff, dbg_buff);
3700 
3701 err1:
3702 	release_scratch_buff(&scratch_buff, dbg_buff);
3703 err:
3704 	return rc;
3705 }
3706 
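/* Read a single LE TCAM entry through the DBGI debug interface: issue the
 * read command for the given tid, poll for completion, verify the response
 * status, and copy the response data into *tid_data.
 */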
3707 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
3708 			  struct cudbg_tid_data *tid_data)
3709 {
3710 	int i, cmd_retry = 8;
3711 	struct adapter *padap = pdbg_init->adap;
3712 	u32 val;
3713 
3714 	/* Fill REQ_DATA regs with 0's */
3715 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3716 		t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);
3717 
3718 	/* Write DBIG command */
3719 	val = (0x4 << S_DBGICMD) | tid;
3720 	t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
3721 	tid_data->dbig_cmd = val;
3722 
3723 	val = 0;
3724 	val |= 1 << S_DBGICMDSTRT;
3725 	val |= 1;  /* LE mode */
3726 	t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
3727 	tid_data->dbig_conf = val;
3728 
3729 	/* Poll the DBGICMDBUSY bit */
3730 	val = 1;
3731 	while (val) {
3732 		val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
3733 		val = (val >> S_DBGICMDBUSY) & 1;
3734 		cmd_retry--;
3735 		if (!cmd_retry) {
3736 			if (pdbg_init->verbose)
3737 				pdbg_init->print("%s(): Timeout waiting for non-busy\n",
3738 					 __func__);
3739 			return CUDBG_SYSTEM_ERROR;
3740 		}
3741 	}
3742 
3743 	/* Check RESP status */
3744 	val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
3746 	tid_data->dbig_rsp_stat = val;
3747 	if (!(val & 1)) {
3748 		if (pdbg_init->verbose)
3749 			pdbg_init->print("%s(): DBGI command failed\n", __func__);
3750 		return CUDBG_SYSTEM_ERROR;
3751 	}
3752 
3753 	/* Read RESP data */
3754 	for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3755 		tid_data->data[i] = t4_read_reg(padap,
3756 						A_LE_DB_DBGI_RSP_DATA +
3757 						(i << 2));
3758 
3759 	tid_data->tid = tid;
3760 
3761 	return 0;
3762 }
3763 
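/* Dump the LE TCAM: record the region start indices, work out the number of
 * tids (depending on whether hashing is enabled), then read every tid via
 * cudbg_read_tid(), compressing the output in CUDBG_CHUNK_SIZE pieces.
 */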
3764 static int collect_le_tcam(struct cudbg_init *pdbg_init,
3765 			   struct cudbg_buffer *dbg_buff,
3766 			   struct cudbg_error *cudbg_err)
3767 {
3768 	struct cudbg_buffer scratch_buff;
3769 	struct adapter *padap = pdbg_init->adap;
3770 	struct cudbg_tcam tcam_region = {0};
3771 	struct cudbg_tid_data *tid_data = NULL;
3772 	u32 value, bytes = 0, bytes_left  = 0;
3773 	u32 i;
3774 	int rc, size;
3775 
3776 	/* Get the LE regions */
3777 	/* Get hash base index */
3778 	value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3779 	tcam_region.tid_hash_base = value;
3780 
3781 	/* Get routing table index */
3782 	value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
3783 	tcam_region.routing_start = value;
3784 
3785 	/* Get clip table index */
3786 	value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
3787 	tcam_region.clip_start = value;
3788 
3789 	/* Get filter table index */
3790 	value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
3791 	tcam_region.filter_start = value;
3792 
3793 	/* Get server table index */
3794 	value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
3795 	tcam_region.server_start = value;
3796 
3797 	/* Check whether hash is enabled and calculate the max tids */
3798 	value = t4_read_reg(padap, A_LE_DB_CONFIG);
3799 	if ((value >> S_HASHEN) & 1) {
3800 		value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
3801 		if (chip_id(padap) > CHELSIO_T5)
3802 			tcam_region.max_tid = (value & 0xFFFFF) +
3803 					      tcam_region.tid_hash_base;
3804 		else {	    /* for T5 */
3805 			value = G_HASHTIDSIZE(value);
3806 			value = 1 << value;
3807 			tcam_region.max_tid = value +
3808 				tcam_region.tid_hash_base;
3809 		}
3810 	} else	 /* hash not enabled */
3811 		tcam_region.max_tid = CUDBG_MAX_TCAM_TID;
3812 
3813 	size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
3814 	size += sizeof(struct cudbg_tcam);
3815 	scratch_buff.size = size;
3816 
3817 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3818 	if (rc)
3819 		goto err;
3820 
3821 	rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
3822 	if (rc)
3823 		goto err;
3824 
3825 	memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
3826 
3827 	tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
3828 					     scratch_buff.data) + 1);
3829 	bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
3830 	bytes = sizeof(struct cudbg_tcam);
3831 
3832 	/* read all tid */
3833 	for (i = 0; i < tcam_region.max_tid; i++) {
3834 		if (bytes_left < sizeof(struct cudbg_tid_data)) {
3835 			scratch_buff.size = bytes;
3836 			rc = compress_buff(&scratch_buff, dbg_buff);
3837 			if (rc)
3838 				goto err1;
3839 			scratch_buff.size = CUDBG_CHUNK_SIZE;
3840 			release_scratch_buff(&scratch_buff, dbg_buff);
3841 
3842 			/* new alloc */
3843 			rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
3844 					      &scratch_buff);
3845 			if (rc)
3846 				goto err;
3847 
3848 			tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
3849 			bytes_left = CUDBG_CHUNK_SIZE;
3850 			bytes = 0;
3851 		}
3852 
3853 		rc = cudbg_read_tid(pdbg_init, i, tid_data);
3854 
3855 		if (rc) {
3856 			cudbg_err->sys_err = rc;
3857 			goto err1;
3858 		}
3859 
3860 		tid_data++;
3861 		bytes_left -= sizeof(struct cudbg_tid_data);
3862 		bytes += sizeof(struct cudbg_tid_data);
3863 	}
3864 
3865 	if (bytes) {
3866 		scratch_buff.size = bytes;
3867 		rc = compress_buff(&scratch_buff, dbg_buff);
3868 	}
3869 
3870 err1:
3871 	scratch_buff.size = CUDBG_CHUNK_SIZE;
3872 	release_scratch_buff(&scratch_buff, dbg_buff);
3873 err:
3874 	return rc;
3875 }
3876 
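/* Dump the MA indirect registers (T6 and later only) described by
 * t6_ma_ireg_array and t6_ma_ireg_array2.
 */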
3877 static int collect_ma_indirect(struct cudbg_init *pdbg_init,
3878 			       struct cudbg_buffer *dbg_buff,
3879 			       struct cudbg_error *cudbg_err)
3880 {
3881 	struct cudbg_buffer scratch_buff;
3882 	struct adapter *padap = pdbg_init->adap;
3883 	struct ireg_buf *ma_indr = NULL;
3884 	u32 size, j;
3885 	int i, rc, n;
3886 
3887 	if (chip_id(padap) < CHELSIO_T6) {
3888 		if (pdbg_init->verbose)
3889 			pdbg_init->print("MA indirect available only in T6\n");
3890 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3891 		goto err;
3892 	}
3893 
3894 	n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
3895 	size = sizeof(struct ireg_buf) * n * 2;
3896 	scratch_buff.size = size;
3897 
3898 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3899 	if (rc)
3900 		goto err;
3901 
3902 	ma_indr = (struct ireg_buf *)scratch_buff.data;
3903 
3904 	for (i = 0; i < n; i++) {
3905 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3906 		u32 *buff = ma_indr->outbuf;
3907 
3908 		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
3909 		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
3910 		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
3911 		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
3912 
3913 		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
3914 				 buff, ma_fli->ireg_offset_range,
3915 				 ma_fli->ireg_local_offset);
3916 
3917 		ma_indr++;
3918 
3919 	}
3920 
3921 	n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));
3922 
3923 	for (i = 0; i < n; i++) {
3924 		struct ireg_field *ma_fli = &ma_indr->tp_pio;
3925 		u32 *buff = ma_indr->outbuf;
3926 
3927 		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
3928 		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
3929 		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
3930 
3931 		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
3932 			t4_read_indirect(padap, ma_fli->ireg_addr,
3933 					 ma_fli->ireg_data, buff, 1,
3934 					 ma_fli->ireg_local_offset);
3935 			buff++;
3936 			ma_fli->ireg_local_offset += 0x20;
3937 		}
3938 		ma_indr++;
3939 	}
3940 
3941 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3942 	if (rc)
3943 		goto err1;
3944 
3945 	rc = compress_buff(&scratch_buff, dbg_buff);
3946 
3947 err1:
3948 	release_scratch_buff(&scratch_buff, dbg_buff);
3949 err:
3950 	return rc;
3951 }
3952 
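/* Dump the HMA indirect registers (T6 and later only) described by
 * t6_hma_ireg_array.
 */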
3953 static int collect_hma_indirect(struct cudbg_init *pdbg_init,
3954 			       struct cudbg_buffer *dbg_buff,
3955 			       struct cudbg_error *cudbg_err)
3956 {
3957 	struct cudbg_buffer scratch_buff;
3958 	struct adapter *padap = pdbg_init->adap;
3959 	struct ireg_buf *hma_indr = NULL;
3960 	u32 size;
3961 	int i, rc, n;
3962 
3963 	if (chip_id(padap) < CHELSIO_T6) {
3964 		if (pdbg_init->verbose)
3965 			pdbg_init->print("HMA indirect available only in T6\n");
3966 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3967 		goto err;
3968 	}
3969 
3970 	n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
3971 	size = sizeof(struct ireg_buf) * n;
3972 	scratch_buff.size = size;
3973 
3974 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3975 	if (rc)
3976 		goto err;
3977 
3978 	hma_indr = (struct ireg_buf *)scratch_buff.data;
3979 
3980 	for (i = 0; i < n; i++) {
3981 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
3982 		u32 *buff = hma_indr->outbuf;
3983 
3984 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
3985 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
3986 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
3987 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
3988 
3989 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
3990 				 buff, hma_fli->ireg_offset_range,
3991 				 hma_fli->ireg_local_offset);
3992 
3993 		hma_indr++;
3994 
3995 	}
3996 
3997 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
3998 	if (rc)
3999 		goto err1;
4000 
4001 	rc = compress_buff(&scratch_buff, dbg_buff);
4002 
4003 err1:
4004 	release_scratch_buff(&scratch_buff, dbg_buff);
4005 err:
4006 	return rc;
4007 }
4008 
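/* Dump the PCIe PDBG and CDBG indirect registers described by
 * t5_pcie_pdbg_array and t5_pcie_cdbg_array.
 */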
4009 static int collect_pcie_indirect(struct cudbg_init *pdbg_init,
4010 				 struct cudbg_buffer *dbg_buff,
4011 				 struct cudbg_error *cudbg_err)
4012 {
4013 	struct cudbg_buffer scratch_buff;
4014 	struct adapter *padap = pdbg_init->adap;
4015 	struct ireg_buf *ch_pcie;
4016 	u32 size;
4017 	int i, rc, n;
4018 
4019 	n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
4020 	size = sizeof(struct ireg_buf) * n * 2;
4021 	scratch_buff.size = size;
4022 
4023 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4024 	if (rc)
4025 		goto err;
4026 
4027 	ch_pcie = (struct ireg_buf *)scratch_buff.data;
4028 
4029 	/* PCIE_PDBG */
4030 	for (i = 0; i < n; i++) {
4031 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4032 		u32 *buff = ch_pcie->outbuf;
4033 
4034 		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
4035 		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
4036 		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
4037 		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
4038 
4039 		t4_read_indirect(padap,
4040 				pcie_pio->ireg_addr,
4041 				pcie_pio->ireg_data,
4042 				buff,
4043 				pcie_pio->ireg_offset_range,
4044 				pcie_pio->ireg_local_offset);
4045 
4046 		ch_pcie++;
4047 	}
4048 
4049 	/* PCIE_CDBG */
4050 	n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
4051 	for (i = 0; i < n; i++) {
4052 		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4053 		u32 *buff = ch_pcie->outbuf;
4054 
4055 		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
4056 		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
4057 		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
4058 		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
4059 
4060 		t4_read_indirect(padap,
4061 				pcie_pio->ireg_addr,
4062 				pcie_pio->ireg_data,
4063 				buff,
4064 				pcie_pio->ireg_offset_range,
4065 				pcie_pio->ireg_local_offset);
4066 
4067 		ch_pcie++;
4068 	}
4069 
4070 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4071 	if (rc)
4072 		goto err1;
4073 
4074 	rc = compress_buff(&scratch_buff, dbg_buff);
4075 
4076 err1:
4077 	release_scratch_buff(&scratch_buff, dbg_buff);
4078 err:
4079 	return rc;
4080 
4081 }
4082 
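/* Dump the TP indirect registers: the TP_PIO, TP_TM_PIO, and TP_MIB_INDEX
 * ranges, using the T5 or T6 register tables as appropriate.
 */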
4083 static int collect_tp_indirect(struct cudbg_init *pdbg_init,
4084 			       struct cudbg_buffer *dbg_buff,
4085 			       struct cudbg_error *cudbg_err)
4086 {
4087 	struct cudbg_buffer scratch_buff;
4088 	struct adapter *padap = pdbg_init->adap;
4089 	struct ireg_buf *ch_tp_pio;
4090 	u32 size;
4091 	int i, rc, n = 0;
4092 
4093 	if (is_t5(padap))
4094 		n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
4095 	else if (is_t6(padap))
4096 		n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));
4097 
4098 	size = sizeof(struct ireg_buf) * n * 3;
4099 	scratch_buff.size = size;
4100 
4101 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4102 	if (rc)
4103 		goto err;
4104 
4105 	ch_tp_pio = (struct ireg_buf *)scratch_buff.data;
4106 
4107 	/* TP_PIO */
4108 	for (i = 0; i < n; i++) {
4109 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4110 		u32 *buff = ch_tp_pio->outbuf;
4111 
4112 		if (is_t5(padap)) {
4113 			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
4114 			tp_pio->ireg_data = t5_tp_pio_array[i][1];
4115 			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
4116 			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
4117 		} else if (is_t6(padap)) {
4118 			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
4119 			tp_pio->ireg_data = t6_tp_pio_array[i][1];
4120 			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
4121 			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
4122 		}
4123 
4124 		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
4125 			       tp_pio->ireg_local_offset, true);
4126 
4127 		ch_tp_pio++;
4128 	}
4129 
4130 	/* TP_TM_PIO */
4131 	if (is_t5(padap))
4132 		n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
4133 	else if (is_t6(padap))
4134 		n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));
4135 
4136 	for (i = 0; i < n; i++) {
4137 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4138 		u32 *buff = ch_tp_pio->outbuf;
4139 
4140 		if (is_t5(padap)) {
4141 			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
4142 			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
4143 			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
4144 			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
4145 		} else if (is_t6(padap)) {
4146 			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
4147 			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
4148 			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
4149 			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
4150 		}
4151 
4152 		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
4153 				  tp_pio->ireg_local_offset, true);
4154 
4155 		ch_tp_pio++;
4156 	}
4157 
4158 	/* TP_MIB_INDEX */
4159 	if (is_t5(padap))
4160 		n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
4161 	else if (is_t6(padap))
4162 		n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));
4163 
4164 	for (i = 0; i < n ; i++) {
4165 		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4166 		u32 *buff = ch_tp_pio->outbuf;
4167 
4168 		if (is_t5(padap)) {
4169 			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
4170 			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
4171 			tp_pio->ireg_local_offset =
4172 				t5_tp_mib_index_array[i][2];
4173 			tp_pio->ireg_offset_range =
4174 				t5_tp_mib_index_array[i][3];
4175 		} else if (is_t6(padap)) {
4176 			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
4177 			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
4178 			tp_pio->ireg_local_offset =
4179 				t6_tp_mib_index_array[i][2];
4180 			tp_pio->ireg_offset_range =
4181 				t6_tp_mib_index_array[i][3];
4182 		}
4183 
4184 		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
4185 			       tp_pio->ireg_local_offset, true);
4186 
4187 		ch_tp_pio++;
4188 	}
4189 
4190 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4191 	if (rc)
4192 		goto err1;
4193 
4194 	rc = compress_buff(&scratch_buff, dbg_buff);
4195 
4196 err1:
4197 	release_scratch_buff(&scratch_buff, dbg_buff);
4198 err:
4199 	return rc;
4200 }
4201 
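/* Dump the two SGE debug indirect register ranges described by
 * t5_sge_dbg_index_array.
 */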
4202 static int collect_sge_indirect(struct cudbg_init *pdbg_init,
4203 				struct cudbg_buffer *dbg_buff,
4204 				struct cudbg_error *cudbg_err)
4205 {
4206 	struct cudbg_buffer scratch_buff;
4207 	struct adapter *padap = pdbg_init->adap;
4208 	struct ireg_buf *ch_sge_dbg;
4209 	u32 size;
4210 	int i, rc;
4211 
4212 	size = sizeof(struct ireg_buf) * 2;
4213 	scratch_buff.size = size;
4214 
4215 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4216 	if (rc)
4217 		goto err;
4218 
4219 	ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;
4220 
4221 	for (i = 0; i < 2; i++) {
4222 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
4223 		u32 *buff = ch_sge_dbg->outbuf;
4224 
4225 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
4226 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
4227 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
4228 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
4229 
4230 		t4_read_indirect(padap,
4231 				sge_pio->ireg_addr,
4232 				sge_pio->ireg_data,
4233 				buff,
4234 				sge_pio->ireg_offset_range,
4235 				sge_pio->ireg_local_offset);
4236 
4237 		ch_sge_dbg++;
4238 	}
4239 
4240 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4241 	if (rc)
4242 		goto err1;
4243 
4244 	rc = compress_buff(&scratch_buff, dbg_buff);
4245 
4246 err1:
4247 	release_scratch_buff(&scratch_buff, dbg_buff);
4248 err:
4249 	return rc;
4250 }
4251 
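/* Collect a small set of individual debug registers (TP scheduler and
 * CSIDE/ESIDE interrupt status, PCIe CDEBUG AppData, and SGE debug data);
 * see the register list in the comment below.
 */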
4252 static int collect_full(struct cudbg_init *pdbg_init,
4253 			struct cudbg_buffer *dbg_buff,
4254 			struct cudbg_error *cudbg_err)
4255 {
4256 	struct cudbg_buffer scratch_buff;
4257 	struct adapter *padap = pdbg_init->adap;
4258 	u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
4259 	u32 *sp;
4260 	int rc;
4261 	int nreg = 0;
4262 
4263 	/* Collect the following registers:
4264 	 * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
4265 	 * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
4266 	 * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
4267 	 * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
4268 	 * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
4269 	 * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3), T6 only,
4270 	 * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
4271 	 */
4272 
4273 	if (is_t5(padap))
4274 		nreg = 6;
4275 	else if (is_t6(padap))
4276 		nreg = 7;
4277 
4278 	scratch_buff.size = nreg * sizeof(u32);
4279 
4280 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4281 	if (rc)
4282 		goto err;
4283 
4284 	sp = (u32 *)scratch_buff.data;
4285 
4286 	/* TP_DBG_SCHED_TX */
4287 	reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
4288 	reg_offset_range = 1;
4289 
4290 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4291 
4292 	sp++;
4293 
4294 	/* TP_DBG_SCHED_RX */
4295 	reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
4296 	reg_offset_range = 1;
4297 
4298 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4299 
4300 	sp++;
4301 
4302 	/* TP_DBG_CSIDE_INT */
4303 	reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
4304 	reg_offset_range = 1;
4305 
4306 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4307 
4308 	sp++;
4309 
4310 	/* TP_DBG_ESIDE_INT */
4311 	reg_local_offset = t5_tp_pio_array[8][2] + 3;
4312 	reg_offset_range = 1;
4313 
4314 	t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4315 
4316 	sp++;
4317 
4318 	/* PCIE_CDEBUG_INDEX[AppData0] */
4319 	reg_addr = t5_pcie_cdbg_array[0][0];
4320 	reg_data = t5_pcie_cdbg_array[0][1];
4321 	reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
4322 	reg_offset_range = 1;
4323 
4324 	t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
4325 			 reg_local_offset);
4326 
4327 	sp++;
4328 
4329 	if (is_t6(padap)) {
4330 		/* PCIE_CDEBUG_INDEX[AppData1] */
4331 		reg_addr = t5_pcie_cdbg_array[0][0];
4332 		reg_data = t5_pcie_cdbg_array[0][1];
4333 		reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
4334 		reg_offset_range = 1;
4335 
4336 		t4_read_indirect(padap, reg_addr, reg_data, sp,
4337 				 reg_offset_range, reg_local_offset);
4338 
4339 		sp++;
4340 	}
4341 
4342 	/* SGE_DEBUG_DATA_HIGH_INDEX_10 */
4343 	*sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);
4344 
4345 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4346 	if (rc)
4347 		goto err1;
4348 
4349 	rc = compress_buff(&scratch_buff, dbg_buff);
4350 
4351 err1:
4352 	release_scratch_buff(&scratch_buff, dbg_buff);
4353 err:
4354 	return rc;
4355 }
4356 
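/* Collect serial/part numbers and the serial-config, VPD, and firmware
 * versions.  The body is compiled out ("notyet") and the function currently
 * returns CUDBG_STATUS_NOT_IMPLEMENTED.
 */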
4357 static int collect_vpd_data(struct cudbg_init *pdbg_init,
4358 			    struct cudbg_buffer *dbg_buff,
4359 			    struct cudbg_error *cudbg_err)
4360 {
4361 #ifdef notyet
4362 	struct cudbg_buffer scratch_buff;
4363 	struct adapter *padap = pdbg_init->adap;
4364 	struct struct_vpd_data *vpd_data;
4365 	char vpd_ver[4];
4366 	u32 fw_vers;
4367 	u32 size;
4368 	int rc;
4369 
4370 	size = sizeof(struct struct_vpd_data);
4371 	scratch_buff.size = size;
4372 
4373 	rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4374 	if (rc)
4375 		goto err;
4376 
4377 	vpd_data = (struct struct_vpd_data *)scratch_buff.data;
4378 
4379 	if (is_t5(padap)) {
4380 		read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
4381 		read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
4382 		read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
4383 		read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
4384 	} else if (is_t6(padap)) {
4385 		read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
4386 		read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
4387 		read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
4388 		read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
4389 	}
4390 
4391 	if (is_fw_attached(pdbg_init)) {
4392 		rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
4393 	} else {
4394 		rc = 1;
4395 	}
4396 
4397 	if (rc) {
4398 		/* Now trying with backdoor mechanism */
4399 		rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
4400 				  (u8 *)&vpd_data->scfg_vers);
4401 		if (rc)
4402 			goto err1;
4403 	}
4404 
4405 	if (is_fw_attached(pdbg_init)) {
4406 		rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
4407 	} else {
4408 		rc = 1;
4409 	}
4410 
4411 	if (rc) {
4412 		/* Now trying with backdoor mechanism */
4413 		rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
4414 				  (u8 *)vpd_ver);
4415 		if (rc)
4416 			goto err1;
4417 		/* read_vpd_reg return string of stored hex
4418 		/* read_vpd_reg returns the stored hex as a string;
4419 		 * convert the hex string to a character string.
4420 		 * The VPD version is only 2 bytes. */
4421 		vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
4422 	}
4423 
4424 	/* Get FW version if it's not already filled in */
4425 	fw_vers = padap->params.fw_vers;
4426 	if (!fw_vers) {
4427 		rc = t4_get_fw_version(padap, &fw_vers);
4428 		if (rc)
4429 			goto err1;
4430 	}
4431 
4432 	vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
4433 	vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
4434 	vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
4435 	vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);
4436 
4437 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
4438 	if (rc)
4439 		goto err1;
4440 
4441 	rc = compress_buff(&scratch_buff, dbg_buff);
4442 
4443 err1:
4444 	release_scratch_buff(&scratch_buff, dbg_buff);
4445 err:
4446 	return rc;
4447 #endif
4448 	return (CUDBG_STATUS_NOT_IMPLEMENTED);
4449 }
4450