1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11
12 /*-
13 * Copyright (c) 2019 Chelsio Communications, Inc.
14 * All rights reserved.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37
38 #include <sys/types.h>
39 #include <sys/param.h>
40
41 #include "common/common.h"
42 #include "common/t4_regs.h"
43 #include "common/t4_chip_type.h"
44 #include "cudbg.h"
45 #include "cudbg_lib_common.h"
46 #include "cudbg_lib.h"
47 #include "cudbg_entity.h"
48
49 #define BUFFER_WARN_LIMIT 10000000
50
51 struct large_entity large_entity_list[] = {
52 {CUDBG_EDC0, 0, 0},
53 {CUDBG_EDC1, 0, 0},
54 {CUDBG_MC0, 0, 0},
55 {CUDBG_MC1, 0, 0}
56 };
57
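/*
 * Returns non-zero when the firmware is initialized and reachable
 * (FW_OK set on the adapter), i.e. collectors may issue mailbox commands.
 */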
58 static int
59 is_fw_attached(struct cudbg_init *pdbg_init)
60 {
61
62 return (pdbg_init->adap->flags & FW_OK);
63 }
64
65 /* This function adds padding bytes to debug_buffer to make it
66  * 4-byte aligned. */
67 static void
68 align_debug_buffer(struct cudbg_buffer *dbg_buff,
69 struct cudbg_entity_hdr *entity_hdr)
70 {
71 u8 zero_buf[4] = {0};
72 u8 padding, remain;
73
74 remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
75 padding = 4 - remain;
76 if (remain) {
77 memcpy(((u8 *) dbg_buff->data) + dbg_buff->offset, &zero_buf,
78 padding);
79 dbg_buff->offset += padding;
80 entity_hdr->num_pad = padding;
81 }
82
83 entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
84 }
85
86 static void
87 u32_swap(void *a, void *b, int size)
88 {
89 u32 t = *(u32 *)a;
90
91 *(u32 *)a = *(u32 *)b;
92 *(u32 *)b = t;
93 }
94
95 static void
96 generic_swap(void *a1, void *b1, int size)
97 {
98 u8 t;
99 u8 *a = (u8 *)a1;
100 u8 *b = (u8 *)b1;
101
102 do {
103 t = *a;
104 *(a++) = *b;
105 *(b++) = t;
106 } while (--size > 0);
107 }
108
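/*
 * In-place heapsort over an array of 'num' fixed-size elements.  Kept under
 * the historical name qsort; it uses no recursion or extra memory, and
 * picks u32_swap as a fast path when the element size is 4 bytes.
 */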
109 static void
110 qsort(void *base_val, int num, int size,
111 int (*cmp_func)(const void *, const void *),
112 void (*swap_func)(void *, void *, int size))
113 {
114 /* pre-scale counters for performance */
115 int i = (num / 2 - 1) * size;
116 int n = num * size;
117 int c, r;
118 u8 *base = (u8 *)base_val;
119
120 if (!swap_func)
121 swap_func = (size == 4 ? u32_swap : generic_swap);
122
123 /* heapify */
124 for (; i >= 0; i -= size) {
125 for (r = i; r * 2 + size < n; r = c) {
126 c = r * 2 + size;
127 if (c < n - size &&
128 cmp_func(base + c, base + c + size) < 0)
129 c += size;
130 if (cmp_func(base + r, base + c) >= 0)
131 break;
132 swap_func(base + r, base + c, size);
133 }
134 }
135
136 /* sort */
137 for (i = n - size; i > 0; i -= size) {
138 swap_func(base, base + i, size);
139 for (r = 0; r * 2 + size < i; r = c) {
140 c = r * 2 + size;
141 if (c < i - size &&
142 cmp_func(base + c, base + c + size) < 0)
143 c += size;
144 if (cmp_func(base + r, base + c) >= 0)
145 break;
146 swap_func(base + r, base + c, size);
147 }
148 }
149 }
150
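/*
 * Read an SGE context.  When the firmware is attached, go through the
 * mailbox (t4_sge_ctxt_rd); otherwise, or if that fails, fall back to the
 * register backdoor read (t4_sge_ctxt_rd_bd).
 */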
151 static void
152 read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
153 enum ctxt_type ctype, u32 *data)
154 {
155 struct adapter *padap = pdbg_init->adap;
156 int rc = -1;
157
158 if (is_fw_attached(pdbg_init)) {
159 rc = begin_synchronized_op(padap->port[0], 1, 1);
160 if (rc != 0)
161 goto out;
162 rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype,
163 data);
164 end_synchronized_op(padap->port[0], 1);
165 }
166
167 out:
168 if (rc)
169 t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
170 }
171
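/*
 * Locate the next free extended entity header at the end of the output
 * buffer by walking the chain of existing extended entities, returning
 * their accumulated size in *ext_size and advancing the buffer offset.
 */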
172 static int
173 get_next_ext_entity_hdr(void *outbuf, u32 *ext_size,
174 struct cudbg_buffer *dbg_buff,
175 struct cudbg_entity_hdr **entity_hdr)
176 {
177 struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
178 int rc = 0;
179 u32 ext_offset = cudbg_hdr->data_len;
180 *ext_size = 0;
181
182 if (dbg_buff->size - dbg_buff->offset <=
183 sizeof(struct cudbg_entity_hdr)) {
184 rc = CUDBG_STATUS_BUFFER_SHORT;
185 goto err;
186 }
187
188 *entity_hdr = (struct cudbg_entity_hdr *)
189 ((char *)outbuf + cudbg_hdr->data_len);
190
191 /* Find the last extended entity header */
192 while ((*entity_hdr)->size) {
193
194 ext_offset += sizeof(struct cudbg_entity_hdr) +
195 (*entity_hdr)->size;
196
197 *ext_size += (*entity_hdr)->size +
198 sizeof(struct cudbg_entity_hdr);
199
200 if (dbg_buff->size - dbg_buff->offset + *ext_size <=
201 sizeof(struct cudbg_entity_hdr)) {
202 rc = CUDBG_STATUS_BUFFER_SHORT;
203 goto err;
204 }
205
206 if (ext_offset != (*entity_hdr)->next_ext_offset) {
207 ext_offset -= sizeof(struct cudbg_entity_hdr) +
208 (*entity_hdr)->size;
209 break;
210 }
211
212 (*entity_hdr)->next_ext_offset = *ext_size;
213
214 *entity_hdr = (struct cudbg_entity_hdr *)
215 ((char *)outbuf +
216 ext_offset);
217 }
218
219 /* update the data offset */
220 dbg_buff->offset = ext_offset;
221 err:
222 return rc;
223 }
224
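/*
 * Write one collected entity to the adapter flash.  The entity is skipped
 * (and accounted via update_skip_size) if its data would fall beyond
 * CUDBG_FLASH_SIZE or exceed the space remaining in the flash region.
 */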
225 static int
226 wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
227 u32 cur_entity_data_offset,
228 u32 cur_entity_size,
229 int entity_nu, u32 ext_size)
230 {
231 struct cudbg_private *priv = handle;
232 struct cudbg_init *cudbg_init = &priv->dbg_init;
233 struct cudbg_flash_sec_info *sec_info = &priv->sec_info;
234 struct adapter *adap = cudbg_init->adap;
235 u64 timestamp;
236 u32 cur_entity_hdr_offset = sizeof(struct cudbg_hdr);
237 u32 remain_flash_size;
238 u32 flash_data_offset;
239 u32 data_hdr_size;
240 int rc = -1;
241
242 data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
243 sizeof(struct cudbg_hdr);
244
245 flash_data_offset = (FLASH_CUDBG_NSECS *
246 (sizeof(struct cudbg_flash_hdr) +
247 data_hdr_size)) +
248 (cur_entity_data_offset - data_hdr_size);
249
250 if (flash_data_offset > CUDBG_FLASH_SIZE) {
251 update_skip_size(sec_info, cur_entity_size);
252 if (cudbg_init->verbose)
253 cudbg_init->print(adap->dip, CE_NOTE,
254 "Large entity skipping...\n");
255 return rc;
256 }
257
258 remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
259
260 if (cur_entity_size > remain_flash_size) {
261 update_skip_size(sec_info, cur_entity_size);
262 if (cudbg_init->verbose)
263 cudbg_init->print(adap->dip, CE_NOTE,
264 "Large entity skipping...\n");
265 } else {
266 timestamp = 0;
267
268 cur_entity_hdr_offset +=
269 (sizeof(struct cudbg_entity_hdr) *
270 (entity_nu - 1));
271
272 rc = cudbg_write_flash(handle, timestamp, dbg_buff,
273 cur_entity_data_offset,
274 cur_entity_hdr_offset,
275 cur_entity_size,
276 ext_size);
277 if (rc == CUDBG_STATUS_FLASH_FULL && cudbg_init->verbose)
278 cudbg_init->print(adap->dip, CE_NOTE,
279 "\n\tFLASH is full... "
280 "can not write in flash more\n\n");
281 }
282
283 return rc;
284 }
285
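/*
 * Top-level collection routine.  Fills in the cudbg header, walks the
 * requested entity bitmap calling the per-entity process_entity handlers,
 * then retries any large entities that were skipped for lack of buffer
 * space.  Each entity is optionally mirrored to flash.
 */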
286 int
287 cudbg_collect(void *handle, void *outbuf, u32 *outbuf_size)
288 {
289 struct cudbg_entity_hdr *entity_hdr = NULL;
290 struct cudbg_entity_hdr *ext_entity_hdr = NULL;
291 struct cudbg_hdr *cudbg_hdr;
292 struct cudbg_buffer dbg_buff;
293 struct cudbg_error cudbg_err = {0};
294 int large_entity_code;
295
296 u8 *dbg_bitmap = ((struct cudbg_private *)handle)->dbg_init.dbg_bitmap;
297 struct cudbg_init *cudbg_init =
298 &(((struct cudbg_private *)handle)->dbg_init);
299 struct adapter *padap = cudbg_init->adap;
300 u32 total_size, remaining_buf_size;
301 u32 ext_size = 0;
302 int index, bit, i, rc = -1;
303 int all;
304 bool flag_ext = 0;
305
306 reset_skip_entity();
307
308 dbg_buff.data = outbuf;
309 dbg_buff.size = *outbuf_size;
310 dbg_buff.offset = 0;
311
312 cudbg_hdr = (struct cudbg_hdr *)dbg_buff.data;
313 cudbg_hdr->signature = CUDBG_SIGNATURE;
314 cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
315 cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
316 cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
317 cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
318 cudbg_hdr->chip_ver = padap->params.chip;
319
320 if (cudbg_hdr->data_len)
321 flag_ext = 1;
322
323 if (sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY >
324 dbg_buff.size) {
325 rc = CUDBG_STATUS_SMALL_BUFF;
326 total_size = cudbg_hdr->hdr_len;
327 goto err;
328 }
329
330 /* If ext flag is set then move the offset to the end of the buf
331 * so that we can add ext entities
332 */
333 if (flag_ext) {
334 ext_entity_hdr = (struct cudbg_entity_hdr *)
335 ((char *)outbuf + cudbg_hdr->hdr_len +
336 (sizeof(struct cudbg_entity_hdr) *
337 (CUDBG_EXT_ENTITY - 1)));
338 ext_entity_hdr->start_offset = cudbg_hdr->data_len;
339 ext_entity_hdr->entity_type = CUDBG_EXT_ENTITY;
340 ext_entity_hdr->size = 0;
341 dbg_buff.offset = cudbg_hdr->data_len;
342 } else {
343 dbg_buff.offset += cudbg_hdr->hdr_len; /* move 24 bytes*/
344 dbg_buff.offset += CUDBG_MAX_ENTITY *
345 sizeof(struct cudbg_entity_hdr);
346 }
347
348 total_size = dbg_buff.offset;
349 all = dbg_bitmap[0] & (1 << CUDBG_ALL);
350
351 for (i = 1; i < CUDBG_MAX_ENTITY; i++) {
352 index = i / 8;
353 bit = i % 8;
354
355 if (entity_list[i].bit == CUDBG_EXT_ENTITY)
356 continue;
357
358 if (all || (dbg_bitmap[index] & (1 << bit))) {
359
360 if (!flag_ext) {
361 rc = get_entity_hdr(outbuf, i, dbg_buff.size,
362 &entity_hdr);
363 if (rc)
364 cudbg_hdr->hdr_flags = rc;
365 } else {
366 rc = get_next_ext_entity_hdr(outbuf, &ext_size,
367 &dbg_buff,
368 &entity_hdr);
369 if (rc)
370 goto err;
371
372 /* move the offset after the ext header */
373 dbg_buff.offset +=
374 sizeof(struct cudbg_entity_hdr);
375 }
376
377 entity_hdr->entity_type = i;
378 entity_hdr->start_offset = dbg_buff.offset;
379 /* process each entity by calling process_entity fp */
380 remaining_buf_size = dbg_buff.size - dbg_buff.offset;
381
382 if ((remaining_buf_size <= BUFFER_WARN_LIMIT) &&
383 is_large_entity(i)) {
384 if (cudbg_init->verbose)
385 cudbg_init->print(padap->dip, CE_NOTE,
386 "Skipping %s\n",
387 entity_list[i].name);
388 skip_entity(i);
389 continue;
390 } else {
391
392 /* If fw_attach is 0, then skip entities which
393 * communicate with firmware
394 */
395
396 if (!is_fw_attached(cudbg_init) &&
397 (entity_list[i].flag &
398 (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
399 if (cudbg_init->verbose)
400 cudbg_init->print(padap->dip, CE_NOTE,
401 "Skipping %s entity,"\
402 "because fw_attach "\
403 "is 0\n",
404 entity_list[i].name);
405 continue;
406 }
407
408 if (cudbg_init->verbose)
409 cudbg_init->print(padap->dip, CE_NOTE,
410 "collecting debug entity: "\
411 "%s\n", entity_list[i].name);
412 memset(&cudbg_err, 0,
413 sizeof(struct cudbg_error));
414 rc = process_entity[i-1](cudbg_init, &dbg_buff,
415 &cudbg_err);
416 }
417
418 if (rc) {
419 entity_hdr->size = 0;
420 dbg_buff.offset = entity_hdr->start_offset;
421 } else
422 align_debug_buffer(&dbg_buff, entity_hdr);
423
424 if (cudbg_err.sys_err)
425 rc = CUDBG_SYSTEM_ERROR;
426
427 entity_hdr->hdr_flags = rc;
428 entity_hdr->sys_err = cudbg_err.sys_err;
429 entity_hdr->sys_warn = cudbg_err.sys_warn;
430
431 /* We don't want to include ext entity size in global
432 * header
433 */
434 if (!flag_ext)
435 total_size += entity_hdr->size;
436
437 cudbg_hdr->data_len = total_size;
438 *outbuf_size = total_size;
439
440 /* consider the size of the ext entity header and data
441 * also
442 */
443 if (flag_ext) {
444 ext_size += (sizeof(struct cudbg_entity_hdr) +
445 entity_hdr->size);
446 entity_hdr->start_offset -= cudbg_hdr->data_len;
447 ext_entity_hdr->size = ext_size;
448 entity_hdr->next_ext_offset = ext_size;
449 entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
450 }
451
452 if (cudbg_init->use_flash) {
453 if (flag_ext) {
454 wr_entity_to_flash(handle,
455 &dbg_buff,
456 ext_entity_hdr->
457 start_offset,
458 entity_hdr->
459 size,
460 CUDBG_EXT_ENTITY,
461 ext_size);
462 } else
464 wr_entity_to_flash(handle,
465 &dbg_buff,
466 entity_hdr->\
467 start_offset,
468 entity_hdr->size,
469 i, ext_size);
470 }
471 }
472 }
473
474 for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
475 i++) {
476 large_entity_code = large_entity_list[i].entity_code;
477 if (large_entity_list[i].skip_flag) {
478 if (!flag_ext) {
479 rc = get_entity_hdr(outbuf, large_entity_code,
480 dbg_buff.size, &entity_hdr);
481 if (rc)
482 cudbg_hdr->hdr_flags = rc;
483 } else {
484 rc = get_next_ext_entity_hdr(outbuf, &ext_size,
485 &dbg_buff,
486 &entity_hdr);
487 if (rc)
488 goto err;
489
490 dbg_buff.offset +=
491 sizeof(struct cudbg_entity_hdr);
492 }
493
494 /* If fw_attach is 0, then skip entities which
495 * communicate with firmware
496 */
497 if (!is_fw_attached(cudbg_init) &&
498 (entity_list[large_entity_code].flag &
499 (1 << ENTITY_FLAG_FW_NO_ATTACH))) {
500 if (cudbg_init->verbose)
501 cudbg_init->print(padap->dip, CE_NOTE,
502 "Skipping %s entity,"\
503 "because fw_attach "\
504 "is 0\n",
505 entity_list[large_entity_code]
506 .name);
507 continue;
508 }
509
510 entity_hdr->entity_type = large_entity_code;
511 entity_hdr->start_offset = dbg_buff.offset;
512 if (cudbg_init->verbose)
513 cudbg_init->print(padap->dip, CE_NOTE,
514 "Re-trying debug entity: %s\n",
515 entity_list[large_entity_code].name);
516
517 memset(&cudbg_err, 0, sizeof(struct cudbg_error));
518 rc = process_entity[large_entity_code - 1](cudbg_init,
519 &dbg_buff,
520 &cudbg_err);
521 if (rc) {
522 entity_hdr->size = 0;
523 dbg_buff.offset = entity_hdr->start_offset;
524 } else
525 align_debug_buffer(&dbg_buff, entity_hdr);
526
527 if (cudbg_err.sys_err)
528 rc = CUDBG_SYSTEM_ERROR;
529
530 entity_hdr->hdr_flags = rc;
531 entity_hdr->sys_err = cudbg_err.sys_err;
532 entity_hdr->sys_warn = cudbg_err.sys_warn;
533
534 /* We don't want to include ext entity size in global
535 * header
536 */
537 if (!flag_ext)
538 total_size += entity_hdr->size;
539
540 cudbg_hdr->data_len = total_size;
541 *outbuf_size = total_size;
542
543 /* consider the size of the ext entity header and
544 * data also
545 */
546 if (flag_ext) {
547 ext_size += (sizeof(struct cudbg_entity_hdr) +
548 entity_hdr->size);
549 entity_hdr->start_offset -=
550 cudbg_hdr->data_len;
551 ext_entity_hdr->size = ext_size;
552 entity_hdr->flag |= CUDBG_EXT_DATA_VALID;
553 }
554
555 if (cudbg_init->use_flash) {
556 if (flag_ext)
557 wr_entity_to_flash(handle,
558 &dbg_buff,
559 ext_entity_hdr->
560 start_offset,
561 entity_hdr->size,
562 CUDBG_EXT_ENTITY,
563 ext_size);
564 else
565 wr_entity_to_flash(handle,
566 &dbg_buff,
567 entity_hdr->
568 start_offset,
569 entity_hdr->
570 size,
571 large_entity_list[i].
572 entity_code,
573 ext_size);
574 }
575 }
576 }
577
578 cudbg_hdr->data_len = total_size;
579 *outbuf_size = total_size;
580
581 if (flag_ext)
582 *outbuf_size += ext_size;
583
584 return 0;
585 err:
586 return rc;
587 }
588
589 void
590 reset_skip_entity(void)
591 {
592 int i;
593
594 for (i = 0; i < ARRAY_SIZE(large_entity_list); i++)
595 large_entity_list[i].skip_flag = 0;
596 }
597
598 void
599 skip_entity(int entity_code)
600 {
601 int i;
602 for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
603 i++) {
604 if (large_entity_list[i].entity_code == entity_code)
605 large_entity_list[i].skip_flag = 1;
606 }
607 }
608
609 int
610 is_large_entity(int entity_code)
611 {
612 int i;
613
614 for (i = 0; i < sizeof(large_entity_list) / sizeof(struct large_entity);
615 i++) {
616 if (large_entity_list[i].entity_code == entity_code)
617 return 1;
618 }
619 return 0;
620 }
621
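/*
 * Return a pointer to the i-th entity header (1-based) that follows the
 * global cudbg header in the output buffer, bounds-checked against size.
 */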
622 int
623 get_entity_hdr(void *outbuf, int i, u32 size,
624 struct cudbg_entity_hdr **entity_hdr)
625 {
626 int rc = 0;
627 struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
628
629 if (cudbg_hdr->hdr_len + (sizeof(struct cudbg_entity_hdr)*i) > size)
630 return CUDBG_STATUS_SMALL_BUFF;
631
632 *entity_hdr = (struct cudbg_entity_hdr *)
633 ((char *)outbuf+cudbg_hdr->hdr_len +
634 (sizeof(struct cudbg_entity_hdr)*(i-1)));
635 return rc;
636 }
637
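/*
 * The collect_* routines below share a common pattern: carve a scratch
 * buffer out of the debug buffer, read the relevant hardware state into
 * it, emit a compression header, and compress the result into the output
 * buffer before releasing the scratch space.
 */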
638 static int
639 collect_rss(struct cudbg_init *pdbg_init,
640 struct cudbg_buffer *dbg_buff,
641 struct cudbg_error *cudbg_err)
642 {
643 struct adapter *padap = pdbg_init->adap;
644 struct cudbg_buffer scratch_buff;
645 u32 size;
646 int rc = 0;
647
648 size = RSS_NENTRIES * sizeof(u16);
649 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
650 if (rc)
651 goto err;
652
653 rc = t4_read_rss(padap, (u16 *)scratch_buff.data);
654 if (rc) {
655 if (pdbg_init->verbose)
656 pdbg_init->print(padap->dip, CE_NOTE,
657 "%s(), t4_read_rss failed!, rc: %d\n",
658 __func__, rc);
659 cudbg_err->sys_err = rc;
660 goto err1;
661 }
662
663 rc = write_compression_hdr(&scratch_buff, dbg_buff);
664 if (rc)
665 goto err1;
666
667 rc = compress_buff(&scratch_buff, dbg_buff);
668
669 err1:
670 release_scratch_buff(&scratch_buff, dbg_buff);
671 err:
672 return rc;
673 }
674
675 static int
676 collect_sw_state(struct cudbg_init *pdbg_init,
677 struct cudbg_buffer *dbg_buff,
678 struct cudbg_error *cudbg_err)
679 {
680 struct adapter *padap = pdbg_init->adap;
681 struct cudbg_buffer scratch_buff;
682 struct sw_state *swstate;
683 u32 size;
684 int rc = 0;
685
686 size = sizeof(struct sw_state);
687
688 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
689 if (rc)
690 goto err;
691
692 swstate = (struct sw_state *) scratch_buff.data;
693
694 swstate->fw_state = t4_read_reg(padap, A_PCIE_FW);
695 snprintf((char *)swstate->caller_string, sizeof(swstate->caller_string), "%s",
696 "Illumos");
697 swstate->os_type = 0;
698
699 rc = write_compression_hdr(&scratch_buff, dbg_buff);
700 if (rc)
701 goto err1;
702
703 rc = compress_buff(&scratch_buff, dbg_buff);
704
705 err1:
706 release_scratch_buff(&scratch_buff, dbg_buff);
707 err:
708 return rc;
709 }
710
711 static int
712 collect_ddp_stats(struct cudbg_init *pdbg_init,
713 struct cudbg_buffer *dbg_buff,
714 struct cudbg_error *cudbg_err)
715 {
716 struct adapter *padap = pdbg_init->adap;
717 struct cudbg_buffer scratch_buff;
718 struct tp_usm_stats *tp_usm_stats_buff;
719 u32 size;
720 int rc = 0;
721
722 size = sizeof(struct tp_usm_stats);
723
724 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
725 if (rc)
726 goto err;
727
728 tp_usm_stats_buff = (struct tp_usm_stats *) scratch_buff.data;
729
730 /* spin_lock(&padap->stats_lock); TODO*/
731 t4_get_usm_stats(padap, tp_usm_stats_buff, 1);
732 /* spin_unlock(&padap->stats_lock); TODO*/
733
734 rc = write_compression_hdr(&scratch_buff, dbg_buff);
735 if (rc)
736 goto err1;
737
738 rc = compress_buff(&scratch_buff, dbg_buff);
739
740 err1:
741 release_scratch_buff(&scratch_buff, dbg_buff);
742 err:
743 return rc;
744 }
745
746 static int
747 collect_ulptx_la(struct cudbg_init *pdbg_init,
748 struct cudbg_buffer *dbg_buff,
749 struct cudbg_error *cudbg_err)
750 {
751 struct adapter *padap = pdbg_init->adap;
752 struct cudbg_buffer scratch_buff;
753 struct struct_ulptx_la *ulptx_la_buff;
754 u32 size, i, j;
755 int rc = 0;
756
757 size = sizeof(struct struct_ulptx_la);
758
759 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
760 if (rc)
761 goto err;
762
763 ulptx_la_buff = (struct struct_ulptx_la *) scratch_buff.data;
764
765 for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
766 ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
767 A_ULP_TX_LA_RDPTR_0 +
768 0x10 * i);
769 ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
770 A_ULP_TX_LA_WRPTR_0 +
771 0x10 * i);
772 ulptx_la_buff->rddata[i] = t4_read_reg(padap,
773 A_ULP_TX_LA_RDDATA_0 +
774 0x10 * i);
775 for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) {
776 ulptx_la_buff->rd_data[i][j] =
777 t4_read_reg(padap,
778 A_ULP_TX_LA_RDDATA_0 + 0x10 * i);
779 }
780 }
781
782 rc = write_compression_hdr(&scratch_buff, dbg_buff);
783 if (rc)
784 goto err1;
785
786 rc = compress_buff(&scratch_buff, dbg_buff);
787
788 err1:
789 release_scratch_buff(&scratch_buff, dbg_buff);
790 err:
791 return rc;
792
793 }
794
795 static int
796 collect_ulprx_la(struct cudbg_init *pdbg_init,
797 struct cudbg_buffer *dbg_buff,
798 struct cudbg_error *cudbg_err)
799 {
800 struct adapter *padap = pdbg_init->adap;
801 struct cudbg_buffer scratch_buff;
802 struct struct_ulprx_la *ulprx_la_buff;
803 u32 size;
804 int rc = 0;
805
806 size = sizeof(struct struct_ulprx_la);
807
808 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
809 if (rc)
810 goto err;
811
812 ulprx_la_buff = (struct struct_ulprx_la *) scratch_buff.data;
813 t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
814 ulprx_la_buff->size = ULPRX_LA_SIZE;
815
816 rc = write_compression_hdr(&scratch_buff, dbg_buff);
817 if (rc)
818 goto err1;
819
820 rc = compress_buff(&scratch_buff, dbg_buff);
821
822 err1:
823 release_scratch_buff(&scratch_buff, dbg_buff);
824 err:
825 return rc;
826 }
827
828 static int
829 collect_cpl_stats(struct cudbg_init *pdbg_init,
830 struct cudbg_buffer *dbg_buff,
831 struct cudbg_error *cudbg_err)
832 {
833 struct adapter *padap = pdbg_init->adap;
834 struct cudbg_buffer scratch_buff;
835 struct struct_tp_cpl_stats *tp_cpl_stats_buff;
836 u32 size;
837 int rc = 0;
838
839 size = sizeof(struct struct_tp_cpl_stats);
840
841 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
842 if (rc)
843 goto err;
844
845 tp_cpl_stats_buff = (struct struct_tp_cpl_stats *) scratch_buff.data;
846 tp_cpl_stats_buff->nchan = padap->params.arch.nchan;
847
848 t4_tp_get_cpl_stats(padap, &tp_cpl_stats_buff->stats, 1);
849
850 rc = write_compression_hdr(&scratch_buff, dbg_buff);
851 if (rc)
852 goto err1;
853
854 rc = compress_buff(&scratch_buff, dbg_buff);
855
856 err1:
857 release_scratch_buff(&scratch_buff, dbg_buff);
858 err:
859 return rc;
860 }
861
862 static int
863 collect_wc_stats(struct cudbg_init *pdbg_init,
864 struct cudbg_buffer *dbg_buff,
865 struct cudbg_error *cudbg_err)
866 {
867 struct adapter *padap = pdbg_init->adap;
868 struct cudbg_buffer scratch_buff;
869 struct struct_wc_stats *wc_stats_buff;
870 u32 val1;
871 u32 val2;
872 u32 size;
873
874 int rc = 0;
875
876 size = sizeof(struct struct_wc_stats);
877
878 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
879 if (rc)
880 goto err;
881
882 wc_stats_buff = (struct struct_wc_stats *) scratch_buff.data;
883
884 if (!is_t4(padap->params.chip)) {
885 val1 = t4_read_reg(padap, A_SGE_STAT_TOTAL);
886 val2 = t4_read_reg(padap, A_SGE_STAT_MATCH);
887 wc_stats_buff->wr_cl_success = val1 - val2;
888 wc_stats_buff->wr_cl_fail = val2;
889 } else {
890 wc_stats_buff->wr_cl_success = 0;
891 wc_stats_buff->wr_cl_fail = 0;
892 }
893
894 rc = write_compression_hdr(&scratch_buff, dbg_buff);
895 if (rc)
896 goto err1;
897
898 rc = compress_buff(&scratch_buff, dbg_buff);
899 err1:
900 release_scratch_buff(&scratch_buff, dbg_buff);
901 err:
902 return rc;
903 }
904
905 static int
906 mem_desc_cmp(const void *a, const void *b)
907 {
908 return ((const struct struct_mem_desc *)a)->base -
909 ((const struct struct_mem_desc *)b)->base;
910 }
911
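/*
 * Build a snapshot of the adapter memory layout: which EDRAM/external
 * memories are enabled (from MA_TARGET_MEM_ENABLE), the base/limit of the
 * various hardware memory regions, and the PMTX/PMRX page accounting.
 * Both the available-memory and region tables are sorted by base address.
 */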
912 static int
913 fill_meminfo(struct adapter *padap,
914 struct struct_meminfo *meminfo_buff)
915 {
916 struct struct_mem_desc *md;
917 u32 size, lo, hi;
918 u32 used, alloc;
919 int n, i, rc = 0;
920
921 size = sizeof(struct struct_meminfo);
922
923 memset(meminfo_buff->avail, 0,
924 ARRAY_SIZE(meminfo_buff->avail) *
925 sizeof(struct struct_mem_desc));
926 memset(meminfo_buff->mem, 0,
927 (ARRAY_SIZE(region) + 3) * sizeof(struct struct_mem_desc));
928 md = meminfo_buff->mem;
929
930 for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
931 meminfo_buff->mem[i].limit = 0;
932 meminfo_buff->mem[i].idx = i;
933 }
934
935 i = 0;
936
937 lo = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
938
939 if (lo & F_EDRAM0_ENABLE) {
940 hi = t4_read_reg(padap, A_MA_EDRAM0_BAR);
941 meminfo_buff->avail[i].base = G_EDRAM0_BASE(hi) << 20;
942 meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
943 (G_EDRAM0_SIZE(hi) << 20);
944 meminfo_buff->avail[i].idx = 0;
945 i++;
946 }
947
948 if (lo & F_EDRAM1_ENABLE) {
949 hi = t4_read_reg(padap, A_MA_EDRAM1_BAR);
950 meminfo_buff->avail[i].base = G_EDRAM1_BASE(hi) << 20;
951 meminfo_buff->avail[i].limit = meminfo_buff->avail[i].base +
952 (G_EDRAM1_SIZE(hi) << 20);
953 meminfo_buff->avail[i].idx = 1;
954 i++;
955 }
956
957 if (is_t5(padap->params.chip)) {
958 if (lo & F_EXT_MEM0_ENABLE) {
959 hi = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
960 meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
961 meminfo_buff->avail[i].limit =
962 meminfo_buff->avail[i].base +
963 (G_EXT_MEM_SIZE(hi) << 20);
964 meminfo_buff->avail[i].idx = 3;
965 i++;
966 }
967
968 if (lo & F_EXT_MEM1_ENABLE) {
969 hi = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
970 meminfo_buff->avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
971 meminfo_buff->avail[i].limit =
972 meminfo_buff->avail[i].base +
973 (G_EXT_MEM1_SIZE(hi) << 20);
974 meminfo_buff->avail[i].idx = 4;
975 i++;
976 }
977 } else if (is_t6(padap->params.chip)) {
978 if (lo & F_EXT_MEM_ENABLE) {
979 hi = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
980 meminfo_buff->avail[i].base = G_EXT_MEM_BASE(hi) << 20;
981 meminfo_buff->avail[i].limit =
982 meminfo_buff->avail[i].base +
983 (G_EXT_MEM_SIZE(hi) << 20);
984 meminfo_buff->avail[i].idx = 2;
985 i++;
986 }
987 }
988
989 if (!i) { /* no memory available */
990 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
991 goto err;
992 }
993
994 meminfo_buff->avail_c = i;
995 qsort(meminfo_buff->avail, i, sizeof(struct struct_mem_desc),
996 mem_desc_cmp, NULL);
997 (md++)->base = t4_read_reg(padap, A_SGE_DBQ_CTXT_BADDR);
998 (md++)->base = t4_read_reg(padap, A_SGE_IMSG_CTXT_BADDR);
999 (md++)->base = t4_read_reg(padap, A_SGE_FLM_CACHE_BADDR);
1000 (md++)->base = t4_read_reg(padap, A_TP_CMM_TCB_BASE);
1001 (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_BASE);
1002 (md++)->base = t4_read_reg(padap, A_TP_CMM_TIMER_BASE);
1003 (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_RX_FLST_BASE);
1004 (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_TX_FLST_BASE);
1005 (md++)->base = t4_read_reg(padap, A_TP_CMM_MM_PS_FLST_BASE);
1006
1007 /* the next few have explicit upper bounds */
1008 md->base = t4_read_reg(padap, A_TP_PMM_TX_BASE);
1009 md->limit = md->base - 1 +
1010 t4_read_reg(padap,
1011 A_TP_PMM_TX_PAGE_SIZE) *
1012 G_PMTXMAXPAGE(t4_read_reg(padap,
1013 A_TP_PMM_TX_MAX_PAGE)
1014 );
1015 md++;
1016
1017 md->base = t4_read_reg(padap, A_TP_PMM_RX_BASE);
1018 md->limit = md->base - 1 +
1019 t4_read_reg(padap,
1020 A_TP_PMM_RX_PAGE_SIZE) *
1021 G_PMRXMAXPAGE(t4_read_reg(padap,
1022 A_TP_PMM_RX_MAX_PAGE)
1023 );
1024 md++;
1025 if (t4_read_reg(padap, A_LE_DB_CONFIG) & F_HASHEN) {
1026 if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
1027 hi = t4_read_reg(padap, A_LE_DB_TID_HASHBASE) / 4;
1028 md->base = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
1029 } else {
1030 hi = t4_read_reg(padap, A_LE_DB_HASH_TID_BASE);
1031 md->base = t4_read_reg(padap,
1032 A_LE_DB_HASH_TBL_BASE_ADDR);
1033 }
1034 md->limit = 0;
1035 } else {
1036 md->base = 0;
1037 md->idx = ARRAY_SIZE(region); /* hide it */
1038 }
1039 md++;
1040 #define ulp_region(reg) \
1041 {\
1042 md->base = t4_read_reg(padap, A_ULP_ ## reg ## _LLIMIT);\
1043 (md++)->limit = t4_read_reg(padap, A_ULP_ ## reg ## _ULIMIT);\
1044 }
1045
1046 ulp_region(RX_ISCSI);
1047 ulp_region(RX_TDDP);
1048 ulp_region(TX_TPT);
1049 ulp_region(RX_STAG);
1050 ulp_region(RX_RQ);
1051 ulp_region(RX_RQUDP);
1052 ulp_region(RX_PBL);
1053 ulp_region(TX_PBL);
1054 #undef ulp_region
1055 md->base = 0;
1056 md->idx = ARRAY_SIZE(region);
1057 if (!is_t4(padap->params.chip)) {
1058 u32 sge_ctrl = t4_read_reg(padap, A_SGE_CONTROL2);
1059 u32 fifo_size = t4_read_reg(padap, A_SGE_DBVFIFO_SIZE);
1060 if (is_t5(padap->params.chip)) {
1061 if (sge_ctrl & F_VFIFO_ENABLE)
1062 size = G_DBVFIFO_SIZE(fifo_size);
1063 } else
1064 size = G_T6_DBVFIFO_SIZE(fifo_size);
1065
1066 if (size) {
1067 md->base = G_BASEADDR(t4_read_reg(padap,
1068 A_SGE_DBVFIFO_BADDR));
1069 md->limit = md->base + (size << 2) - 1;
1070 }
1071 }
1072
1073 md++;
1074
1075 md->base = t4_read_reg(padap, A_ULP_RX_CTX_BASE);
1076 md->limit = 0;
1077 md++;
1078 md->base = t4_read_reg(padap, A_ULP_TX_ERR_TABLE_BASE);
1079 md->limit = 0;
1080 md++;
1081 #ifndef __NO_DRIVER_OCQ_SUPPORT__
1082 /*md->base = padap->vres.ocq.start;*/
1083 /*if (adap->vres.ocq.size)*/
1084 /* md->limit = md->base + adap->vres.ocq.size - 1;*/
1085 /*else*/
1086 md->idx = ARRAY_SIZE(region); /* hide it */
1087 md++;
1088 #endif
1089
1090 /* add any address-space holes, there can be up to 3 */
1091 for (n = 0; n < i - 1; n++)
1092 if (meminfo_buff->avail[n].limit <
1093 meminfo_buff->avail[n + 1].base)
1094 (md++)->base = meminfo_buff->avail[n].limit;
1095
1096 if (meminfo_buff->avail[n].limit)
1097 (md++)->base = meminfo_buff->avail[n].limit;
1098
1099 n = (int) (md - meminfo_buff->mem);
1100 meminfo_buff->mem_c = n;
1101
1102 qsort(meminfo_buff->mem, n, sizeof(struct struct_mem_desc),
1103 mem_desc_cmp, NULL);
1104
1105 lo = t4_read_reg(padap, A_CIM_SDRAM_BASE_ADDR);
1106 hi = t4_read_reg(padap, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
1107 meminfo_buff->up_ram_lo = lo;
1108 meminfo_buff->up_ram_hi = hi;
1109
1110 lo = t4_read_reg(padap, A_CIM_EXTMEM2_BASE_ADDR);
1111 hi = t4_read_reg(padap, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
1112 meminfo_buff->up_extmem2_lo = lo;
1113 meminfo_buff->up_extmem2_hi = hi;
1114
1115 lo = t4_read_reg(padap, A_TP_PMM_RX_MAX_PAGE);
1116 meminfo_buff->rx_pages_data[0] = G_PMRXMAXPAGE(lo);
1117 meminfo_buff->rx_pages_data[1] =
1118 t4_read_reg(padap, A_TP_PMM_RX_PAGE_SIZE) >> 10;
1119 meminfo_buff->rx_pages_data[2] = (lo & F_PMRXNUMCHN) ? 2 : 1;
1120
1121 lo = t4_read_reg(padap, A_TP_PMM_TX_MAX_PAGE);
1122 hi = t4_read_reg(padap, A_TP_PMM_TX_PAGE_SIZE);
1123 meminfo_buff->tx_pages_data[0] = G_PMTXMAXPAGE(lo);
1124 meminfo_buff->tx_pages_data[1] =
1125 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
1126 meminfo_buff->tx_pages_data[2] =
1127 hi >= (1 << 20) ? 'M' : 'K';
1128 meminfo_buff->tx_pages_data[3] = 1 << G_PMTXNUMCHN(lo);
1129
1130 for (i = 0; i < 4; i++) {
1131 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
1132 lo = t4_read_reg(padap,
1133 A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
1134 else
1135 lo = t4_read_reg(padap, A_MPS_RX_PG_RSV0 + i * 4);
1136 if (is_t5(padap->params.chip)) {
1137 used = G_T5_USED(lo);
1138 alloc = G_T5_ALLOC(lo);
1139 } else {
1140 used = G_USED(lo);
1141 alloc = G_ALLOC(lo);
1142 }
1143 meminfo_buff->port_used[i] = used;
1144 meminfo_buff->port_alloc[i] = alloc;
1145 }
1146
1147 for (i = 0; i < padap->params.arch.nchan; i++) {
1148 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
1149 lo = t4_read_reg(padap,
1150 A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
1151 else
1152 lo = t4_read_reg(padap, A_MPS_RX_PG_RSV4 + i * 4);
1153 if (is_t5(padap->params.chip)) {
1154 used = G_T5_USED(lo);
1155 alloc = G_T5_ALLOC(lo);
1156 } else {
1157 used = G_USED(lo);
1158 alloc = G_ALLOC(lo);
1159 }
1160 meminfo_buff->loopback_used[i] = used;
1161 meminfo_buff->loopback_alloc[i] = alloc;
1162 }
1163 err:
1164 return rc;
1165 }
1166
1167 static int
1168 collect_meminfo(struct cudbg_init *pdbg_init,
1169 struct cudbg_buffer *dbg_buff,
1170 struct cudbg_error *cudbg_err)
1171 {
1172 struct adapter *padap = pdbg_init->adap;
1173 struct struct_meminfo *meminfo_buff;
1174 struct cudbg_buffer scratch_buff;
1175 int rc = 0;
1176 u32 size;
1177
1178 size = sizeof(struct struct_meminfo);
1179
1180 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1181 if (rc)
1182 goto err;
1183
1184 meminfo_buff = (struct struct_meminfo *)scratch_buff.data;
1185
1186 rc = fill_meminfo(padap, meminfo_buff);
1187 if (rc)
1188 goto err;
1189
1190 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1191 if (rc)
1192 goto err1;
1193
1194 rc = compress_buff(&scratch_buff, dbg_buff);
1195 err1:
1196 release_scratch_buff(&scratch_buff, dbg_buff);
1197 err:
1198 return rc;
1199 }
1200
1201 static int
1202 collect_lb_stats(struct cudbg_init *pdbg_init,
1203 struct cudbg_buffer *dbg_buff,
1204 struct cudbg_error *cudbg_err)
1205 {
1206 struct adapter *padap = pdbg_init->adap;
1207 struct struct_lb_stats *lb_stats_buff;
1208 struct cudbg_buffer scratch_buff;
1209 struct lb_port_stats *tmp_stats;
1210 u32 i, n, size;
1211 int rc = 0;
1212
1213 rc = padap->params.nports;
1214 if (rc < 0)
1215 goto err;
1216
1217 n = rc;
1218 size = sizeof(struct struct_lb_stats) +
1219 n * sizeof(struct lb_port_stats);
1220
1221 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1222 if (rc)
1223 goto err;
1224
1225 lb_stats_buff = (struct struct_lb_stats *) scratch_buff.data;
1226
1227 lb_stats_buff->nchan = n;
1228 tmp_stats = lb_stats_buff->s;
1229
1230 for (i = 0; i < n; i += 2, tmp_stats += 2) {
1231 t4_get_lb_stats(padap, i, tmp_stats);
1232 t4_get_lb_stats(padap, i + 1, tmp_stats+1);
1233 }
1234
1235 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1236 if (rc)
1237 goto err1;
1238
1239 rc = compress_buff(&scratch_buff, dbg_buff);
1240 err1:
1241 release_scratch_buff(&scratch_buff, dbg_buff);
1242 err:
1243 return rc;
1244 }
1245
1246 static int
1247 collect_rdma_stats(struct cudbg_init *pdbg_init,
1248 struct cudbg_buffer *dbg_buff,
1249 struct cudbg_error *cudbg_err)
1250 {
1251 struct adapter *padap = pdbg_init->adap;
1252 struct cudbg_buffer scratch_buff;
1253 struct tp_rdma_stats *rdma_stats_buff;
1254 u32 size;
1255 int rc = 0;
1256
1257 size = sizeof(struct tp_rdma_stats);
1258
1259 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1260 if (rc)
1261 goto err;
1262
1263 rdma_stats_buff = (struct tp_rdma_stats *) scratch_buff.data;
1264
1265 /* spin_lock(&padap->stats_lock); TODO*/
1266 t4_tp_get_rdma_stats(padap, rdma_stats_buff, 1);
1267 /* spin_unlock(&padap->stats_lock); TODO*/
1268
1269 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1270 if (rc)
1271 goto err1;
1272
1273 rc = compress_buff(&scratch_buff, dbg_buff);
1274 err1:
1275 release_scratch_buff(&scratch_buff, dbg_buff);
1276 err:
1277 return rc;
1278 }
1279
1280 static int
1281 collect_clk_info(struct cudbg_init *pdbg_init,
1282 struct cudbg_buffer *dbg_buff,
1283 struct cudbg_error *cudbg_err)
1284 {
1285 struct cudbg_buffer scratch_buff;
1286 struct adapter *padap = pdbg_init->adap;
1287 struct struct_clk_info *clk_info_buff;
1288 u64 tp_tick_us;
1289 int size;
1290 int rc = 0;
1291
1292 if (!padap->params.vpd.cclk) {
1293 rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
1294 goto err;
1295 }
1296
1297 size = sizeof(struct struct_clk_info);
1298 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1299 if (rc)
1300 goto err;
1301
1302 clk_info_buff = (struct struct_clk_info *) scratch_buff.data;
1303
1304 clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* in ps */
1306 clk_info_buff->res = t4_read_reg(padap, A_TP_TIMER_RESOLUTION);
1307 clk_info_buff->tre = G_TIMERRESOLUTION(clk_info_buff->res);
1308 clk_info_buff->dack_re = G_DELAYEDACKRESOLUTION(clk_info_buff->res);
1309 tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1310 /* in us */
1311 clk_info_buff->dack_timer = ((clk_info_buff->cclk_ps <<
1312 clk_info_buff->dack_re) / 1000000) *
1313 t4_read_reg(padap, A_TP_DACK_TIMER);
1314
1315 clk_info_buff->retransmit_min =
1316 tp_tick_us * t4_read_reg(padap, A_TP_RXT_MIN);
1317 clk_info_buff->retransmit_max =
1318 tp_tick_us * t4_read_reg(padap, A_TP_RXT_MAX);
1319
1320 clk_info_buff->persist_timer_min =
1321 tp_tick_us * t4_read_reg(padap, A_TP_PERS_MIN);
1322 clk_info_buff->persist_timer_max =
1323 tp_tick_us * t4_read_reg(padap, A_TP_PERS_MAX);
1324
1325 clk_info_buff->keepalive_idle_timer =
1326 tp_tick_us * t4_read_reg(padap, A_TP_KEEP_IDLE);
1327 clk_info_buff->keepalive_interval =
1328 tp_tick_us * t4_read_reg(padap, A_TP_KEEP_INTVL);
1329
1330 clk_info_buff->initial_srtt =
1331 tp_tick_us * G_INITSRTT(t4_read_reg(padap, A_TP_INIT_SRTT));
1332 clk_info_buff->finwait2_timer =
1333 tp_tick_us * t4_read_reg(padap, A_TP_FINWAIT2_TIMER);
1334
1335 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1336
1337 if (rc)
1338 goto err1;
1339
1340 rc = compress_buff(&scratch_buff, dbg_buff);
1341 err1:
1342 release_scratch_buff(&scratch_buff, dbg_buff);
1343 err:
1344 return rc;
1345
1346 }
1347
1348 static int
1349 collect_macstats(struct cudbg_init *pdbg_init,
1350 struct cudbg_buffer *dbg_buff,
1351 struct cudbg_error *cudbg_err)
1352 {
1353 struct adapter *padap = pdbg_init->adap;
1354 struct cudbg_buffer scratch_buff;
1355 struct struct_mac_stats_rev1 *mac_stats_buff;
1356 u32 i, n, size;
1357 int rc = 0;
1358
1359 rc = padap->params.nports;
1360 if (rc < 0)
1361 goto err;
1362
1363 n = rc;
1364 size = sizeof(struct struct_mac_stats_rev1);
1365
1366 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1367 if (rc)
1368 goto err;
1369
1370 mac_stats_buff = (struct struct_mac_stats_rev1 *) scratch_buff.data;
1371
1372 mac_stats_buff->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1373 mac_stats_buff->ver_hdr.revision = CUDBG_MAC_STATS_REV;
1374 mac_stats_buff->ver_hdr.size = sizeof(struct struct_mac_stats_rev1) -
1375 sizeof(struct cudbg_ver_hdr);
1376
1377 mac_stats_buff->port_count = n;
1378 for (i = 0; i < mac_stats_buff->port_count; i++)
1379 t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
1380
1381 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1382 if (rc)
1383 goto err1;
1384
1385 rc = compress_buff(&scratch_buff, dbg_buff);
1386 err1:
1387 release_scratch_buff(&scratch_buff, dbg_buff);
1388 err:
1389 return rc;
1390 }
1391
1392 static int
1393 collect_cim_pif_la(struct cudbg_init *pdbg_init,
1394 struct cudbg_buffer *dbg_buff,
1395 struct cudbg_error *cudbg_err)
1396 {
1397 struct adapter *padap = pdbg_init->adap;
1398 struct cudbg_buffer scratch_buff;
1399 struct cim_pif_la *cim_pif_la_buff;
1400 u32 size;
1401 int rc = 0;
1402
1403 size = sizeof(struct cim_pif_la) +
1404 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1405
1406 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1407 if (rc)
1408 goto err;
1409
1410 cim_pif_la_buff = (struct cim_pif_la *) scratch_buff.data;
1411 cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1412
1413 t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1414 (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1415 NULL, NULL);
1416
1417 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1418 if (rc)
1419 goto err1;
1420
1421 rc = compress_buff(&scratch_buff, dbg_buff);
1422 err1:
1423 release_scratch_buff(&scratch_buff, dbg_buff);
1424 err:
1425 return rc;
1426 }
1427
1428 static int
1429 collect_tp_la(struct cudbg_init *pdbg_init,
1430 struct cudbg_buffer *dbg_buff,
1431 struct cudbg_error *cudbg_err)
1432 {
1433 struct adapter *padap = pdbg_init->adap;
1434 struct cudbg_buffer scratch_buff;
1435 struct struct_tp_la *tp_la_buff;
1436 u32 size;
1437 int rc = 0;
1438
1439 size = sizeof(struct struct_tp_la) + TPLA_SIZE * sizeof(u64);
1440
1441 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1442 if (rc)
1443 goto err;
1444
1445 tp_la_buff = (struct struct_tp_la *) scratch_buff.data;
1446
1447 tp_la_buff->mode = G_DBGLAMODE(t4_read_reg(padap, A_TP_DBG_LA_CONFIG));
1448 t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1449
1450 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1451 if (rc)
1452 goto err1;
1453
1454 rc = compress_buff(&scratch_buff, dbg_buff);
1455 err1:
1456 release_scratch_buff(&scratch_buff, dbg_buff);
1457 err:
1458 return rc;
1459 }
1460
1461 static int
1462 collect_fcoe_stats(struct cudbg_init *pdbg_init,
1463 struct cudbg_buffer *dbg_buff,
1464 struct cudbg_error *cudbg_err)
1465 {
1466 struct adapter *padap = pdbg_init->adap;
1467 struct cudbg_buffer scratch_buff;
1468 struct struct_tp_fcoe_stats *tp_fcoe_stats_buff;
1469 u32 size;
1470 int rc = 0;
1471
1472 size = sizeof(struct struct_tp_fcoe_stats);
1473
1474 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1475 if (rc)
1476 goto err;
1477
1478 tp_fcoe_stats_buff = (struct struct_tp_fcoe_stats *) scratch_buff.data;
1479
1480 t4_get_fcoe_stats(padap, 0, &tp_fcoe_stats_buff->stats[0], 1);
1481 t4_get_fcoe_stats(padap, 1, &tp_fcoe_stats_buff->stats[1], 1);
1482
1483 if (padap->params.arch.nchan == NCHAN) {
1484 t4_get_fcoe_stats(padap, 2, &tp_fcoe_stats_buff->stats[2], 1);
1485 t4_get_fcoe_stats(padap, 3, &tp_fcoe_stats_buff->stats[3], 1);
1486 }
1487
1488 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1489 if (rc)
1490 goto err1;
1491
1492 rc = compress_buff(&scratch_buff, dbg_buff);
1493 err1:
1494 release_scratch_buff(&scratch_buff, dbg_buff);
1495 err:
1496 return rc;
1497 }
1498
1499 static int
1500 collect_tp_err_stats(struct cudbg_init *pdbg_init,
1501 struct cudbg_buffer *dbg_buff,
1502 struct cudbg_error *cudbg_err)
1503 {
1504 struct adapter *padap = pdbg_init->adap;
1505 struct cudbg_buffer scratch_buff;
1506 struct struct_tp_err_stats *tp_err_stats_buff;
1507 u32 size;
1508 int rc = 0;
1509
1510 size = sizeof(struct struct_tp_err_stats);
1511
1512 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1513 if (rc)
1514 goto err;
1515
1516 tp_err_stats_buff = (struct struct_tp_err_stats *) scratch_buff.data;
1517
1518 t4_tp_get_err_stats(padap, &tp_err_stats_buff->stats, 1);
1519 tp_err_stats_buff->nchan = padap->params.arch.nchan;
1520
1521 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1522 if (rc)
1523 goto err1;
1524
1525 rc = compress_buff(&scratch_buff, dbg_buff);
1526 err1:
1527 release_scratch_buff(&scratch_buff, dbg_buff);
1528 err:
1529 return rc;
1530 }
1531
1532 static int
1533 collect_tcp_stats(struct cudbg_init *pdbg_init,
1534 struct cudbg_buffer *dbg_buff,
1535 struct cudbg_error *cudbg_err)
1536 {
1537 struct adapter *padap = pdbg_init->adap;
1538 struct cudbg_buffer scratch_buff;
1539 struct struct_tcp_stats *tcp_stats_buff;
1540 u32 size;
1541 int rc = 0;
1542
1543 size = sizeof(struct struct_tcp_stats);
1544
1545 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1546 if (rc)
1547 goto err;
1548
1549 tcp_stats_buff = (struct struct_tcp_stats *) scratch_buff.data;
1550
1551 /* spin_lock(&padap->stats_lock); TODO*/
1552 t4_tp_get_tcp_stats(padap, &tcp_stats_buff->v4, &tcp_stats_buff->v6, 1);
1553 /* spin_unlock(&padap->stats_lock); TODO*/
1554
1555 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1556 if (rc)
1557 goto err1;
1558
1559 rc = compress_buff(&scratch_buff, dbg_buff);
1560 err1:
1561 release_scratch_buff(&scratch_buff, dbg_buff);
1562 err:
1563 return rc;
1564 }
1565
1566 static int
1567 collect_hw_sched(struct cudbg_init *pdbg_init,
1568 struct cudbg_buffer *dbg_buff,
1569 struct cudbg_error *cudbg_err)
1570 {
1571 struct adapter *padap = pdbg_init->adap;
1572 struct cudbg_buffer scratch_buff;
1573 struct struct_hw_sched *hw_sched_buff;
1574 u32 size;
1575 int i, rc = 0;
1576
1577 if (!padap->params.vpd.cclk) {
1578 rc = CUDBG_STATUS_CCLK_NOT_DEFINED;
1579 goto err;
1580 }
1581
1582 size = sizeof(struct struct_hw_sched);
1583 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1584 if (rc)
1585 goto err;
1586
1587 hw_sched_buff = (struct struct_hw_sched *) scratch_buff.data;
1588
1589 hw_sched_buff->map = t4_read_reg(padap, A_TP_TX_MOD_QUEUE_REQ_MAP);
1590 hw_sched_buff->mode = G_TIMERMODE(t4_read_reg(padap, A_TP_MOD_CONFIG));
1591 t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1592
1593 for (i = 0; i < NTX_SCHED; ++i) {
1594 t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1595 &hw_sched_buff->ipg[i], 1);
1596 }
1597
1598 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1599 if (rc)
1600 goto err1;
1601
1602 rc = compress_buff(&scratch_buff, dbg_buff);
1603 err1:
1604 release_scratch_buff(&scratch_buff, dbg_buff);
1605 err:
1606 return rc;
1607 }
1608
1609 static int
1610 collect_pm_stats(struct cudbg_init *pdbg_init,
1611 struct cudbg_buffer *dbg_buff,
1612 struct cudbg_error *cudbg_err)
1613 {
1614 struct adapter *padap = pdbg_init->adap;
1615 struct cudbg_buffer scratch_buff;
1616 struct struct_pm_stats *pm_stats_buff;
1617 u32 size;
1618 int rc = 0;
1619
1620 size = sizeof(struct struct_pm_stats);
1621
1622 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1623 if (rc)
1624 goto err;
1625
1626 pm_stats_buff = (struct struct_pm_stats *) scratch_buff.data;
1627
1628 t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1629 t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1630
1631 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1632 if (rc)
1633 goto err1;
1634
1635 rc = compress_buff(&scratch_buff, dbg_buff);
1636 err1:
1637 release_scratch_buff(&scratch_buff, dbg_buff);
1638 err:
1639 return rc;
1640 }
1641
1642 static int
1643 collect_path_mtu(struct cudbg_init *pdbg_init,
1644 struct cudbg_buffer *dbg_buff,
1645 struct cudbg_error *cudbg_err)
1646 {
1647 struct adapter *padap = pdbg_init->adap;
1648 struct cudbg_buffer scratch_buff;
1649 u32 size;
1650 int rc = 0;
1651
1652 size = NMTUS * sizeof(u16);
1653
1654 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1655 if (rc)
1656 goto err;
1657
1658 t4_read_mtu_tbl(padap, (u16 *)scratch_buff.data, NULL);
1659
1660 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1661 if (rc)
1662 goto err1;
1663
1664 rc = compress_buff(&scratch_buff, dbg_buff);
1665 err1:
1666 release_scratch_buff(&scratch_buff, dbg_buff);
1667 err:
1668 return rc;
1669 }
1670
1671 static int
1672 collect_rss_key(struct cudbg_init *pdbg_init,
1673 struct cudbg_buffer *dbg_buff,
1674 struct cudbg_error *cudbg_err)
1675 {
1676 struct adapter *padap = pdbg_init->adap;
1677 struct cudbg_buffer scratch_buff;
1678 u32 size;
1679
1680 int rc = 0;
1681
1682 size = 10 * sizeof(u32);
1683 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1684 if (rc)
1685 goto err;
1686
1687 t4_read_rss_key(padap, (u32 *)scratch_buff.data, 1);
1688
1689 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1690 if (rc)
1691 goto err1;
1692
1693 rc = compress_buff(&scratch_buff, dbg_buff);
1694 err1:
1695 release_scratch_buff(&scratch_buff, dbg_buff);
1696 err:
1697 return rc;
1698 }
1699
1700 static int
1701 collect_rss_config(struct cudbg_init *pdbg_init,
1702 struct cudbg_buffer *dbg_buff,
1703 struct cudbg_error *cudbg_err)
1704 {
1705 struct adapter *padap = pdbg_init->adap;
1706 struct cudbg_buffer scratch_buff;
1707 struct rss_config *rss_conf;
1708 int rc;
1709 u32 size;
1710
1711 size = sizeof(struct rss_config);
1712
1713 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1714 if (rc)
1715 goto err;
1716
1717 rss_conf = (struct rss_config *)scratch_buff.data;
1718
1719 rss_conf->tp_rssconf = t4_read_reg(padap, A_TP_RSS_CONFIG);
1720 rss_conf->tp_rssconf_tnl = t4_read_reg(padap, A_TP_RSS_CONFIG_TNL);
1721 rss_conf->tp_rssconf_ofd = t4_read_reg(padap, A_TP_RSS_CONFIG_OFD);
1722 rss_conf->tp_rssconf_syn = t4_read_reg(padap, A_TP_RSS_CONFIG_SYN);
1723 rss_conf->tp_rssconf_vrt = t4_read_reg(padap, A_TP_RSS_CONFIG_VRT);
1724 rss_conf->tp_rssconf_cng = t4_read_reg(padap, A_TP_RSS_CONFIG_CNG);
1725 rss_conf->chip = padap->params.chip;
1726
1727 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1728 if (rc)
1729 goto err1;
1730
1731 rc = compress_buff(&scratch_buff, dbg_buff);
1732
1733 err1:
1734 release_scratch_buff(&scratch_buff, dbg_buff);
1735 err:
1736 return rc;
1737 }
1738
1739 static int
1740 collect_rss_vf_config(struct cudbg_init *pdbg_init,
1741 struct cudbg_buffer *dbg_buff,
1742 struct cudbg_error *cudbg_err)
1743 {
1744 struct adapter *padap = pdbg_init->adap;
1745 struct cudbg_buffer scratch_buff;
1746 struct rss_vf_conf *vfconf;
1747 int vf, rc, vf_count = 0;
1748 u32 size;
1749
1750 vf_count = padap->params.arch.vfcount;
1751 size = vf_count * sizeof(*vfconf);
1752
1753 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1754 if (rc)
1755 goto err;
1756
1757 vfconf = (struct rss_vf_conf *)scratch_buff.data;
1758
1759 for (vf = 0; vf < vf_count; vf++) {
1760 t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1761 &vfconf[vf].rss_vf_vfh, 1);
1762 }
1763
1764 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1765 if (rc)
1766 goto err1;
1767
1768 rc = compress_buff(&scratch_buff, dbg_buff);
1769
1770 err1:
1771 release_scratch_buff(&scratch_buff, dbg_buff);
1772 err:
1773 return rc;
1774 }
1775
1776 static int
1777 collect_rss_pf_config(struct cudbg_init *pdbg_init,
1778 struct cudbg_buffer *dbg_buff,
1779 struct cudbg_error *cudbg_err)
1780 {
1781 struct cudbg_buffer scratch_buff;
1782 struct rss_pf_conf *pfconf;
1783 struct adapter *padap = pdbg_init->adap;
1784 u32 rss_pf_map, rss_pf_mask, size;
1785 int pf, rc;
1786
1787 size = 8 * sizeof(*pfconf);
1788
1789 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1790 if (rc)
1791 goto err;
1792
1793 pfconf = (struct rss_pf_conf *)scratch_buff.data;
1794
1795 rss_pf_map = t4_read_rss_pf_map(padap, 1);
1796 rss_pf_mask = t4_read_rss_pf_mask(padap, 1);
1797
1798 for (pf = 0; pf < 8; pf++) {
1799 pfconf[pf].rss_pf_map = rss_pf_map;
1800 pfconf[pf].rss_pf_mask = rss_pf_mask;
1801 /* no return val */
1802 t4_read_rss_pf_config(padap, pf, &pfconf[pf].rss_pf_config, 1);
1803 }
1804
1805 rc = write_compression_hdr(&scratch_buff, dbg_buff);
1806 if (rc)
1807 goto err1;
1808
1809 rc = compress_buff(&scratch_buff, dbg_buff);
1810 err1:
1811 release_scratch_buff(&scratch_buff, dbg_buff);
1812 err:
1813 return rc;
1814 }
1815
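/*
 * Check the context-valid bit in raw SGE context data.  The bit position
 * depends on the context type (EGRESS, INGRESS or FLM).
 */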
1816 static int
1817 check_valid(u32 *buf, int type)
1818 {
1819 int index;
1820 int bit;
1821 int bit_pos = 0;
1822
1823 switch (type) {
1824 case CTXT_EGRESS:
1825 bit_pos = 176;
1826 break;
1827 case CTXT_INGRESS:
1828 bit_pos = 141;
1829 break;
1830 case CTXT_FLM:
1831 bit_pos = 89;
1832 break;
1833 }
1834 index = bit_pos / 32;
1835 bit = bit_pos % 32;
1836
1837 return buf[index] & (1U << bit);
1838 }
1839
1840 /**
1841 * Get EGRESS, INGRESS, FLM, and CNM max qid.
1842 *
1843 * For EGRESS and INGRESS, do the following calculation.
1844 * max_qid = (DBQ/IMSG context region size in bytes) /
1845 * (size of context in bytes).
1846 *
1847 * For FLM, do the following calculation.
1848 * max_qid = (FLM cache region size in bytes) /
1849 * ((number of pointers cached in EDRAM) * 8 (bytes per pointer)).
1850 *
1851 * There's a 1-to-1 mapping between FLM and CNM if there's no header splitting
1852 * enabled; i.e., max CNM qid is equal to max FLM qid. However, if header
1853 * splitting is enabled, then max CNM qid is half of max FLM qid.
1854 */
1855 static int
1856 get_max_ctxt_qid(struct adapter *padap,
1857 struct struct_meminfo *meminfo,
1858 u32 *max_ctx_qid, u8 nelem)
1859 {
1860 u32 i, idx, found = 0;
1861
1862 if (nelem != (CTXT_CNM + 1))
1863 return -EINVAL;
1864
1865 for (i = 0; i < meminfo->mem_c; i++) {
1866 if (meminfo->mem[i].idx >= ARRAY_SIZE(region))
1867 continue; /* skip holes */
1868
1869 idx = meminfo->mem[i].idx;
1870 /* Get DBQ, IMSG, and FLM context region size */
1871 if (idx <= CTXT_FLM) {
1872 if (!(meminfo->mem[i].limit))
1873 meminfo->mem[i].limit =
1874 i < meminfo->mem_c - 1 ?
1875 meminfo->mem[i + 1].base - 1 : ~0;
1876
1877 if (idx < CTXT_FLM) {
1878 /* Get EGRESS and INGRESS max qid. */
1879 max_ctx_qid[idx] = (meminfo->mem[i].limit -
1880 meminfo->mem[i].base + 1) /
1881 CUDBG_CTXT_SIZE_BYTES;
1882 found++;
1883 } else {
1884 /* Get FLM and CNM max qid. */
1885 u32 value, edram_ptr_count;
1886 u8 bytes_per_ptr = 8;
1887 u8 nohdr;
1888
1889 value = t4_read_reg(padap, A_SGE_FLM_CFG);
1890
1891 /* Check if header splitting is enabled. */
1892 nohdr = (value >> S_NOHDR) & 1U;
1893
1894 /* Get the number of pointers in EDRAM per
1895 * qid in units of 32.
1896 */
1897 edram_ptr_count = 32 *
1898 (1U << G_EDRAMPTRCNT(value));
1899
1900 /* EDRAMPTRCNT value of 3 is reserved.
1901 * So don't exceed 128.
1902 */
1903 if (edram_ptr_count > 128)
1904 edram_ptr_count = 128;
1905
1906 max_ctx_qid[idx] = (meminfo->mem[i].limit -
1907 meminfo->mem[i].base + 1) /
1908 (edram_ptr_count *
1909 bytes_per_ptr);
1910 found++;
1911
1912 /* CNM has 1-to-1 mapping with FLM.
1913 * However, if header splitting is enabled,
1914 * then max CNM qid is half of max FLM qid.
1915 */
1916 max_ctx_qid[CTXT_CNM] = nohdr ?
1917 max_ctx_qid[idx] :
1918 max_ctx_qid[idx] >> 1;
1919
1920 /* One more increment for CNM */
1921 found++;
1922 }
1923 }
1924 if (found == nelem)
1925 break;
1926 }
1927
1928 /* Sanity check. Ensure the values are within known max. */
1929 max_ctx_qid[CTXT_EGRESS] = min_t(u32, max_ctx_qid[CTXT_EGRESS],
1930 M_CTXTQID);
1931 max_ctx_qid[CTXT_INGRESS] = min_t(u32, max_ctx_qid[CTXT_INGRESS],
1932 CUDBG_MAX_INGRESS_QIDS);
1933 max_ctx_qid[CTXT_FLM] = min_t(u32, max_ctx_qid[CTXT_FLM],
1934 CUDBG_MAX_FL_QIDS);
1935 max_ctx_qid[CTXT_CNM] = min_t(u32, max_ctx_qid[CTXT_CNM],
1936 CUDBG_MAX_CNM_QIDS);
1937 return 0;
1938 }
1939
1940 static int
1941 collect_dump_context(struct cudbg_init *pdbg_init,
1942 struct cudbg_buffer *dbg_buff,
1943 struct cudbg_error *cudbg_err)
1944 {
1945 struct cudbg_buffer scratch_buff;
1946 struct cudbg_buffer temp_buff;
1947 struct adapter *padap = pdbg_init->adap;
1948 u32 size = 0, next_offset = 0, total_size = 0;
1949 struct cudbg_ch_cntxt *buff = NULL;
1950 struct struct_meminfo meminfo;
1951 int bytes = 0;
1952 int rc = 0;
1953 u32 i, j;
1954 u32 max_ctx_qid[CTXT_CNM + 1];
1955 bool limit_qid = false;
1956 u32 qid_count = 0;
1957
1958 rc = fill_meminfo(padap, &meminfo);
1959 if (rc)
1960 goto err;
1961
1962 /* Get max valid qid for each type of queue */
1963 rc = get_max_ctxt_qid(padap, &meminfo, max_ctx_qid, CTXT_CNM + 1);
1964 if (rc)
1965 goto err;
1966
1967 	/* There are four types of queues. Collect context up to the max
1968 	 * qid of each type of queue.
1969 	 */
1970 for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1971 size += sizeof(struct cudbg_ch_cntxt) * max_ctx_qid[i];
1972
1973 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1974 if (rc == CUDBG_STATUS_NO_SCRATCH_MEM) {
1975 		/* Not enough scratch memory available; fall back to
1976 		 * collecting only CUDBG_LOWMEM_MAX_CTXT_QIDS contexts
1977 		 * per queue type.
1978 		 */
1979 size = 0;
1980 for (i = CTXT_EGRESS; i <= CTXT_CNM; i++)
1981 size += sizeof(struct cudbg_ch_cntxt) *
1982 CUDBG_LOWMEM_MAX_CTXT_QIDS;
1983
1984 limit_qid = true;
1985 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
1986 if (rc)
1987 goto err;
1988 }
1989
1990 buff = (struct cudbg_ch_cntxt *)scratch_buff.data;
1991
1992 /* Collect context data */
1993 for (i = CTXT_EGRESS; i <= CTXT_FLM; i++) {
1994 qid_count = 0;
1995 for (j = 0; j < max_ctx_qid[i]; j++) {
1996 read_sge_ctxt(pdbg_init, j, i, buff->data);
1997
1998 rc = check_valid(buff->data, i);
1999 if (rc) {
2000 buff->cntxt_type = i;
2001 buff->cntxt_id = j;
2002 buff++;
2003 total_size += sizeof(struct cudbg_ch_cntxt);
2004
2005 if (i == CTXT_FLM) {
2006 read_sge_ctxt(pdbg_init, j, CTXT_CNM,
2007 buff->data);
2008 buff->cntxt_type = CTXT_CNM;
2009 buff->cntxt_id = j;
2010 buff++;
2011 total_size +=
2012 sizeof(struct cudbg_ch_cntxt);
2013 }
2014 qid_count++;
2015 }
2016
2017 			/* If there's not enough space to collect more qids,
2018 			 * then bail and move on to the next queue type.
2019 			 */
2020 if (limit_qid &&
2021 qid_count >= CUDBG_LOWMEM_MAX_CTXT_QIDS)
2022 break;
2023 }
2024 }
2025
2026 scratch_buff.size = total_size;
2027 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2028 if (rc)
2029 goto err1;
2030
2031 /* Splitting buffer and writing in terms of CUDBG_CHUNK_SIZE */
2032 while (total_size > 0) {
2033 bytes = min_t(unsigned long, (unsigned long)total_size,
2034 (unsigned long)CUDBG_CHUNK_SIZE);
2035 temp_buff.size = bytes;
2036 temp_buff.data = (void *)((char *)scratch_buff.data +
2037 next_offset);
2038
2039 rc = compress_buff(&temp_buff, dbg_buff);
2040 if (rc)
2041 goto err1;
2042
2043 total_size -= bytes;
2044 next_offset += bytes;
2045 }
2046
2047 err1:
2048 scratch_buff.size = size;
2049 release_scratch_buff(&scratch_buff, dbg_buff);
2050 err:
2051 return rc;
2052 }
2053
2054 static int
2055 collect_fw_devlog(struct cudbg_init *pdbg_init,
2056 struct cudbg_buffer *dbg_buff,
2057 struct cudbg_error *cudbg_err)
2058 {
2059 struct adapter *padap = pdbg_init->adap;
2060 struct devlog_params *dparams = &padap->params.devlog;
2061 struct cudbg_buffer scratch_buff;
2062 u32 offset;
2063 int rc = 0;
2064
2065 rc = t4_init_devlog_params(padap, 1);
2066
2067 if (rc < 0) {
2068 pdbg_init->print(padap->dip, CE_NOTE,
2069 "%s(), t4_init_devlog_params failed!, rc: "\
2070 "%d\n", __func__, rc);
2071 rc = CUDBG_SYSTEM_ERROR;
2072 goto err;
2073 }
2074
2075 rc = get_scratch_buff(dbg_buff, dparams->size, &scratch_buff);
2076
2077 if (rc)
2078 goto err;
2079
2080 /* Collect FW devlog */
2081 if (dparams->start != 0) {
2082 offset = scratch_buff.offset;
2083 rc = t4_memory_rw(padap, padap->params.drv_memwin,
2084 dparams->memtype, dparams->start,
2085 dparams->size,
2086 (__be32 *)((char *)scratch_buff.data +
2087 offset), 1);
2088
2089 if (rc) {
2090 pdbg_init->print(padap->dip, CE_NOTE,
2091 "%s(), t4_memory_rw failed!, rc: "\
2092 "%d\n", __func__, rc);
2093 cudbg_err->sys_err = rc;
2094 goto err1;
2095 }
2096 }
2097
2098 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2099
2100 if (rc)
2101 goto err1;
2102
2103 rc = compress_buff(&scratch_buff, dbg_buff);
2104
2105 err1:
2106 release_scratch_buff(&scratch_buff, dbg_buff);
2107 err:
2108 return rc;
2109 }
2110 /* CIM OBQ */
2111
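/*
 * The collect_cim_obq_* and collect_obq_sge_rx_q* entry points below are
 * thin wrappers that map a fixed outbound queue id (0-3: ULP0-ULP3, 4: SGE,
 * 5: NCSI, 6-7: SGE RX queues) onto the common read_cim_obq() helper.
 */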
2112 static int
2113 collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
2114 struct cudbg_buffer *dbg_buff,
2115 struct cudbg_error *cudbg_err)
2116 {
2117 int rc = 0, qid = 0;
2118
2119 rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2120
2121 return rc;
2122 }
2123
2124 static int
2125 collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
2126 struct cudbg_buffer *dbg_buff,
2127 struct cudbg_error *cudbg_err)
2128 {
2129 int rc = 0, qid = 1;
2130
2131 rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2132
2133 return rc;
2134 }
2135
2136 static int
2137 collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
2138 struct cudbg_buffer *dbg_buff,
2139 struct cudbg_error *cudbg_err)
2140 {
2141 int rc = 0, qid = 2;
2142
2143 rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2144
2145 return rc;
2146 }
2147
2148 static int
2149 collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
2150 struct cudbg_buffer *dbg_buff,
2151 struct cudbg_error *cudbg_err)
2152 {
2153 int rc = 0, qid = 3;
2154
2155 rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2156
2157 return rc;
2158 }
2159
2160 static int
2161 collect_cim_obq_sge(struct cudbg_init *pdbg_init,
2162 struct cudbg_buffer *dbg_buff,
2163 struct cudbg_error *cudbg_err)
2164 {
2165 int rc = 0, qid = 4;
2166
2167 rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2168
2169 return rc;
2170 }
2171
2172 static int
2173 collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
2174 struct cudbg_buffer *dbg_buff,
2175 struct cudbg_error *cudbg_err)
2176 {
2177 int rc = 0, qid = 5;
2178
2179 rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2180
2181 return rc;
2182 }
2183
2184 static int
2185 collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
2186 struct cudbg_buffer *dbg_buff,
2187 struct cudbg_error *cudbg_err)
2188 {
2189 int rc = 0, qid = 6;
2190
2191 rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2192
2193 return rc;
2194 }
2195
2196 static int
2197 collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
2198 struct cudbg_buffer *dbg_buff,
2199 struct cudbg_error *cudbg_err)
2200 {
2201 int rc = 0, qid = 7;
2202
2203 rc = read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
2204
2205 return rc;
2206 }
2207
2208 static int
2209 read_cim_obq(struct cudbg_init *pdbg_init,
2210 struct cudbg_buffer *dbg_buff,
2211 struct cudbg_error *cudbg_err, int qid)
2212 {
2213 struct cudbg_buffer scratch_buff;
2214 struct adapter *padap = pdbg_init->adap;
2215 u32 qsize;
2216 int rc;
2217 int no_of_read_words;
2218
2219 /* collect CIM OBQ */
2220 qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);
2221 rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2222 if (rc)
2223 goto err;
2224
2225 /* t4_read_cim_obq will return no. of read words or error */
2226 no_of_read_words = t4_read_cim_obq(padap, qid,
2227 (u32 *)((u32 *)scratch_buff.data +
2228 scratch_buff.offset), qsize);
2229
2230 	/* no_of_read_words <= 0 indicates an error */
2231 if (no_of_read_words <= 0) {
2232 if (no_of_read_words == 0)
2233 rc = CUDBG_SYSTEM_ERROR;
2234 else
2235 rc = no_of_read_words;
2236 if (pdbg_init->verbose)
2237 pdbg_init->print(padap->dip, CE_NOTE,
2238 "%s: t4_read_cim_obq failed (%d)\n",
2239 __func__, rc);
2240 cudbg_err->sys_err = rc;
2241 goto err1;
2242 }
2243
2244 scratch_buff.size = no_of_read_words * 4;
2245
2246 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2247
2248 if (rc)
2249 goto err1;
2250
2251 rc = compress_buff(&scratch_buff, dbg_buff);
2252
2253 if (rc)
2254 goto err1;
2255
2256 err1:
2257 release_scratch_buff(&scratch_buff, dbg_buff);
2258 err:
2259 return rc;
2260 }
2261
2262 /* CIM IBQ */
2263
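/*
 * Likewise for the inbound queues: qid 0-1 are TP0/TP1, 2 is ULP, 3-4 are
 * SGE0/SGE1, and 5 is NCSI; all of them funnel into read_cim_ibq().
 */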
2264 static int
2265 collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
2266 struct cudbg_buffer *dbg_buff,
2267 struct cudbg_error *cudbg_err)
2268 {
2269 int rc = 0, qid = 0;
2270
2271 rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2272 return rc;
2273 }
2274
2275 static int
2276 collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
2277 struct cudbg_buffer *dbg_buff,
2278 struct cudbg_error *cudbg_err)
2279 {
2280 int rc = 0, qid = 1;
2281
2282 rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2283 return rc;
2284 }
2285
2286 static int
2287 collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
2288 struct cudbg_buffer *dbg_buff,
2289 struct cudbg_error *cudbg_err)
2290 {
2291 int rc = 0, qid = 2;
2292
2293 rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2294 return rc;
2295 }
2296
2297 static int
2298 collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
2299 struct cudbg_buffer *dbg_buff,
2300 struct cudbg_error *cudbg_err)
2301 {
2302 int rc = 0, qid = 3;
2303
2304 rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2305 return rc;
2306 }
2307
2308 static int
2309 collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
2310 struct cudbg_buffer *dbg_buff,
2311 struct cudbg_error *cudbg_err)
2312 {
2313 int rc = 0, qid = 4;
2314
2315 rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2316 return rc;
2317 }
2318
2319 static int
2320 collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
2321 struct cudbg_buffer *dbg_buff,
2322 struct cudbg_error *cudbg_err)
2323 {
2324 int rc, qid = 5;
2325
2326 rc = read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
2327 return rc;
2328 }
2329
2330 static int
2331 read_cim_ibq(struct cudbg_init *pdbg_init,
2332 struct cudbg_buffer *dbg_buff,
2333 struct cudbg_error *cudbg_err, int qid)
2334 {
2335 struct adapter *padap = pdbg_init->adap;
2336 struct cudbg_buffer scratch_buff;
2337 u32 qsize;
2338 int rc;
2339 int no_of_read_words;
2340
2341 /* collect CIM IBQ */
2342 qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
2343 rc = get_scratch_buff(dbg_buff, qsize, &scratch_buff);
2344
2345 if (rc)
2346 goto err;
2347
2348 /* t4_read_cim_ibq will return no. of read words or error */
2349 no_of_read_words = t4_read_cim_ibq(padap, qid,
2350 (u32 *)((u32 *)scratch_buff.data +
2351 scratch_buff.offset), qsize);
2352 	/* no_of_read_words <= 0 indicates an error */
2353 if (no_of_read_words <= 0) {
2354 if (no_of_read_words == 0)
2355 rc = CUDBG_SYSTEM_ERROR;
2356 else
2357 rc = no_of_read_words;
2358 if (pdbg_init->verbose)
2359 pdbg_init->print(padap->dip, CE_NOTE,
2360 "%s: t4_read_cim_ibq failed (%d)\n",
2361 __func__, rc);
2362 cudbg_err->sys_err = rc;
2363 goto err1;
2364 }
2365
2366 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2367 if (rc)
2368 goto err1;
2369
2370 rc = compress_buff(&scratch_buff, dbg_buff);
2371 if (rc)
2372 goto err1;
2373
2374 err1:
2375 release_scratch_buff(&scratch_buff, dbg_buff);
2376
2377 err:
2378 return rc;
2379 }
2380
2381 static int
2382 collect_cim_ma_la(struct cudbg_init *pdbg_init,
2383 struct cudbg_buffer *dbg_buff,
2384 struct cudbg_error *cudbg_err)
2385 {
2386 struct cudbg_buffer scratch_buff;
2387 struct adapter *padap = pdbg_init->adap;
2388 u32 rc = 0;
2389
2390 /* collect CIM MA LA */
2391 scratch_buff.size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
2392 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2393 if (rc)
2394 goto err;
2395
2396 /* no return */
2397 t4_cim_read_ma_la(padap,
2398 (u32 *) ((char *)scratch_buff.data +
2399 scratch_buff.offset),
2400 (u32 *) ((char *)scratch_buff.data +
2401 scratch_buff.offset + 5 * CIM_MALA_SIZE));
2402
2403 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2404 if (rc)
2405 goto err1;
2406
2407 rc = compress_buff(&scratch_buff, dbg_buff);
2408
2409 err1:
2410 release_scratch_buff(&scratch_buff, dbg_buff);
2411 err:
2412 return rc;
2413 }
2414
2415 static int
2416 collect_cim_la(struct cudbg_init *pdbg_init,
2417 struct cudbg_buffer *dbg_buff,
2418 struct cudbg_error *cudbg_err)
2419 {
2420 struct cudbg_buffer scratch_buff;
2421 struct adapter *padap = pdbg_init->adap;
2422
2423 int rc;
2424 u32 cfg = 0;
2425 int size;
2426
2427 /* collect CIM LA */
2428 if (is_t6(padap->params.chip)) {
2429 size = padap->params.cim_la_size / 10 + 1;
2430 size *= 11 * sizeof(u32);
2431 } else {
2432 size = padap->params.cim_la_size / 8;
2433 size *= 8 * sizeof(u32);
2434 }
2435
2436 size += sizeof(cfg);
2437
2438 rc = get_scratch_buff(dbg_buff, size, &scratch_buff);
2439 if (rc)
2440 goto err;
2441
2442 rc = t4_cim_read(padap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
2443
2444 if (rc) {
2445 if (pdbg_init->verbose)
2446 pdbg_init->print(padap->dip, CE_NOTE,
2447 "%s: t4_cim_read failed (%d)\n",
2448 __func__, rc);
2449 cudbg_err->sys_err = rc;
2450 goto err1;
2451 }
2452
2453 memcpy((char *)scratch_buff.data + scratch_buff.offset, &cfg,
2454 sizeof(cfg));
2455
2456 rc = t4_cim_read_la(padap,
2457 (u32 *) ((char *)scratch_buff.data +
2458 scratch_buff.offset + sizeof(cfg)), NULL);
2459 if (rc < 0) {
2460 if (pdbg_init->verbose)
2461 pdbg_init->print(padap->dip, CE_NOTE,
2462 "%s: t4_cim_read_la failed (%d)\n",
2463 __func__, rc);
2464 cudbg_err->sys_err = rc;
2465 goto err1;
2466 }
2467
2468 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2469 if (rc)
2470 goto err1;
2471
2472 rc = compress_buff(&scratch_buff, dbg_buff);
2473 if (rc)
2474 goto err1;
2475
2476 err1:
2477 release_scratch_buff(&scratch_buff, dbg_buff);
2478 err:
2479 return rc;
2480 }
2481
2482 static int
2483 collect_cim_qcfg(struct cudbg_init *pdbg_init,
2484 struct cudbg_buffer *dbg_buff,
2485 struct cudbg_error *cudbg_err)
2486 {
2487 struct cudbg_buffer scratch_buff;
2488 struct adapter *padap = pdbg_init->adap;
2489 u32 offset;
2490 int rc = 0;
2491
2492 struct struct_cim_qcfg *cim_qcfg_data = NULL;
2493
2494 rc = get_scratch_buff(dbg_buff, sizeof(struct struct_cim_qcfg),
2495 &scratch_buff);
2496
2497 if (rc)
2498 goto err;
2499
2500 offset = scratch_buff.offset;
2501
2502 cim_qcfg_data =
2503 (struct struct_cim_qcfg *)((u8 *)((char *)scratch_buff.data +
2504 offset));
2505
2506 rc = t4_cim_read(padap, A_UP_IBQ_0_RDADDR,
2507 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
2508
2509 if (rc) {
2510 if (pdbg_init->verbose)
2511 pdbg_init->print(padap->dip, CE_NOTE,
2512 "%s: t4_cim_read IBQ_0_RDADDR failed (%d)\n",
2513 __func__, rc);
2514 cudbg_err->sys_err = rc;
2515 goto err1;
2516 }
2517
2518 rc = t4_cim_read(padap, A_UP_OBQ_0_REALADDR,
2519 ARRAY_SIZE(cim_qcfg_data->obq_wr),
2520 cim_qcfg_data->obq_wr);
2521
2522 if (rc) {
2523 if (pdbg_init->verbose)
2524 pdbg_init->print(padap->dip, CE_NOTE,
2525 "%s: t4_cim_read OBQ_0_REALADDR failed (%d)\n",
2526 __func__, rc);
2527 cudbg_err->sys_err = rc;
2528 goto err1;
2529 }
2530
2531 /* no return val */
2532 t4_read_cimq_cfg(padap,
2533 cim_qcfg_data->base,
2534 cim_qcfg_data->size,
2535 cim_qcfg_data->thres);
2536
2537 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2538 if (rc)
2539 goto err1;
2540
2541 rc = compress_buff(&scratch_buff, dbg_buff);
2542 if (rc)
2543 goto err1;
2544
2545 err1:
2546 release_scratch_buff(&scratch_buff, dbg_buff);
2547 err:
2548 return rc;
2549 }
2550
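/*
 * Stream an entire memory target (EDC0/EDC1/MC0/MC1) into the dump in
 * CUDBG_CHUNK_SIZE pieces: grab a scratch chunk, fill it with t4_memory_rw(),
 * compress it into the output buffer, and repeat until tot_len bytes are read.
 */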
2551 static int
2552 read_fw_mem(struct cudbg_init *pdbg_init,
2553 struct cudbg_buffer *dbg_buff, u8 mem_type,
2554 unsigned long tot_len, struct cudbg_error *cudbg_err)
2555 {
2556 struct cudbg_buffer scratch_buff;
2557 struct adapter *padap = pdbg_init->adap;
2558 unsigned long bytes_read = 0;
2559 unsigned long bytes_left;
2560 unsigned long bytes;
2561 int rc;
2562
2563 bytes_left = tot_len;
2564 scratch_buff.size = tot_len;
2565 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2566 if (rc)
2567 goto err;
2568
2569 while (bytes_left > 0) {
2570 bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2571 rc = get_scratch_buff(dbg_buff, bytes, &scratch_buff);
2572
2573 if (rc) {
2574 rc = CUDBG_STATUS_NO_SCRATCH_MEM;
2575 goto err;
2576 }
2577
2578 		/* Read the next chunk of adapter memory. */
2580 rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, bytes_read,
2581 bytes, (__be32 *)(scratch_buff.data), 1);
2582
2583 if (rc) {
2584 if (pdbg_init->verbose)
2585 pdbg_init->print(padap->dip, CE_NOTE,
2586 "%s: t4_memory_rw failed (%d)",
2587 __func__, rc);
2588 cudbg_err->sys_err = rc;
2589 goto err1;
2590 }
2591
2592 rc = compress_buff(&scratch_buff, dbg_buff);
2593 if (rc)
2594 goto err1;
2595
2596 bytes_left -= bytes;
2597 bytes_read += bytes;
2598 release_scratch_buff(&scratch_buff, dbg_buff);
2599 }
2600
2601 err1:
2602 if (rc)
2603 release_scratch_buff(&scratch_buff, dbg_buff);
2604
2605 err:
2606 return rc;
2607 }
2608
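/*
 * Query the MA BAR registers for the EDC0/EDC1 and MC0/MC1 sizes (in MB) and
 * record which targets are actually enabled in mem_info->mem_flag.  T4 has a
 * single external memory; T5/T6 may have two.
 */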
2609 static void
2610 collect_mem_info(struct cudbg_init *pdbg_init,
2611 struct card_mem *mem_info)
2612 {
2613 struct adapter *padap = pdbg_init->adap;
2614 u32 value;
2615 int t4 = 0;
2616
2617 if (is_t4(padap->params.chip))
2618 t4 = 1;
2619
2620 if (t4) {
2621 value = t4_read_reg(padap, A_MA_EXT_MEMORY_BAR);
2622 value = G_EXT_MEM_SIZE(value);
2623 mem_info->size_mc0 = (u16)value; /* size in MB */
2624
2625 value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2626 if (value & F_EXT_MEM_ENABLE)
2627 mem_info->mem_flag |= (1 << MC0_FLAG); /* set mc0 flag
2628 bit */
2629 } else {
2630 value = t4_read_reg(padap, A_MA_EXT_MEMORY0_BAR);
2631 value = G_EXT_MEM0_SIZE(value);
2632 mem_info->size_mc0 = (u16)value;
2633
2634 value = t4_read_reg(padap, A_MA_EXT_MEMORY1_BAR);
2635 value = G_EXT_MEM1_SIZE(value);
2636 mem_info->size_mc1 = (u16)value;
2637
2638 value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2639 if (value & F_EXT_MEM0_ENABLE)
2640 mem_info->mem_flag |= (1 << MC0_FLAG);
2641 if (value & F_EXT_MEM1_ENABLE)
2642 mem_info->mem_flag |= (1 << MC1_FLAG);
2643 }
2644
2645 value = t4_read_reg(padap, A_MA_EDRAM0_BAR);
2646 value = G_EDRAM0_SIZE(value);
2647 mem_info->size_edc0 = (u16)value;
2648
2649 value = t4_read_reg(padap, A_MA_EDRAM1_BAR);
2650 value = G_EDRAM1_SIZE(value);
2651 mem_info->size_edc1 = (u16)value;
2652
2653 value = t4_read_reg(padap, A_MA_TARGET_MEM_ENABLE);
2654 if (value & F_EDRAM0_ENABLE)
2655 mem_info->mem_flag |= (1 << EDC0_FLAG);
2656 if (value & F_EDRAM1_ENABLE)
2657 mem_info->mem_flag |= (1 << EDC1_FLAG);
2658
2659 }
2660
2661 static void
2662 cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
2663 struct cudbg_error *cudbg_err)
2664 {
2665 struct adapter *padap = pdbg_init->adap;
2666 int rc;
2667
2668 if (is_fw_attached(pdbg_init)) {
2669
2670 /* Flush uP dcache before reading edcX/mcX */
2671 rc = begin_synchronized_op(padap->port[0], 1, 1);
2672 if (rc == 0) {
2673 rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
2674 end_synchronized_op(padap->port[0], 1);
2675 }
2676
2677 if (rc) {
2678 if (pdbg_init->verbose)
2679 pdbg_init->print(padap->dip, CE_NOTE,
2680 "%s: t4_fwcache failed (%d)\n",
2681 __func__, rc);
2682 cudbg_err->sys_warn = rc;
2683 }
2684 }
2685 }
2686
2687 static int
2688 collect_edc0_meminfo(struct cudbg_init *pdbg_init,
2689 struct cudbg_buffer *dbg_buff,
2690 struct cudbg_error *cudbg_err)
2691 {
2692 struct card_mem mem_info = {0};
2693 unsigned long edc0_size;
2694 int rc;
2695
2696 cudbg_t4_fwcache(pdbg_init, cudbg_err);
2697
2698 collect_mem_info(pdbg_init, &mem_info);
2699
2700 if (mem_info.mem_flag & (1 << EDC0_FLAG)) {
2701 edc0_size = (((unsigned long)mem_info.size_edc0) * 1024 * 1024);
2702 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC0,
2703 edc0_size, cudbg_err);
2704 if (rc)
2705 goto err;
2706
2707 } else {
2708 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2709 if (pdbg_init->verbose)
2710 pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2711 "%s(), collect_mem_info failed!, %s\n",
2712 __func__, err_msg[-rc]);
2713 goto err;
2714
2715 }
2716 err:
2717 return rc;
2718 }
2719
2720 static int
2721 collect_edc1_meminfo(struct cudbg_init *pdbg_init,
2722 struct cudbg_buffer *dbg_buff,
2723 struct cudbg_error *cudbg_err)
2724 {
2725 struct card_mem mem_info = {0};
2726 unsigned long edc1_size;
2727 int rc;
2728
2729 cudbg_t4_fwcache(pdbg_init, cudbg_err);
2730
2731 collect_mem_info(pdbg_init, &mem_info);
2732
2733 if (mem_info.mem_flag & (1 << EDC1_FLAG)) {
2734 edc1_size = (((unsigned long)mem_info.size_edc1) * 1024 * 1024);
2735 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_EDC1,
2736 edc1_size, cudbg_err);
2737 if (rc)
2738 goto err;
2739 } else {
2740 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2741 if (pdbg_init->verbose)
2742 pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2743 "%s(), collect_mem_info failed!, %s\n",
2744 __func__, err_msg[-rc]);
2745 goto err;
2746 }
2747
2748 err:
2749
2750 return rc;
2751 }
2752
2753 static int
2754 collect_mc0_meminfo(struct cudbg_init *pdbg_init,
2755 struct cudbg_buffer *dbg_buff,
2756 struct cudbg_error *cudbg_err)
2757 {
2758 struct card_mem mem_info = {0};
2759 unsigned long mc0_size;
2760 int rc;
2761
2762 cudbg_t4_fwcache(pdbg_init, cudbg_err);
2763
2764 collect_mem_info(pdbg_init, &mem_info);
2765
2766 if (mem_info.mem_flag & (1 << MC0_FLAG)) {
2767 mc0_size = (((unsigned long)mem_info.size_mc0) * 1024 * 1024);
2768 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC0,
2769 mc0_size, cudbg_err);
2770 if (rc)
2771 goto err;
2772 } else {
2773 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2774 if (pdbg_init->verbose)
2775 pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2776 "%s(), collect_mem_info failed!, %s\n",
2777 __func__, err_msg[-rc]);
2778 goto err;
2779 }
2780
2781 err:
2782 return rc;
2783 }
2784
2785 static int
2786 collect_mc1_meminfo(struct cudbg_init *pdbg_init,
2787 struct cudbg_buffer *dbg_buff,
2788 struct cudbg_error *cudbg_err)
2789 {
2790 struct card_mem mem_info = {0};
2791 unsigned long mc1_size;
2792 int rc;
2793
2794 cudbg_t4_fwcache(pdbg_init, cudbg_err);
2795
2796 collect_mem_info(pdbg_init, &mem_info);
2797
2798 if (mem_info.mem_flag & (1 << MC1_FLAG)) {
2799 mc1_size = (((unsigned long)mem_info.size_mc1) * 1024 * 1024);
2800 rc = read_fw_mem(pdbg_init, dbg_buff, MEM_MC1,
2801 mc1_size, cudbg_err);
2802 if (rc)
2803 goto err;
2804 } else {
2805 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
2806
2807 if (pdbg_init->verbose)
2808 pdbg_init->print(pdbg_init->adap->dip, CE_NOTE,
2809 "%s(), collect_mem_info failed!, %s\n",
2810 __func__, err_msg[-rc]);
2811 goto err;
2812 }
2813 err:
2814 return rc;
2815 }
2816
2817 static int
2818 collect_reg_dump(struct cudbg_init *pdbg_init,
2819 struct cudbg_buffer *dbg_buff,
2820 struct cudbg_error *cudbg_err)
2821 {
2822 struct cudbg_buffer scratch_buff;
2823 struct cudbg_buffer tmp_scratch_buff;
2824 struct adapter *padap = pdbg_init->adap;
2825 unsigned long bytes_read = 0;
2826 unsigned long bytes_left;
2827 u32 buf_size = 0, bytes = 0;
2828 int rc = 0;
2829
2830 if (is_t4(padap->params.chip))
2831 		buf_size = T4_REGMAP_SIZE; /* + sizeof(unsigned int) */
2832 else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
2833 buf_size = T5_REGMAP_SIZE;
2834
2835 scratch_buff.size = buf_size;
2836
2837 tmp_scratch_buff = scratch_buff;
2838
2839 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2840 if (rc)
2841 goto err;
2842
2843 /* no return */
2844 t4_get_regs(padap, (void *)scratch_buff.data, scratch_buff.size);
2845 bytes_left = scratch_buff.size;
2846
2847 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2848 if (rc)
2849 goto err1;
2850
2851 while (bytes_left > 0) {
2852 tmp_scratch_buff.data =
2853 ((char *)scratch_buff.data) + bytes_read;
2854 bytes = min_t(unsigned long, bytes_left, (unsigned long)CUDBG_CHUNK_SIZE);
2855 tmp_scratch_buff.size = bytes;
2856 compress_buff(&tmp_scratch_buff, dbg_buff);
2857 bytes_left -= bytes;
2858 bytes_read += bytes;
2859 }
2860
2861 err1:
2862 release_scratch_buff(&scratch_buff, dbg_buff);
2863 err:
2864 return rc;
2865 }
2866
2867 static int
2868 collect_cctrl(struct cudbg_init *pdbg_init,
2869 struct cudbg_buffer *dbg_buff,
2870 struct cudbg_error *cudbg_err)
2871 {
2872 struct cudbg_buffer scratch_buff;
2873 struct adapter *padap = pdbg_init->adap;
2874 u32 size;
2875 int rc;
2876
2877 size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2878 scratch_buff.size = size;
2879
2880 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2881 if (rc)
2882 goto err;
2883
2884 t4_read_cong_tbl(padap, (void *)scratch_buff.data);
2885
2886 rc = write_compression_hdr(&scratch_buff, dbg_buff);
2887 if (rc)
2888 goto err1;
2889
2890 rc = compress_buff(&scratch_buff, dbg_buff);
2891
2892 err1:
2893 release_scratch_buff(&scratch_buff, dbg_buff);
2894 err:
2895 return rc;
2896 }
2897
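/*
 * Poll A_CIM_HOST_ACC_CTRL until the HOSTBUSY bit clears.  Gives up after
 * 10 reads and returns -1; returns 0 once the interface is idle.
 */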
2898 static int
2899 check_busy_bit(struct adapter *padap)
2900 {
2901 u32 val;
2902 u32 busy = 1;
2903 int i = 0;
2904 int retry = 10;
2905 int status = 0;
2906
2907 while (busy && (i < retry)) {
2908 val = t4_read_reg(padap, A_CIM_HOST_ACC_CTRL);
2909 busy = (0 != (val & CUDBG_CIM_BUSY_BIT));
2910 i++;
2911 }
2912
2913 if (busy)
2914 status = -1;
2915
2916 return status;
2917 }
2918
2919 static int
2920 cim_ha_rreg(struct adapter *padap, u32 addr, u32 *val)
2921 {
2922 int rc = 0;
2923
2924 /* write register address into the A_CIM_HOST_ACC_CTRL */
2925 t4_write_reg(padap, A_CIM_HOST_ACC_CTRL, addr);
2926
2927 /* Poll HOSTBUSY */
2928 rc = check_busy_bit(padap);
2929 if (rc)
2930 goto err;
2931
2932 /* Read value from A_CIM_HOST_ACC_DATA */
2933 *val = t4_read_reg(padap, A_CIM_HOST_ACC_DATA);
2934
2935 err:
2936 return rc;
2937 }
2938
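/*
 * Dump one indirect register range through the CIM host-access window:
 * read ireg_offset_range consecutive 32-bit words starting at
 * ireg_local_offset, one cim_ha_rreg() call per word.
 */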
2939 static int
2940 dump_up_cim(struct adapter *padap, struct cudbg_init *pdbg_init,
2941 struct ireg_field *up_cim_reg, u32 *buff)
2942 {
2943 u32 i;
2944 int rc = 0;
2945
2946 for (i = 0; i < up_cim_reg->ireg_offset_range; i++) {
2947 rc = cim_ha_rreg(padap,
2948 up_cim_reg->ireg_local_offset + (i * 4),
2949 buff);
2950 if (rc) {
2951 if (pdbg_init->verbose)
2952 pdbg_init->print(padap->dip, CE_NOTE,
2953 					    "BUSY timeout reading "
2954 					    "CIM_HOST_ACC_CTRL\n");
2955 goto err;
2956 }
2957
2958 buff++;
2959 }
2960
2961 err:
2962 return rc;
2963 }
2964
2965 static int
2966 collect_up_cim_indirect(struct cudbg_init *pdbg_init,
2967 struct cudbg_buffer *dbg_buff,
2968 struct cudbg_error *cudbg_err)
2969 {
2970 struct cudbg_buffer scratch_buff;
2971 struct adapter *padap = pdbg_init->adap;
2972 struct ireg_buf *up_cim;
2973 u32 size;
2974 int i, rc, n;
2975
2976 n = sizeof(t5_up_cim_reg_array) / (4 * sizeof(u32));
2977 size = sizeof(struct ireg_buf) * n;
2978 scratch_buff.size = size;
2979
2980 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
2981 if (rc)
2982 goto err;
2983
2984 up_cim = (struct ireg_buf *)scratch_buff.data;
2985
2986 for (i = 0; i < n; i++) {
2987 struct ireg_field *up_cim_reg = &up_cim->tp_pio;
2988 u32 *buff = up_cim->outbuf;
2989
2990 if (is_t5(padap->params.chip)) {
2991 up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
2992 up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
2993 up_cim_reg->ireg_local_offset =
2994 t5_up_cim_reg_array[i][2];
2995 up_cim_reg->ireg_offset_range =
2996 t5_up_cim_reg_array[i][3];
2997 } else if (is_t6(padap->params.chip)) {
2998 up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
2999 up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
3000 up_cim_reg->ireg_local_offset =
3001 t6_up_cim_reg_array[i][2];
3002 up_cim_reg->ireg_offset_range =
3003 t6_up_cim_reg_array[i][3];
3004 }
3005
3006 rc = dump_up_cim(padap, pdbg_init, up_cim_reg, buff);
3007
3008 up_cim++;
3009 }
3010
3011 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3012 if (rc)
3013 goto err1;
3014
3015 rc = compress_buff(&scratch_buff, dbg_buff);
3016
3017 err1:
3018 release_scratch_buff(&scratch_buff, dbg_buff);
3019 err:
3020 return rc;
3021 }
3022
3023 static int
3024 collect_mbox_log(struct cudbg_init *pdbg_init,
3025 struct cudbg_buffer *dbg_buff,
3026 struct cudbg_error *cudbg_err)
3027 {
3028 #ifdef notyet
3029 struct cudbg_buffer scratch_buff;
3030 struct cudbg_mbox_log *mboxlog = NULL;
3031 struct mbox_cmd_log *log = NULL;
3032 struct mbox_cmd *entry;
3033 u64 flit;
3034 u32 size;
3035 unsigned int entry_idx;
3036 int i, k, rc;
3037 u16 mbox_cmds;
3038
3039 if (pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.mboxlog_param.log) {
3040 log = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3041 mboxlog_param.log;
3042 mbox_cmds = pdbg_init->dbg_params[CUDBG_MBOX_LOG_PARAM].u.
3043 mboxlog_param.mbox_cmds;
3044 } else {
3045 if (pdbg_init->verbose)
3046 pdbg_init->print(adap->dip, CE_NOTE,
3047 "Mbox log is not requested\n");
3048 return CUDBG_STATUS_ENTITY_NOT_REQUESTED;
3049 }
3050
3051 size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
3052 scratch_buff.size = size;
3053 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3054 if (rc)
3055 goto err;
3056
3057 mboxlog = (struct cudbg_mbox_log *)scratch_buff.data;
3058
3059 for (k = 0; k < mbox_cmds; k++) {
3060 entry_idx = log->cursor + k;
3061 if (entry_idx >= log->size)
3062 entry_idx -= log->size;
3063 entry = mbox_cmd_log_entry(log, entry_idx);
3064
3065 /* skip over unused entries */
3066 if (entry->timestamp == 0)
3067 continue;
3068
3069 memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
3070
3071 for (i = 0; i < MBOX_LEN / 8; i++) {
3072 flit = entry->cmd[i];
3073 mboxlog->hi[i] = (u32)(flit >> 32);
3074 mboxlog->lo[i] = (u32)flit;
3075 }
3076
3077 mboxlog++;
3078 }
3079
3080 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3081 if (rc)
3082 goto err1;
3083
3084 rc = compress_buff(&scratch_buff, dbg_buff);
3085
3086 err1:
3087 release_scratch_buff(&scratch_buff, dbg_buff);
3088 err:
3089 return rc;
3090 #endif
3091 return (-1);
3092 }
3093
3094 static int
3095 collect_pbt_tables(struct cudbg_init *pdbg_init,
3096 struct cudbg_buffer *dbg_buff,
3097 struct cudbg_error *cudbg_err)
3098 {
3099 struct cudbg_buffer scratch_buff;
3100 struct adapter *padap = pdbg_init->adap;
3101 struct cudbg_pbt_tables *pbt = NULL;
3102 u32 size;
3103 u32 addr;
3104 int i, rc;
3105
3106 size = sizeof(struct cudbg_pbt_tables);
3107 scratch_buff.size = size;
3108
3109 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3110 if (rc)
3111 goto err;
3112
3113 pbt = (struct cudbg_pbt_tables *)scratch_buff.data;
3114
3115 /* PBT dynamic entries */
3116 addr = CUDBG_CHAC_PBT_ADDR;
3117 for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
3118 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_dynamic[i]);
3119 if (rc) {
3120 if (pdbg_init->verbose)
3121 pdbg_init->print(padap->dip, CE_NOTE,
3122 					    "BUSY timeout reading "
3123 					    "CIM_HOST_ACC_CTRL\n");
3124 goto err1;
3125 }
3126 }
3127
3128 /* PBT static entries */
3129
3130 /* static entries start when bit 6 is set */
3131 addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
3132 for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
3133 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_static[i]);
3134 if (rc) {
3135 if (pdbg_init->verbose)
3136 pdbg_init->print(padap->dip, CE_NOTE,
3137 					    "BUSY timeout reading "
3138 					    "CIM_HOST_ACC_CTRL\n");
3139 goto err1;
3140 }
3141 }
3142
3143 /* LRF entries */
3144 addr = CUDBG_CHAC_PBT_LRF;
3145 for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
3146 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->lrf_table[i]);
3147 if (rc) {
3148 if (pdbg_init->verbose)
3149 pdbg_init->print(padap->dip, CE_NOTE,
3150 					    "BUSY timeout reading "
3151 					    "CIM_HOST_ACC_CTRL\n");
3152 goto err1;
3153 }
3154 }
3155
3156 /* PBT data entries */
3157 addr = CUDBG_CHAC_PBT_DATA;
3158 for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
3159 rc = cim_ha_rreg(padap, addr + (i * 4), &pbt->pbt_data[i]);
3160 if (rc) {
3161 if (pdbg_init->verbose)
3162 pdbg_init->print(padap->dip, CE_NOTE,
3163 					    "BUSY timeout reading "
3164 					    "CIM_HOST_ACC_CTRL\n");
3165 goto err1;
3166 }
3167 }
3168
3169 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3170 if (rc)
3171 goto err1;
3172
3173 rc = compress_buff(&scratch_buff, dbg_buff);
3174
3175 err1:
3176 release_scratch_buff(&scratch_buff, dbg_buff);
3177 err:
3178 return rc;
3179 }
3180
3181 static int
3182 collect_pm_indirect(struct cudbg_init *pdbg_init,
3183 struct cudbg_buffer *dbg_buff,
3184 struct cudbg_error *cudbg_err)
3185 {
3186 struct cudbg_buffer scratch_buff;
3187 struct adapter *padap = pdbg_init->adap;
3188 struct ireg_buf *ch_pm;
3189 u32 size;
3190 int i, rc, n;
3191
3192 n = sizeof(t5_pm_rx_array) / (4 * sizeof(u32));
3193 size = sizeof(struct ireg_buf) * n * 2;
3194 scratch_buff.size = size;
3195
3196 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3197 if (rc)
3198 goto err;
3199
3200 ch_pm = (struct ireg_buf *)scratch_buff.data;
3201
3202 	/* PM_RX */
3203 for (i = 0; i < n; i++) {
3204 struct ireg_field *pm_pio = &ch_pm->tp_pio;
3205 u32 *buff = ch_pm->outbuf;
3206
3207 pm_pio->ireg_addr = t5_pm_rx_array[i][0];
3208 pm_pio->ireg_data = t5_pm_rx_array[i][1];
3209 pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
3210 pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
3211
3212 t4_read_indirect(padap,
3213 pm_pio->ireg_addr,
3214 pm_pio->ireg_data,
3215 buff,
3216 pm_pio->ireg_offset_range,
3217 pm_pio->ireg_local_offset);
3218
3219 ch_pm++;
3220 }
3221
3222 	/* PM_TX */
3223 n = sizeof(t5_pm_tx_array) / (4 * sizeof(u32));
3224 for (i = 0; i < n; i++) {
3225 struct ireg_field *pm_pio = &ch_pm->tp_pio;
3226 u32 *buff = ch_pm->outbuf;
3227
3228 pm_pio->ireg_addr = t5_pm_tx_array[i][0];
3229 pm_pio->ireg_data = t5_pm_tx_array[i][1];
3230 pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
3231 pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
3232
3233 t4_read_indirect(padap,
3234 pm_pio->ireg_addr,
3235 pm_pio->ireg_data,
3236 buff,
3237 pm_pio->ireg_offset_range,
3238 pm_pio->ireg_local_offset);
3239
3240 ch_pm++;
3241 }
3242
3243 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3244 if (rc)
3245 goto err1;
3246
3247 rc = compress_buff(&scratch_buff, dbg_buff);
3248
3249 err1:
3250 release_scratch_buff(&scratch_buff, dbg_buff);
3251 err:
3252 return rc;
3253
3254 }
3255
3256 static int
3257 collect_tid(struct cudbg_init *pdbg_init,
3258 struct cudbg_buffer *dbg_buff,
3259 struct cudbg_error *cudbg_err)
3260 {
3261
3262 struct cudbg_buffer scratch_buff;
3263 struct adapter *padap = pdbg_init->adap;
3264 struct tid_info_region *tid;
3265 struct tid_info_region_rev1 *tid1;
3266 u32 para[7], val[7];
3267 u32 mbox, pf;
3268 int rc;
3269
3270 scratch_buff.size = sizeof(struct tid_info_region_rev1);
3271
3272 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3273 if (rc)
3274 goto err;
3275
3276 #define FW_PARAM_DEV_A(param) \
3277 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3278 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3279 #define FW_PARAM_PFVF_A(param) \
3280 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3281 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
3282 V_FW_PARAMS_PARAM_Y(0) | \
3283 V_FW_PARAMS_PARAM_Z(0))
3284 #define MAX_ATIDS_A 8192U
3285
3286 tid1 = (struct tid_info_region_rev1 *)scratch_buff.data;
3287 tid = &(tid1->tid);
3288 tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
3289 tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
3290 tid1->ver_hdr.size = sizeof(struct tid_info_region_rev1) -
3291 sizeof(struct cudbg_ver_hdr);
3292
3293 if (is_t5(padap->params.chip)) {
3294 tid->hash_base = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3295 tid1->tid_start = 0;
3296 } else if (is_t6(padap->params.chip)) {
3297 tid->hash_base = t4_read_reg(padap, A_T6_LE_DB_HASH_TID_BASE);
3298 tid1->tid_start = t4_read_reg(padap, A_LE_DB_ACTIVE_TABLE_START_INDEX);
3299 }
3300
3301 tid->le_db_conf = t4_read_reg(padap, A_LE_DB_CONFIG);
3302
3303 para[0] = FW_PARAM_PFVF_A(FILTER_START);
3304 para[1] = FW_PARAM_PFVF_A(FILTER_END);
3305 para[2] = FW_PARAM_PFVF_A(ACTIVE_FILTER_START);
3306 para[3] = FW_PARAM_PFVF_A(ACTIVE_FILTER_END);
3307 para[4] = FW_PARAM_DEV_A(NTID);
3308 para[5] = FW_PARAM_PFVF_A(SERVER_START);
3309 para[6] = FW_PARAM_PFVF_A(SERVER_END);
3310
3311 rc = begin_synchronized_op(padap->port[0], 1, 1);
3312 if (rc)
3313 goto err;
3314 mbox = padap->mbox;
3315 pf = padap->pf;
3316 rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3317 if (rc < 0) {
3318 if (rc == -FW_EPERM) {
3319 /* It looks like we don't have permission to use
3320 * padap->mbox.
3321 *
3322 * Try mbox 4. If it works, we'll continue to
3323 * collect the rest of tid info from mbox 4.
3324 * Else, quit trying to collect tid info.
3325 */
3326 mbox = 4;
3327 pf = 4;
3328 rc = t4_query_params(padap, mbox, pf, 0, 7, para, val);
3329 if (rc < 0) {
3330 cudbg_err->sys_err = rc;
3331 goto err1;
3332 }
3333 } else {
3334 cudbg_err->sys_err = rc;
3335 goto err1;
3336 }
3337 }
3338
3339 tid->ftid_base = val[0];
3340 tid->nftids = val[1] - val[0] + 1;
3341 /*active filter region*/
3342 if (val[2] != val[3]) {
3343 #ifdef notyet
3344 tid->flags |= FW_OFLD_CONN;
3345 #endif
3346 tid->aftid_base = val[2];
3347 tid->aftid_end = val[3];
3348 }
3349 tid->ntids = val[4];
3350 tid->natids = min_t(u32, tid->ntids / 2, MAX_ATIDS_A);
3351 tid->stid_base = val[5];
3352 tid->nstids = val[6] - val[5] + 1;
3353
3354 if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
3355 para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
3356 para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
3357 rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3358 if (rc < 0) {
3359 cudbg_err->sys_err = rc;
3360 goto err1;
3361 }
3362
3363 tid->hpftid_base = val[0];
3364 tid->nhpftids = val[1] - val[0] + 1;
3365 }
3366
3367 if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
3368 tid->sb = t4_read_reg(padap, A_LE_DB_SERVER_INDEX) / 4;
3369 tid->hash_base /= 4;
3370 } else
3371 tid->sb = t4_read_reg(padap, A_LE_DB_SRVR_START_INDEX);
3372
3373 /*UO context range*/
3374 para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
3375 para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
3376
3377 rc = t4_query_params(padap, mbox, pf, 0, 2, para, val);
3378 if (rc < 0) {
3379 cudbg_err->sys_err = rc;
3380 goto err1;
3381 }
3382
3383 if (val[0] != val[1]) {
3384 tid->uotid_base = val[0];
3385 tid->nuotids = val[1] - val[0] + 1;
3386 }
3387 tid->IP_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV4);
3388 tid->IPv6_users = t4_read_reg(padap, A_LE_DB_ACT_CNT_IPV6);
3389
3390 #undef FW_PARAM_PFVF_A
3391 #undef FW_PARAM_DEV_A
3392 #undef MAX_ATIDS_A
3393
3394 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3395 if (rc)
3396 goto err1;
3397 rc = compress_buff(&scratch_buff, dbg_buff);
3398
3399 err1:
3400 end_synchronized_op(padap->port[0], 1);
3401 release_scratch_buff(&scratch_buff, dbg_buff);
3402 err:
3403 return rc;
3404 }
3405
3406 static int
3407 collect_tx_rate(struct cudbg_init *pdbg_init,
3408 struct cudbg_buffer *dbg_buff,
3409 struct cudbg_error *cudbg_err)
3410 {
3411 struct cudbg_buffer scratch_buff;
3412 struct adapter *padap = pdbg_init->adap;
3413 struct tx_rate *tx_rate;
3414 u32 size;
3415 int rc;
3416
3417 size = sizeof(struct tx_rate);
3418 scratch_buff.size = size;
3419
3420 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3421 if (rc)
3422 goto err;
3423
3424 tx_rate = (struct tx_rate *)scratch_buff.data;
3425 t4_get_chan_txrate(padap, tx_rate->nrate, tx_rate->orate);
3426 tx_rate->nchan = padap->params.arch.nchan;
3427
3428 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3429 if (rc)
3430 goto err1;
3431
3432 rc = compress_buff(&scratch_buff, dbg_buff);
3433
3434 err1:
3435 release_scratch_buff(&scratch_buff, dbg_buff);
3436 err:
3437 return rc;
3438 }
3439
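/*
 * Convert an MPS TCAM (x, y) pair into an Ethernet address and mask: the
 * mask is simply x | y, and the address bytes are the low 48 bits of the
 * big-endian representation of y.
 */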
3440 static inline void
3441 cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
3442 {
3443 *mask = x | y;
3444 y = (__force u64)cpu_to_be64(y);
3445 memcpy(addr, (char *)&y + 2, ETH_ALEN);
3446 }
3447
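/*
 * Fallback used when the FW_LDST mailbox command fails: read the VF
 * replication map for the current TCAM entry directly from the
 * MPS_VF_RPLCT_MAP registers instead of going through firmware.
 */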
3448 static void
3449 mps_rpl_backdoor(struct adapter *padap, struct fw_ldst_mps_rplc *mps_rplc)
3450 {
3451 if (is_t5(padap->params.chip)) {
3452 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3453 A_MPS_VF_RPLCT_MAP3));
3454 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3455 A_MPS_VF_RPLCT_MAP2));
3456 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3457 A_MPS_VF_RPLCT_MAP1));
3458 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3459 A_MPS_VF_RPLCT_MAP0));
3460 } else {
3461 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
3462 A_MPS_VF_RPLCT_MAP7));
3463 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
3464 A_MPS_VF_RPLCT_MAP6));
3465 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
3466 A_MPS_VF_RPLCT_MAP5));
3467 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
3468 A_MPS_VF_RPLCT_MAP4));
3469 }
3470 mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP3));
3471 mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP2));
3472 mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP1));
3473 mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, A_MPS_VF_RPLCT_MAP0));
3474 }
3475
3476 static int
3477 collect_mps_tcam(struct cudbg_init *pdbg_init,
3478 struct cudbg_buffer *dbg_buff,
3479 struct cudbg_error *cudbg_err)
3480 {
3481 struct cudbg_buffer scratch_buff;
3482 struct adapter *padap = pdbg_init->adap;
3483 struct cudbg_mps_tcam *tcam = NULL;
3484 u32 size = 0, i, n, total_size = 0;
3485 u32 ctl, data2;
3486 u64 tcamy, tcamx, val;
3487 int rc;
3488
3489
3490 n = padap->params.arch.mps_tcam_size;
3491 size = sizeof(struct cudbg_mps_tcam) * n;
3492 scratch_buff.size = size;
3493
3494 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3495 if (rc)
3496 goto err;
3497 memset(scratch_buff.data, 0, size);
3498
3499 tcam = (struct cudbg_mps_tcam *)scratch_buff.data;
3500 for (i = 0; i < n; i++) {
3501 if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
3502 /* CtlReqID - 1: use Host Driver Requester ID
3503 * CtlCmdType - 0: Read, 1: Write
3504 * CtlTcamSel - 0: TCAM0, 1: TCAM1
3505 * CtlXYBitSel- 0: Y bit, 1: X bit
3506 */
3507
3508 /* Read tcamy */
3509 ctl = (V_CTLREQID(1) |
3510 V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
3511 if (i < 256)
3512 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
3513 else
3514 ctl |= V_CTLTCAMINDEX(i - 256) |
3515 V_CTLTCAMSEL(1);
3516
3517 t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3518 val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3519 tcamy = G_DMACH(val) << 32;
3520 tcamy |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3521 data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3522 tcam->lookup_type = G_DATALKPTYPE(data2);
3523
3524 /* 0 - Outer header, 1 - Inner header
3525 * [71:48] bit locations are overloaded for
3526 * outer vs. inner lookup types.
3527 */
3528
3529 if (tcam->lookup_type &&
3530 (tcam->lookup_type != M_DATALKPTYPE)) {
3531 /* Inner header VNI */
3532 tcam->vniy = ((data2 & F_DATAVIDH2) << 23) |
3533 (G_DATAVIDH1(data2) << 16) |
3534 G_VIDL(val);
3535 tcam->dip_hit = data2 & F_DATADIPHIT;
3536 } else {
3537 tcam->vlan_vld = data2 & F_DATAVIDH2;
3538 tcam->ivlan = G_VIDL(val);
3539 }
3540
3541 tcam->port_num = G_DATAPORTNUM(data2);
3542
3543 /* Read tcamx. Change the control param */
3544 ctl |= V_CTLXYBITSEL(1);
3545 t4_write_reg(padap, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
3546 val = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
3547 tcamx = G_DMACH(val) << 32;
3548 tcamx |= t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
3549 data2 = t4_read_reg(padap, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
3550 if (tcam->lookup_type &&
3551 (tcam->lookup_type != M_DATALKPTYPE)) {
3552 /* Inner header VNI mask */
3553 tcam->vnix = ((data2 & F_DATAVIDH2) << 23) |
3554 (G_DATAVIDH1(data2) << 16) |
3555 G_VIDL(val);
3556 }
3557 } else {
3558 tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(i));
3559 tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(i));
3560 }
3561
3562 if (tcamx & tcamy)
3563 continue;
3564
3565 tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(i));
3566 tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(i));
3567
3568 if (is_t5(padap->params.chip))
3569 tcam->repli = (tcam->cls_lo & F_REPLICATE);
3570 else if (is_t6(padap->params.chip))
3571 tcam->repli = (tcam->cls_lo & F_T6_REPLICATE);
3572
3573 if (tcam->repli) {
3574 struct fw_ldst_cmd ldst_cmd;
3575 struct fw_ldst_mps_rplc mps_rplc;
3576
3577 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
3578 ldst_cmd.op_to_addrspace =
3579 htonl(V_FW_CMD_OP(FW_LDST_CMD) |
3580 F_FW_CMD_REQUEST |
3581 F_FW_CMD_READ |
3582 V_FW_LDST_CMD_ADDRSPACE(
3583 FW_LDST_ADDRSPC_MPS));
3584
3585 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
3586
3587 ldst_cmd.u.mps.rplc.fid_idx =
3588 htons(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
3589 V_FW_LDST_CMD_IDX(i));
3590
3591 rc = begin_synchronized_op(padap->port[0], 1, 1);
3592 if (rc == 0) {
3593 rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
3594 sizeof(ldst_cmd), &ldst_cmd);
3595 end_synchronized_op(padap->port[0], 1);
3596 }
3597
3598 if (rc)
3599 mps_rpl_backdoor(padap, &mps_rplc);
3600 else
3601 mps_rplc = ldst_cmd.u.mps.rplc;
3602
3603 tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
3604 tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
3605 tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
3606 tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
3607 if (padap->params.arch.mps_rplc_size >
3608 CUDBG_MAX_RPLC_SIZE) {
3609 tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
3610 tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
3611 tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
3612 tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
3613 }
3614 }
3615 cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
3616
3617 tcam->idx = i;
3618 tcam->rplc_size = padap->params.arch.mps_rplc_size;
3619
3620 total_size += sizeof(struct cudbg_mps_tcam);
3621
3622 tcam++;
3623 }
3624
3625 if (total_size == 0) {
3626 rc = CUDBG_SYSTEM_ERROR;
3627 goto err1;
3628 }
3629
3630 scratch_buff.size = total_size;
3631 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3632 if (rc)
3633 goto err1;
3634
3635 rc = compress_buff(&scratch_buff, dbg_buff);
3636
3637 err1:
3638 scratch_buff.size = size;
3639 release_scratch_buff(&scratch_buff, dbg_buff);
3640 err:
3641 return rc;
3642 }
3643
3644 static int
3645 collect_pcie_config(struct cudbg_init *pdbg_init,
3646 struct cudbg_buffer *dbg_buff,
3647 struct cudbg_error *cudbg_err)
3648 {
3649 struct cudbg_buffer scratch_buff;
3650 struct adapter *padap = pdbg_init->adap;
3651 u32 size, *value, j;
3652 int i, rc, n;
3653
3654 size = sizeof(u32) * NUM_PCIE_CONFIG_REGS;
3655 n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
3656 scratch_buff.size = size;
3657
3658 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3659 if (rc)
3660 goto err;
3661
3662 value = (u32 *)scratch_buff.data;
3663 for (i = 0; i < n; i++) {
3664 for (j = t5_pcie_config_array[i][0];
3665 j <= t5_pcie_config_array[i][1]; j += 4) {
3666 t4_hw_pci_read_cfg4(padap, j, value++);
3667 }
3668 }
3669
3670 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3671 if (rc)
3672 goto err1;
3673
3674 rc = compress_buff(&scratch_buff, dbg_buff);
3675
3676 err1:
3677 release_scratch_buff(&scratch_buff, dbg_buff);
3678 err:
3679 return rc;
3680 }
3681
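/*
 * Read one TID's TCAM/hash entry through the LE DBGI interface: issue a DBGI
 * read command, poll for completion, check the response status, and copy the
 * CUDBG_NUM_REQ_REGS response words into tid_data.
 */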
3682 static int
3683 cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
3684 struct cudbg_tid_data *tid_data)
3685 {
3686 int i, cmd_retry = 8;
3687 struct adapter *padap = pdbg_init->adap;
3688 u32 val;
3689
3690 /* Fill REQ_DATA regs with 0's */
3691 for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3692 t4_write_reg(padap, A_LE_DB_DBGI_REQ_DATA + (i << 2), 0);
3693
3694 /* Write DBIG command */
3695 val = (0x4 << S_DBGICMD) | tid;
3696 t4_write_reg(padap, A_LE_DB_DBGI_REQ_TCAM_CMD, val);
3697 tid_data->dbig_cmd = val;
3698
3699 val = 0;
3700 val |= 1 << S_DBGICMDSTRT;
3701 val |= 1; /* LE mode */
3702 t4_write_reg(padap, A_LE_DB_DBGI_CONFIG, val);
3703 tid_data->dbig_conf = val;
3704
3705 /* Poll the DBGICMDBUSY bit */
3706 val = 1;
3707 while (val) {
3708 val = t4_read_reg(padap, A_LE_DB_DBGI_CONFIG);
3709 val = (val >> S_DBGICMDBUSY) & 1;
3710 cmd_retry--;
3711 if (!cmd_retry) {
3712 if (pdbg_init->verbose)
3713 pdbg_init->print(padap->dip, CE_NOTE,
3714 "%s(): Timeout waiting for non-busy\n",
3715 __func__);
3716 return CUDBG_SYSTEM_ERROR;
3717 }
3718 }
3719
3720 /* Check RESP status */
3721 val = 0;
3722 val = t4_read_reg(padap, A_LE_DB_DBGI_RSP_STATUS);
3723 tid_data->dbig_rsp_stat = val;
3724 if (!(val & 1)) {
3725 if (pdbg_init->verbose)
3726 pdbg_init->print(padap->dip, CE_NOTE,
3727 "%s(): DBGI command failed\n", __func__);
3728 return CUDBG_SYSTEM_ERROR;
3729 }
3730
3731 /* Read RESP data */
3732 for (i = 0; i < CUDBG_NUM_REQ_REGS; i++)
3733 tid_data->data[i] = t4_read_reg(padap,
3734 A_LE_DB_DBGI_RSP_DATA +
3735 (i << 2));
3736
3737 tid_data->tid = tid;
3738
3739 return 0;
3740 }
3741
3742 static int
3743 collect_le_tcam(struct cudbg_init *pdbg_init,
3744 struct cudbg_buffer *dbg_buff,
3745 struct cudbg_error *cudbg_err)
3746 {
3747 struct cudbg_buffer scratch_buff;
3748 struct adapter *padap = pdbg_init->adap;
3749 struct cudbg_tcam tcam_region = {0};
3750 struct cudbg_tid_data *tid_data = NULL;
3751 u32 value, bytes = 0, bytes_left = 0;
3752 u32 i;
3753 int rc, size;
3754
3755 /* Get the LE regions */
3756 	/* Get hash base index */
3757 	value = t4_read_reg(padap, A_LE_DB_TID_HASHBASE);
3758 tcam_region.tid_hash_base = value;
3759
3760 /* Get routing table index */
3761 value = t4_read_reg(padap, A_LE_DB_ROUTING_TABLE_INDEX);
3762 tcam_region.routing_start = value;
3763
3764 	/* Get CLIP table index */
3765 value = t4_read_reg(padap, A_LE_DB_CLIP_TABLE_INDEX);
3766 tcam_region.clip_start = value;
3767
3768 /* Get filter table index */
3769 value = t4_read_reg(padap, A_LE_DB_FILTER_TABLE_INDEX);
3770 tcam_region.filter_start = value;
3771
3772 /* Get server table index */
3773 value = t4_read_reg(padap, A_LE_DB_SERVER_INDEX);
3774 tcam_region.server_start = value;
3775
3776 /* Check whether hash is enabled and calculate the max tids */
3777 value = t4_read_reg(padap, A_LE_DB_CONFIG);
3778 if ((value >> S_HASHEN) & 1) {
3779 value = t4_read_reg(padap, A_LE_DB_HASH_CONFIG);
3780 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
3781 tcam_region.max_tid = (value & 0xFFFFF) +
3782 tcam_region.tid_hash_base;
3783 else { /* for T5 */
3784 value = G_HASHTIDSIZE(value);
3785 value = 1 << value;
3786 tcam_region.max_tid = value +
3787 tcam_region.tid_hash_base;
3788 }
3789 } else /* hash not enabled */
3790 tcam_region.max_tid = CUDBG_MAX_TCAM_TID;
3791
3792 size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
3793 size += sizeof(struct cudbg_tcam);
3794 scratch_buff.size = size;
3795
3796 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3797 if (rc)
3798 goto err;
3799
3800 rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE, &scratch_buff);
3801 if (rc)
3802 goto err;
3803
3804 memcpy(scratch_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
3805
3806 tid_data = (struct cudbg_tid_data *)(((struct cudbg_tcam *)
3807 scratch_buff.data) + 1);
3808 bytes_left = CUDBG_CHUNK_SIZE - sizeof(struct cudbg_tcam);
3809 bytes = sizeof(struct cudbg_tcam);
3810
3811 /* read all tid */
3812 for (i = 0; i < tcam_region.max_tid; i++) {
3813 if (bytes_left < sizeof(struct cudbg_tid_data)) {
3814 scratch_buff.size = bytes;
3815 rc = compress_buff(&scratch_buff, dbg_buff);
3816 if (rc)
3817 goto err1;
3818 scratch_buff.size = CUDBG_CHUNK_SIZE;
3819 release_scratch_buff(&scratch_buff, dbg_buff);
3820
3821 /* new alloc */
3822 rc = get_scratch_buff(dbg_buff, CUDBG_CHUNK_SIZE,
3823 &scratch_buff);
3824 if (rc)
3825 goto err;
3826
3827 tid_data = (struct cudbg_tid_data *)(scratch_buff.data);
3828 bytes_left = CUDBG_CHUNK_SIZE;
3829 bytes = 0;
3830 }
3831
3832 rc = cudbg_read_tid(pdbg_init, i, tid_data);
3833
3834 if (rc) {
3835 cudbg_err->sys_err = rc;
3836 goto err1;
3837 }
3838
3839 tid_data++;
3840 bytes_left -= sizeof(struct cudbg_tid_data);
3841 bytes += sizeof(struct cudbg_tid_data);
3842 }
3843
3844 if (bytes) {
3845 scratch_buff.size = bytes;
3846 rc = compress_buff(&scratch_buff, dbg_buff);
3847 }
3848
3849 err1:
3850 scratch_buff.size = CUDBG_CHUNK_SIZE;
3851 release_scratch_buff(&scratch_buff, dbg_buff);
3852 err:
3853 return rc;
3854 }
3855
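/*
 * Collect the T6 MA (memory arbiter) indirect registers described by
 * t6_ma_ireg_array and t6_ma_ireg_array2.  Pre-T6 chips do not have
 * this entity, so CUDBG_STATUS_ENTITY_NOT_FOUND is returned for them.
 */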
3856 static int
3857 collect_ma_indirect(struct cudbg_init *pdbg_init,
3858 struct cudbg_buffer *dbg_buff,
3859 struct cudbg_error *cudbg_err)
3860 {
3861 struct cudbg_buffer scratch_buff;
3862 struct adapter *padap = pdbg_init->adap;
3863 struct ireg_buf *ma_indr = NULL;
3864 u32 size, j;
3865 int i, rc, n;
3866
3867 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) {
3868 if (pdbg_init->verbose)
3869 pdbg_init->print(padap->dip, CE_NOTE,
3870 "MA indirect available only in T6\n");
3871 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3872 goto err;
3873 }
3874
3875 n = sizeof(t6_ma_ireg_array) / (4 * sizeof(u32));
3876 size = sizeof(struct ireg_buf) * n * 2;
3877 scratch_buff.size = size;
3878
3879 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3880 if (rc)
3881 goto err;
3882
3883 ma_indr = (struct ireg_buf *)scratch_buff.data;
3884
3885 for (i = 0; i < n; i++) {
3886 struct ireg_field *ma_fli = &ma_indr->tp_pio;
3887 u32 *buff = ma_indr->outbuf;
3888
3889 ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
3890 ma_fli->ireg_data = t6_ma_ireg_array[i][1];
3891 ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
3892 ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
3893
3894 t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
3895 buff, ma_fli->ireg_offset_range,
3896 ma_fli->ireg_local_offset);
3897
3898 ma_indr++;
3899
3900 }
3901
3902 n = sizeof(t6_ma_ireg_array2) / (4 * sizeof(u32));
3903
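	/*
	 * The second MA register group is read one location at a time,
	 * stepping the local offset by 0x20 between reads.
	 */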
3904 for (i = 0; i < n; i++) {
3905 struct ireg_field *ma_fli = &ma_indr->tp_pio;
3906 u32 *buff = ma_indr->outbuf;
3907
3908 ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
3909 ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
3910 ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
3911
3912 for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
3913 t4_read_indirect(padap, ma_fli->ireg_addr,
3914 ma_fli->ireg_data, buff, 1,
3915 ma_fli->ireg_local_offset);
3916 buff++;
3917 ma_fli->ireg_local_offset += 0x20;
3918 }
3919 ma_indr++;
3920 }
3921
3922 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3923 if (rc)
3924 goto err1;
3925
3926 rc = compress_buff(&scratch_buff, dbg_buff);
3927
3928 err1:
3929 release_scratch_buff(&scratch_buff, dbg_buff);
3930 err:
3931 return rc;
3932 }
3933
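/*
 * Collect the T6 HMA indirect registers described by t6_hma_ireg_array.
 * Pre-T6 chips do not have this entity.
 */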
3934 static int
3935 collect_hma_indirect(struct cudbg_init *pdbg_init,
3936 struct cudbg_buffer *dbg_buff,
3937 struct cudbg_error *cudbg_err)
3938 {
3939 struct cudbg_buffer scratch_buff;
3940 struct adapter *padap = pdbg_init->adap;
3941 struct ireg_buf *hma_indr = NULL;
3942 u32 size;
3943 int i, rc, n;
3944
3945 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) {
3946 if (pdbg_init->verbose)
3947 pdbg_init->print(padap->dip, CE_NOTE,
3948 "HMA indirect available only in T6\n");
3949 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
3950 goto err;
3951 }
3952
3953 n = sizeof(t6_hma_ireg_array) / (4 * sizeof(u32));
3954 size = sizeof(struct ireg_buf) * n;
3955 scratch_buff.size = size;
3956
3957 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
3958 if (rc)
3959 goto err;
3960
3961 hma_indr = (struct ireg_buf *)scratch_buff.data;
3962
3963 for (i = 0; i < n; i++) {
3964 struct ireg_field *hma_fli = &hma_indr->tp_pio;
3965 u32 *buff = hma_indr->outbuf;
3966
3967 hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
3968 hma_fli->ireg_data = t6_hma_ireg_array[i][1];
3969 hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
3970 hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
3971
3972 t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
3973 buff, hma_fli->ireg_offset_range,
3974 hma_fli->ireg_local_offset);
3975
3976 hma_indr++;
3977
3978 }
3979
3980 rc = write_compression_hdr(&scratch_buff, dbg_buff);
3981 if (rc)
3982 goto err1;
3983
3984 rc = compress_buff(&scratch_buff, dbg_buff);
3985
3986 err1:
3987 release_scratch_buff(&scratch_buff, dbg_buff);
3988 err:
3989 return rc;
3990 }
3991
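/*
 * Collect the PCIE PDBG and CDBG indirect register blocks described by
 * t5_pcie_pdbg_array and t5_pcie_cdbg_array.
 */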
3992 static int
3993 collect_pcie_indirect(struct cudbg_init *pdbg_init,
3994 struct cudbg_buffer *dbg_buff,
3995 struct cudbg_error *cudbg_err)
3996 {
3997 struct cudbg_buffer scratch_buff;
3998 struct adapter *padap = pdbg_init->adap;
3999 struct ireg_buf *ch_pcie;
4000 u32 size;
4001 int i, rc, n;
4002
4003 n = sizeof(t5_pcie_pdbg_array) / (4 * sizeof(u32));
4004 size = sizeof(struct ireg_buf) * n * 2;
4005 scratch_buff.size = size;
4006
4007 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4008 if (rc)
4009 goto err;
4010
4011 ch_pcie = (struct ireg_buf *)scratch_buff.data;
4012
4013 	/* PCIE_PDBG */
4014 for (i = 0; i < n; i++) {
4015 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4016 u32 *buff = ch_pcie->outbuf;
4017
4018 pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
4019 pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
4020 pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
4021 pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
4022
4023 t4_read_indirect(padap,
4024 pcie_pio->ireg_addr,
4025 pcie_pio->ireg_data,
4026 buff,
4027 pcie_pio->ireg_offset_range,
4028 pcie_pio->ireg_local_offset);
4029
4030 ch_pcie++;
4031 }
4032
4033 	/* PCIE_CDBG */
4034 n = sizeof(t5_pcie_cdbg_array) / (4 * sizeof(u32));
4035 for (i = 0; i < n; i++) {
4036 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
4037 u32 *buff = ch_pcie->outbuf;
4038
4039 pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
4040 pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
4041 pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
4042 pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
4043
4044 t4_read_indirect(padap,
4045 pcie_pio->ireg_addr,
4046 pcie_pio->ireg_data,
4047 buff,
4048 pcie_pio->ireg_offset_range,
4049 pcie_pio->ireg_local_offset);
4050
4051 ch_pcie++;
4052 }
4053
4054 rc = write_compression_hdr(&scratch_buff, dbg_buff);
4055 if (rc)
4056 goto err1;
4057
4058 rc = compress_buff(&scratch_buff, dbg_buff);
4059
4060 err1:
4061 release_scratch_buff(&scratch_buff, dbg_buff);
4062 err:
4063 return rc;
4064
4065 }
4066
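/*
 * Collect the TP_PIO, TP_TM_PIO and TP_MIB_INDEX indirect registers,
 * using the T5 or T6 register tables as appropriate for the chip.
 */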
4067 static int
4068 collect_tp_indirect(struct cudbg_init *pdbg_init,
4069 struct cudbg_buffer *dbg_buff,
4070 struct cudbg_error *cudbg_err)
4071 {
4072 struct cudbg_buffer scratch_buff;
4073 struct adapter *padap = pdbg_init->adap;
4074 struct ireg_buf *ch_tp_pio;
4075 u32 size;
4076 int i, rc, n = 0;
4077
4078 if (is_t5(padap->params.chip))
4079 n = sizeof(t5_tp_pio_array) / (4 * sizeof(u32));
4080 else if (is_t6(padap->params.chip))
4081 n = sizeof(t6_tp_pio_array) / (4 * sizeof(u32));
4082
4083 size = sizeof(struct ireg_buf) * n * 3;
4084 scratch_buff.size = size;
4085
4086 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4087 if (rc)
4088 goto err;
4089
4090 ch_tp_pio = (struct ireg_buf *)scratch_buff.data;
4091
4092 	/* TP_PIO */
4093 for (i = 0; i < n; i++) {
4094 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4095 u32 *buff = ch_tp_pio->outbuf;
4096
4097 if (is_t5(padap->params.chip)) {
4098 tp_pio->ireg_addr = t5_tp_pio_array[i][0];
4099 tp_pio->ireg_data = t5_tp_pio_array[i][1];
4100 tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
4101 tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
4102 } else if (is_t6(padap->params.chip)) {
4103 tp_pio->ireg_addr = t6_tp_pio_array[i][0];
4104 tp_pio->ireg_data = t6_tp_pio_array[i][1];
4105 tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
4106 tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
4107 }
4108
4109 t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
4110 tp_pio->ireg_local_offset, true);
4111
4112 ch_tp_pio++;
4113 }
4114
4115 	/* TP_TM_PIO */
4116 if (is_t5(padap->params.chip))
4117 n = sizeof(t5_tp_tm_pio_array) / (4 * sizeof(u32));
4118 else if (is_t6(padap->params.chip))
4119 n = sizeof(t6_tp_tm_pio_array) / (4 * sizeof(u32));
4120
4121 for (i = 0; i < n; i++) {
4122 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4123 u32 *buff = ch_tp_pio->outbuf;
4124
4125 if (is_t5(padap->params.chip)) {
4126 tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
4127 tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
4128 tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
4129 tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
4130 } else if (is_t6(padap->params.chip)) {
4131 tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
4132 tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
4133 tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
4134 tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
4135 }
4136
4137 t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
4138 tp_pio->ireg_local_offset, true);
4139
4140 ch_tp_pio++;
4141 }
4142
4143 	/* TP_MIB_INDEX */
4144 if (is_t5(padap->params.chip))
4145 n = sizeof(t5_tp_mib_index_array) / (4 * sizeof(u32));
4146 else if (is_t6(padap->params.chip))
4147 n = sizeof(t6_tp_mib_index_array) / (4 * sizeof(u32));
4148
4149 for (i = 0; i < n ; i++) {
4150 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
4151 u32 *buff = ch_tp_pio->outbuf;
4152
4153 if (is_t5(padap->params.chip)) {
4154 tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
4155 tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
4156 tp_pio->ireg_local_offset =
4157 t5_tp_mib_index_array[i][2];
4158 tp_pio->ireg_offset_range =
4159 t5_tp_mib_index_array[i][3];
4160 } else if (is_t6(padap->params.chip)) {
4161 tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
4162 tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
4163 tp_pio->ireg_local_offset =
4164 t6_tp_mib_index_array[i][2];
4165 tp_pio->ireg_offset_range =
4166 t6_tp_mib_index_array[i][3];
4167 }
4168
4169 t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
4170 tp_pio->ireg_local_offset, true);
4171
4172 ch_tp_pio++;
4173 }
4174
4175 rc = write_compression_hdr(&scratch_buff, dbg_buff);
4176 if (rc)
4177 goto err1;
4178
4179 rc = compress_buff(&scratch_buff, dbg_buff);
4180
4181 err1:
4182 release_scratch_buff(&scratch_buff, dbg_buff);
4183 err:
4184 return rc;
4185 }
4186
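/*
 * Collect the two SGE debug indirect register blocks described by
 * t5_sge_dbg_index_array.
 */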
4187 static int
4188 collect_sge_indirect(struct cudbg_init *pdbg_init,
4189 struct cudbg_buffer *dbg_buff,
4190 struct cudbg_error *cudbg_err)
4191 {
4192 struct cudbg_buffer scratch_buff;
4193 struct adapter *padap = pdbg_init->adap;
4194 struct ireg_buf *ch_sge_dbg;
4195 u32 size;
4196 int i, rc;
4197
4198 size = sizeof(struct ireg_buf) * 2;
4199 scratch_buff.size = size;
4200
4201 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4202 if (rc)
4203 goto err;
4204
4205 ch_sge_dbg = (struct ireg_buf *)scratch_buff.data;
4206
4207 for (i = 0; i < 2; i++) {
4208 struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
4209 u32 *buff = ch_sge_dbg->outbuf;
4210
4211 sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
4212 sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
4213 sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
4214 sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
4215
4216 t4_read_indirect(padap,
4217 sge_pio->ireg_addr,
4218 sge_pio->ireg_data,
4219 buff,
4220 sge_pio->ireg_offset_range,
4221 sge_pio->ireg_local_offset);
4222
4223 ch_sge_dbg++;
4224 }
4225
4226 rc = write_compression_hdr(&scratch_buff, dbg_buff);
4227 if (rc)
4228 goto err1;
4229
4230 rc = compress_buff(&scratch_buff, dbg_buff);
4231
4232 err1:
4233 release_scratch_buff(&scratch_buff, dbg_buff);
4234 err:
4235 return rc;
4236 }
4237
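/*
 * Collect a small set of individual debug registers for the "full"
 * entity: the TP scheduler and CSIDE/ESIDE interrupt registers, the
 * PCIE CDEBUG AppData words and SGE_DEBUG_DATA_HIGH_INDEX_10.
 */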
4238 static int
4239 collect_full(struct cudbg_init *pdbg_init,
4240 struct cudbg_buffer *dbg_buff,
4241 struct cudbg_error *cudbg_err)
4242 {
4243 struct cudbg_buffer scratch_buff;
4244 struct adapter *padap = pdbg_init->adap;
4245 u32 reg_addr, reg_data, reg_local_offset, reg_offset_range;
4246 u32 *sp;
4247 int rc;
4248 int nreg = 0;
4249
4250 /* Collect Registers:
4251 * TP_DBG_SCHED_TX (0x7e40 + 0x6a),
4252 * TP_DBG_SCHED_RX (0x7e40 + 0x6b),
4253 * TP_DBG_CSIDE_INT (0x7e40 + 0x23f),
4254 * TP_DBG_ESIDE_INT (0x7e40 + 0x148),
4255 * PCIE_CDEBUG_INDEX[AppData0] (0x5a10 + 2),
4256 	 * PCIE_CDEBUG_INDEX[AppData1] (0x5a10 + 3) (T6 only),
4257 	 * SGE_DEBUG_DATA_HIGH_INDEX_10 (0x12a8)
4258 	 */
4259
4260 if (is_t5(padap->params.chip))
4261 nreg = 6;
4262 else if (is_t6(padap->params.chip))
4263 nreg = 7;
4264
4265 scratch_buff.size = nreg * sizeof(u32);
4266
4267 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4268 if (rc)
4269 goto err;
4270
4271 sp = (u32 *)scratch_buff.data;
4272
4273 /* TP_DBG_SCHED_TX */
4274 reg_local_offset = t5_tp_pio_array[3][2] + 0xa;
4275 reg_offset_range = 1;
4276
4277 t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4278
4279 sp++;
4280
4281 /* TP_DBG_SCHED_RX */
4282 reg_local_offset = t5_tp_pio_array[3][2] + 0xb;
4283 reg_offset_range = 1;
4284
4285 t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4286
4287 sp++;
4288
4289 /* TP_DBG_CSIDE_INT */
4290 reg_local_offset = t5_tp_pio_array[9][2] + 0xf;
4291 reg_offset_range = 1;
4292
4293 t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4294
4295 sp++;
4296
4297 /* TP_DBG_ESIDE_INT */
4298 reg_local_offset = t5_tp_pio_array[8][2] + 3;
4299 reg_offset_range = 1;
4300
4301 t4_tp_pio_read(padap, sp, reg_offset_range, reg_local_offset, true);
4302
4303 sp++;
4304
4305 /* PCIE_CDEBUG_INDEX[AppData0] */
4306 reg_addr = t5_pcie_cdbg_array[0][0];
4307 reg_data = t5_pcie_cdbg_array[0][1];
4308 reg_local_offset = t5_pcie_cdbg_array[0][2] + 2;
4309 reg_offset_range = 1;
4310
4311 t4_read_indirect(padap, reg_addr, reg_data, sp, reg_offset_range,
4312 reg_local_offset);
4313
4314 sp++;
4315
4316 if (is_t6(padap->params.chip)) {
4317 /* PCIE_CDEBUG_INDEX[AppData1] */
4318 reg_addr = t5_pcie_cdbg_array[0][0];
4319 reg_data = t5_pcie_cdbg_array[0][1];
4320 reg_local_offset = t5_pcie_cdbg_array[0][2] + 3;
4321 reg_offset_range = 1;
4322
4323 t4_read_indirect(padap, reg_addr, reg_data, sp,
4324 reg_offset_range, reg_local_offset);
4325
4326 sp++;
4327 }
4328
4329 /* SGE_DEBUG_DATA_HIGH_INDEX_10 */
4330 *sp = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_10);
4331
4332 rc = write_compression_hdr(&scratch_buff, dbg_buff);
4333 if (rc)
4334 goto err1;
4335
4336 rc = compress_buff(&scratch_buff, dbg_buff);
4337
4338 err1:
4339 release_scratch_buff(&scratch_buff, dbg_buff);
4340 err:
4341 return rc;
4342 }
4343
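/*
 * Collect VPD data: serial/part numbers, network address, and the
 * SCFG, VPD and firmware versions.  The body is currently compiled
 * out ("notyet"), so this stub always returns -1.
 */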
4344 static int
4345 collect_vpd_data(struct cudbg_init *pdbg_init,
4346 struct cudbg_buffer *dbg_buff,
4347 struct cudbg_error *cudbg_err)
4348 {
4349 #ifdef notyet
4350 struct cudbg_buffer scratch_buff;
4351 struct adapter *padap = pdbg_init->adap;
4352 struct struct_vpd_data *vpd_data;
4353 char vpd_ver[4];
4354 u32 fw_vers;
4355 u32 size;
4356 int rc;
4357
4358 size = sizeof(struct struct_vpd_data);
4359 scratch_buff.size = size;
4360
4361 rc = get_scratch_buff(dbg_buff, scratch_buff.size, &scratch_buff);
4362 if (rc)
4363 goto err;
4364
4365 vpd_data = (struct struct_vpd_data *)scratch_buff.data;
4366
4367 if (is_t5(padap->params.chip)) {
4368 read_vpd_reg(padap, SN_REG_ADDR, SN_MAX_LEN, vpd_data->sn);
4369 read_vpd_reg(padap, BN_REG_ADDR, BN_MAX_LEN, vpd_data->bn);
4370 read_vpd_reg(padap, NA_REG_ADDR, NA_MAX_LEN, vpd_data->na);
4371 read_vpd_reg(padap, MN_REG_ADDR, MN_MAX_LEN, vpd_data->mn);
4372 } else if (is_t6(padap->params.chip)) {
4373 read_vpd_reg(padap, SN_T6_ADDR, SN_MAX_LEN, vpd_data->sn);
4374 read_vpd_reg(padap, BN_T6_ADDR, BN_MAX_LEN, vpd_data->bn);
4375 read_vpd_reg(padap, NA_T6_ADDR, NA_MAX_LEN, vpd_data->na);
4376 read_vpd_reg(padap, MN_T6_ADDR, MN_MAX_LEN, vpd_data->mn);
4377 }
4378
4379 if (is_fw_attached(pdbg_init)) {
4380 rc = t4_get_scfg_version(padap, &vpd_data->scfg_vers);
4381 } else {
4382 rc = 1;
4383 }
4384
4385 if (rc) {
4386 		/* Fall back to the backdoor mechanism */
4387 rc = read_vpd_reg(padap, SCFG_VER_ADDR, SCFG_VER_LEN,
4388 (u8 *)&vpd_data->scfg_vers);
4389 if (rc)
4390 goto err1;
4391 }
4392
4393 if (is_fw_attached(pdbg_init)) {
4394 rc = t4_get_vpd_version(padap, &vpd_data->vpd_vers);
4395 } else {
4396 rc = 1;
4397 }
4398
4399 if (rc) {
4400 		/* Fall back to the backdoor mechanism */
4401 rc = read_vpd_reg(padap, VPD_VER_ADDR, VPD_VER_LEN,
4402 (u8 *)vpd_ver);
4403 if (rc)
4404 goto err1;
4405 		/* read_vpd_reg returns the stored hex digits as a string;
4406 		 * convert that hex string to a character string.  The VPD
4407 		 * version is only 2 bytes. */
4408 sprintf(vpd_ver, "%c%c\n", vpd_ver[0], vpd_ver[1]);
4409 vpd_data->vpd_vers = simple_strtoul(vpd_ver, NULL, 16);
4410 }
4411
4412 /* Get FW version if it's not already filled in */
4413 fw_vers = padap->params.fw_vers;
4414 if (!fw_vers) {
4415 rc = t4_get_fw_version(padap, &fw_vers);
4416 if (rc)
4417 goto err1;
4418 }
4419
4420 vpd_data->fw_major = G_FW_HDR_FW_VER_MAJOR(fw_vers);
4421 vpd_data->fw_minor = G_FW_HDR_FW_VER_MINOR(fw_vers);
4422 vpd_data->fw_micro = G_FW_HDR_FW_VER_MICRO(fw_vers);
4423 vpd_data->fw_build = G_FW_HDR_FW_VER_BUILD(fw_vers);
4424
4425 rc = write_compression_hdr(&scratch_buff, dbg_buff);
4426 if (rc)
4427 goto err1;
4428
4429 rc = compress_buff(&scratch_buff, dbg_buff);
4430
4431 err1:
4432 release_scratch_buff(&scratch_buff, dbg_buff);
4433 err:
4434 return rc;
4435 #endif
4436 return (-1);
4437 }
4438