xref: /linux/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c (revision 83a37b3292f4aca799b355179ad6fbdd78a08e10)
1 /*
2  *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
3  *
4  *  This program is free software; you can redistribute it and/or modify it
5  *  under the terms and conditions of the GNU General Public License,
6  *  version 2, as published by the Free Software Foundation.
7  *
8  *  This program is distributed in the hope it will be useful, but WITHOUT
9  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  *  more details.
12  *
13  *  The full GNU General Public License is included in this distribution in
14  *  the file called "COPYING".
15  *
16  */
17 
18 #include "t4_regs.h"
19 #include "cxgb4.h"
20 #include "cudbg_if.h"
21 #include "cudbg_lib_common.h"
22 #include "cudbg_lib.h"
23 #include "cudbg_entity.h"
24 
/* Flush the collected data in @pin_buff out to the destination debug
 * buffer @dbg_buff, then release the pinned scratch buffer.  Presumably
 * cudbg_update_buff() advances @dbg_buff's offset by the amount written;
 * see cudbg_lib_common.h for the exact contract.
 */
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
					 struct cudbg_buffer *dbg_buff)
{
	cudbg_update_buff(pin_buff, dbg_buff);
	cudbg_put_buff(pin_buff, dbg_buff);
}
31 
32 static int is_fw_attached(struct cudbg_init *pdbg_init)
33 {
34 	struct adapter *padap = pdbg_init->adap;
35 
36 	if (!(padap->flags & FW_OK) || padap->use_bd)
37 		return 0;
38 
39 	return 1;
40 }
41 
42 /* This function will add additional padding bytes into debug_buffer to make it
43  * 4 byte aligned.
44  */
45 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
46 			      struct cudbg_entity_hdr *entity_hdr)
47 {
48 	u8 zero_buf[4] = {0};
49 	u8 padding, remain;
50 
51 	remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
52 	padding = 4 - remain;
53 	if (remain) {
54 		memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
55 		       padding);
56 		dbg_buff->offset += padding;
57 		entity_hdr->num_pad = padding;
58 	}
59 	entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
60 }
61 
62 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
63 {
64 	struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
65 
66 	return (struct cudbg_entity_hdr *)
67 	       ((char *)outbuf + cudbg_hdr->hdr_len +
68 		(sizeof(struct cudbg_entity_hdr) * (i - 1)));
69 }
70 
71 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
72 			   struct cudbg_buffer *dbg_buff,
73 			   struct cudbg_error *cudbg_err)
74 {
75 	struct adapter *padap = pdbg_init->adap;
76 	struct cudbg_buffer temp_buff = { 0 };
77 	u32 buf_size = 0;
78 	int rc = 0;
79 
80 	if (is_t4(padap->params.chip))
81 		buf_size = T4_REGMAP_SIZE;
82 	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
83 		buf_size = T5_REGMAP_SIZE;
84 
85 	rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
86 	if (rc)
87 		return rc;
88 	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
89 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
90 	return rc;
91 }
92 
/* Collect the firmware device log from adapter memory.  The devlog
 * location (memory type, start, size) is (re)read from the firmware
 * first; the log is then copied out through memory window 0 under
 * win0_lock.  Returns 0 on success or a negative error code, which is
 * also recorded in @cudbg_err->sys_err.
 */
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
			    struct cudbg_buffer *dbg_buff,
			    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct devlog_params *dparams;
	int rc = 0;

	/* Refresh devlog parameters from firmware before reading. */
	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		/* win0_lock serializes memory-window accesses; the final
		 * argument 1 selects the read direction of t4_memory_rw().
		 */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
131 
/* Read one CIM inbound queue (@qid) into the debug buffer.
 * Returns 0 on success; on failure the scratch buffer is released and
 * the error is mirrored into @cudbg_err->sys_err.
 */
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	/* NOTE(review): temp_buff.offset is added in u32 units here; if
	 * the offset is byte-based this lands 4x too far into the buffer.
	 * Confirm against cudbg_get_buff()'s contract for temp_buff.offset.
	 */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)((u32 *)temp_buff.data +
					   temp_buff.offset), qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
164 
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	/* TP0 is CIM inbound queue 0. */
	const int qid = 0;

	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
}
171 
int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	/* TP1 is CIM inbound queue 1. */
	const int qid = 1;

	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
}
178 
int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	/* ULP is CIM inbound queue 2. */
	const int qid = 2;

	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
}
185 
int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* SGE0 is CIM inbound queue 3. */
	const int qid = 3;

	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
}
192 
int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* SGE1 is CIM inbound queue 4. */
	const int qid = 4;

	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
}
199 
int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* NC-SI is CIM inbound queue 5. */
	const int qid = 5;

	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, qid);
}
206 
/* Read one CIM outbound queue (@qid) into the debug buffer.  The
 * scratch buffer is sized generously; after a successful read its size
 * is trimmed to the number of words actually returned before being
 * committed.  Errors are mirrored into @cudbg_err->sys_err.
 */
static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err, int qid)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int no_of_read_words, rc = 0;
	u32 qsize;

	/* collect CIM OBQ */
	qsize =  6 * CIM_OBQ_SIZE * 4 *  sizeof(u32);
	rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	/* NOTE(review): temp_buff.offset is added in u32 units here (see
	 * the same construct in cudbg_read_cim_ibq()) — confirm the unit
	 * of temp_buff.offset against cudbg_get_buff().
	 */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)((u32 *)temp_buff.data +
					   temp_buff.offset), qsize);
	/* no_of_read_words is less than or equal to 0 means error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(&temp_buff, dbg_buff);
		return rc;
	}
	/* Shrink to the bytes actually read (words * 4). */
	temp_buff.size = no_of_read_words * 4;
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
240 
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* ULP0 is CIM outbound queue 0. */
	const int qid = 0;

	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
}
247 
int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* ULP1 is CIM outbound queue 1. */
	const int qid = 1;

	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
}
254 
int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* ULP2 is CIM outbound queue 2. */
	const int qid = 2;

	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
}
261 
int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* ULP3 is CIM outbound queue 3. */
	const int qid = 3;

	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
}
268 
int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	/* SGE is CIM outbound queue 4. */
	const int qid = 4;

	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
}
275 
int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	/* NC-SI is CIM outbound queue 5. */
	const int qid = 5;

	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
}
282 
int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	/* SGE RX queue 0 is CIM outbound queue 6. */
	const int qid = 6;

	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
}
289 
int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	/* SGE RX queue 1 is CIM outbound queue 7. */
	const int qid = 7;

	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, qid);
}
296 
/* Copy @tot_len bytes of adapter memory of type @mem_type into the
 * debug buffer in CUDBG_CHUNK_SIZE pieces.  Each chunk is read via
 * memory window MEMWIN_NIC under win0_lock and committed before the
 * next chunk is fetched, so only one chunk of scratch space is pinned
 * at a time.  On error the current chunk is released and the error is
 * mirrored into @cudbg_err->sys_err.
 */
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff, u8 mem_type,
			     unsigned long tot_len,
			     struct cudbg_error *cudbg_err)
{
	unsigned long bytes, bytes_left, bytes_read = 0;
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	int rc = 0;

	bytes_left = tot_len;
	while (bytes_left > 0) {
		/* Never pin more than one CUDBG_CHUNK_SIZE at once. */
		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
		if (rc)
			return rc;
		/* Final argument 1 selects the read direction. */
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
				  bytes_read, bytes,
				  (__be32 *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		bytes_left -= bytes;
		bytes_read += bytes;
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	}
	return rc;
}
331 
332 static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
333 				   struct card_mem *mem_info)
334 {
335 	struct adapter *padap = pdbg_init->adap;
336 	u32 value;
337 
338 	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
339 	value = EDRAM0_SIZE_G(value);
340 	mem_info->size_edc0 = (u16)value;
341 
342 	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
343 	value = EDRAM1_SIZE_G(value);
344 	mem_info->size_edc1 = (u16)value;
345 
346 	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
347 	if (value & EDRAM0_ENABLE_F)
348 		mem_info->mem_flag |= (1 << EDC0_FLAG);
349 	if (value & EDRAM1_ENABLE_F)
350 		mem_info->mem_flag |= (1 << EDC1_FLAG);
351 }
352 
353 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
354 			     struct cudbg_error *cudbg_err)
355 {
356 	struct adapter *padap = pdbg_init->adap;
357 	int rc;
358 
359 	if (is_fw_attached(pdbg_init)) {
360 		/* Flush uP dcache before reading edcX/mcX  */
361 		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
362 		if (rc)
363 			cudbg_err->sys_warn = rc;
364 	}
365 }
366 
367 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
368 				    struct cudbg_buffer *dbg_buff,
369 				    struct cudbg_error *cudbg_err,
370 				    u8 mem_type)
371 {
372 	struct card_mem mem_info = {0};
373 	unsigned long flag, size;
374 	int rc;
375 
376 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
377 	cudbg_collect_mem_info(pdbg_init, &mem_info);
378 	switch (mem_type) {
379 	case MEM_EDC0:
380 		flag = (1 << EDC0_FLAG);
381 		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
382 		break;
383 	case MEM_EDC1:
384 		flag = (1 << EDC1_FLAG);
385 		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
386 		break;
387 	default:
388 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
389 		goto err;
390 	}
391 
392 	if (mem_info.mem_flag & flag) {
393 		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
394 				       size, cudbg_err);
395 		if (rc)
396 			goto err;
397 	} else {
398 		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
399 		goto err;
400 	}
401 err:
402 	return rc;
403 }
404 
405 int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
406 			       struct cudbg_buffer *dbg_buff,
407 			       struct cudbg_error *cudbg_err)
408 {
409 	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
410 					MEM_EDC0);
411 }
412 
413 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
414 			       struct cudbg_buffer *dbg_buff,
415 			       struct cudbg_error *cudbg_err)
416 {
417 	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
418 					MEM_EDC1);
419 }
420 
/* Collect the TP indirect registers: three groups (TP_PIO, TP_TM_PIO,
 * TP_MIB_INDEX), each driven by a chip-specific table of
 * {addr, data, local_offset, range} quadruples (t5_* or t6_* arrays).
 * One struct ireg_buf is emitted per table row: the row's parameters
 * in ->tp_pio plus the register values read into ->outbuf.
 */
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_tp_pio;
	int i, rc, n = 0;
	u32 size;

	/* Total row count across all three tables determines the
	 * scratch-buffer size.
	 */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) +
		    sizeof(t5_tp_tm_pio_array) +
		    sizeof(t5_tp_mib_index_array);
	else
		n = sizeof(t6_tp_pio_array) +
		    sizeof(t6_tp_tm_pio_array) +
		    sizeof(t6_tp_mib_index_array);

	n = n / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_tp_pio = (struct ireg_buf *)temp_buff.data;

	/* TP_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_TM_PIO */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}

	/* TP_MIB_INDEX */
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));

	for (i = 0; i < n ; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
534 
535 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
536 			       struct cudbg_buffer *dbg_buff,
537 			       struct cudbg_error *cudbg_err)
538 {
539 	struct adapter *padap = pdbg_init->adap;
540 	struct cudbg_buffer temp_buff = { 0 };
541 	struct ireg_buf *ch_sge_dbg;
542 	int i, rc;
543 
544 	rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
545 	if (rc)
546 		return rc;
547 
548 	ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
549 	for (i = 0; i < 2; i++) {
550 		struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
551 		u32 *buff = ch_sge_dbg->outbuf;
552 
553 		sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
554 		sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
555 		sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
556 		sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
557 		t4_read_indirect(padap,
558 				 sge_pio->ireg_addr,
559 				 sge_pio->ireg_data,
560 				 buff,
561 				 sge_pio->ireg_offset_range,
562 				 sge_pio->ireg_local_offset);
563 		ch_sge_dbg++;
564 	}
565 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
566 	return rc;
567 }
568 
/* Collect the PCIe indirect registers in two groups: PCIE_PDBG
 * (t5_pcie_pdbg_array) and PCIE_CDBG (t5_pcie_cdbg_array).  The
 * scratch buffer is sized as n * 2 ireg_bufs, relying on both tables
 * having the same row count; one ireg_buf is emitted per row.
 */
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pcie;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pcie = (struct ireg_buf *)temp_buff.data;
	/* PCIE_PDBG */
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}

	/* PCIE_CDBG */
	n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
		u32 *buff = ch_pcie->outbuf;

		pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
		pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
		pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
		pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
625 
/* Collect the PM indirect registers in two groups: PM_RX
 * (t5_pm_rx_array) and PM_TX (t5_pm_tx_array).  The scratch buffer is
 * sized as n * 2 ireg_bufs, relying on both tables having the same
 * row count; one ireg_buf is emitted per row.
 */
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ch_pm;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ch_pm = (struct ireg_buf *)temp_buff.data;
	/* PM_RX */
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_rx_array[i][0];
		pm_pio->ireg_data = t5_pm_rx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}

	/* PM_TX */
	n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *pm_pio = &ch_pm->tp_pio;
		u32 *buff = ch_pm->outbuf;

		pm_pio->ireg_addr = t5_pm_tx_array[i][0];
		pm_pio->ireg_data = t5_pm_tx_array[i][1];
		pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
		pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
682 
/* Collect the MA indirect registers (T6+ only).  Two groups: the rows
 * of t6_ma_ireg_array are read as whole ranges, while each row of
 * t6_ma_ireg_array2 is read one register at a time, stepping the local
 * offset by 0x20 per value.  Sizing as n * 2 ireg_bufs relies on both
 * tables having the same row count.
 */
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *ma_indr;
	int i, rc, n;
	u32 size, j;

	/* MA indirect registers exist only on T6 and later. */
	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
		return CUDBG_STATUS_ENTITY_NOT_FOUND;

	n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n * 2;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	ma_indr = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
		ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
		ma_indr++;
	}

	n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
	for (i = 0; i < n; i++) {
		struct ireg_field *ma_fli = &ma_indr->tp_pio;
		u32 *buff = ma_indr->outbuf;

		ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
		ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
		ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
		/* Row element [3] is a repeat count here, not a range:
		 * read one value per step of 0x20 in the local offset.
		 */
		for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
			buff++;
			ma_fli->ireg_local_offset += 0x20;
		}
		ma_indr++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
737 
/* Collect the uP CIM indirect registers via t4_cim_read(), using the
 * chip-specific table (t5_up_cim_reg_array or t6_up_cim_reg_array) of
 * {addr, data, local_offset, range} rows.  The scratch buffer is sized
 * from the T5 table; presumably both tables have the same row count —
 * TODO confirm against cudbg_entity.h.  A read failure aborts the
 * whole entity and releases the scratch buffer.
 */
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
				  struct cudbg_buffer *dbg_buff,
				  struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_buffer temp_buff = { 0 };
	struct ireg_buf *up_cim;
	int i, rc, n;
	u32 size;

	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
	size = sizeof(struct ireg_buf) * n;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	up_cim = (struct ireg_buf *)temp_buff.data;
	for (i = 0; i < n; i++) {
		struct ireg_field *up_cim_reg = &up_cim->tp_pio;
		u32 *buff = up_cim->outbuf;

		if (is_t5(padap->params.chip)) {
			up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t5_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t5_up_cim_reg_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
			up_cim_reg->ireg_local_offset =
						t6_up_cim_reg_array[i][2];
			up_cim_reg->ireg_offset_range =
						t6_up_cim_reg_array[i][3];
		}

		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
				 up_cim_reg->ireg_offset_range, buff);
		if (rc) {
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}
		up_cim++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
786 
/* Collect the adapter's mailbox command log.  Walks the circular log
 * starting at its cursor (oldest entry first), skipping unused slots,
 * and emits one cudbg_mbox_log per entry: the raw mbox_cmd plus each
 * 64-bit command flit split into hi/lo 32-bit halves.
 */
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
			   struct cudbg_buffer *dbg_buff,
			   struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct cudbg_mbox_log *mboxlog = NULL;
	struct cudbg_buffer temp_buff = { 0 };
	struct mbox_cmd_log *log = NULL;
	struct mbox_cmd *entry;
	unsigned int entry_idx;
	u16 mbox_cmds;
	int i, k, rc;
	u64 flit;
	u32 size;

	log = padap->mbox_log;
	mbox_cmds = padap->mbox_log->size;
	size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
	for (k = 0; k < mbox_cmds; k++) {
		/* Wrap around the circular log without a modulo. */
		entry_idx = log->cursor + k;
		if (entry_idx >= log->size)
			entry_idx -= log->size;

		entry = mbox_cmd_log_entry(log, entry_idx);
		/* skip over unused entries */
		if (entry->timestamp == 0)
			continue;

		memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
		/* Split each 64-bit flit into 32-bit halves for the dump. */
		for (i = 0; i < MBOX_LEN / 8; i++) {
			flit = entry->cmd[i];
			mboxlog->hi[i] = (u32)(flit >> 32);
			mboxlog->lo[i] = (u32)flit;
		}
		mboxlog++;
	}
	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
	return rc;
}
831 
832 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
833 			       struct cudbg_buffer *dbg_buff,
834 			       struct cudbg_error *cudbg_err)
835 {
836 	struct adapter *padap = pdbg_init->adap;
837 	struct cudbg_buffer temp_buff = { 0 };
838 	struct ireg_buf *hma_indr;
839 	int i, rc, n;
840 	u32 size;
841 
842 	if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
843 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
844 
845 	n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
846 	size = sizeof(struct ireg_buf) * n;
847 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
848 	if (rc)
849 		return rc;
850 
851 	hma_indr = (struct ireg_buf *)temp_buff.data;
852 	for (i = 0; i < n; i++) {
853 		struct ireg_field *hma_fli = &hma_indr->tp_pio;
854 		u32 *buff = hma_indr->outbuf;
855 
856 		hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
857 		hma_fli->ireg_data = t6_hma_ireg_array[i][1];
858 		hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
859 		hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
860 		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
861 				 buff, hma_fli->ireg_offset_range,
862 				 hma_fli->ireg_local_offset);
863 		hma_indr++;
864 	}
865 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
866 	return rc;
867 }
868