/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "cudbg.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"

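/*
 * WTP entity: the collectors below snapshot SOP/EOP packet counters at
 * each hop of the TX and RX datapaths (PCIe, SGE, ULP, TP, CPL switch,
 * MPS, MAC) together with the related TP MIB drop/error counters, so a
 * lost packet can be localized to a particular stage ("where's the
 * packet" style debugging).
 */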
int collect_wtp_data(struct cudbg_init *pdbg_init,
		     struct cudbg_buffer *dbg_buff,
		     struct cudbg_error *cudbg_err);
/*SGE_DEBUG Registers.*/
#define TP_MIB_SIZE	    0x5e

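/*
 * Layout of the SGE debug words captured by read_sge_debug_data() below.
 * Each "indxN" block is one 32-bit slot of the capture array: even slots
 * hold the (nibble-reversed) SGE_DEBUG_DATA_HIGH word and odd slots the
 * SGE_DEBUG_DATA_LOW word for A_SGE_DEBUG_INDEX == N/2.  The 4-bit fields
 * name the per-channel SOP/EOP counters packed into those words; the
 * field order assumes LSB-first bitfield allocation and the nibble swap
 * done by HTONL_NIBBLE() in read_sge_debug_data().
 */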
struct sge_debug_reg_data {
	/*indx0*/
	u32 reserved1:4;
	u32 reserved2:4;
	u32 debug_uP_SOP_cnt:4;
	u32 debug_uP_EOP_cnt:4;
	u32 debug_CIM_SOP1_cnt:4;
	u32 debug_CIM_EOP1_cnt:4;
	u32 debug_CIM_SOP0_cnt:4;
	u32 debug_CIM_EOP0_cnt:4;

	/*indx1*/
	u32 reserved3:32;

	/*indx2*/
	u32 debug_T_Rx_SOP1_cnt:4;
	u32 debug_T_Rx_EOP1_cnt:4;
	u32 debug_T_Rx_SOP0_cnt:4;
	u32 debug_T_Rx_EOP0_cnt:4;
	u32 debug_U_Rx_SOP1_cnt:4;
	u32 debug_U_Rx_EOP1_cnt:4;
	u32 debug_U_Rx_SOP0_cnt:4;
	u32 debug_U_Rx_EOP0_cnt:4;

	/*indx3*/
	u32 reserved4:32;

	/*indx4*/
	u32 debug_UD_Rx_SOP3_cnt:4;
	u32 debug_UD_Rx_EOP3_cnt:4;
	u32 debug_UD_Rx_SOP2_cnt:4;
	u32 debug_UD_Rx_EOP2_cnt:4;
	u32 debug_UD_Rx_SOP1_cnt:4;
	u32 debug_UD_Rx_EOP1_cnt:4;
	u32 debug_UD_Rx_SOP0_cnt:4;
	u32 debug_UD_Rx_EOP0_cnt:4;

	/*indx5*/
	u32 reserved5:32;

	/*indx6*/
	u32 debug_U_Tx_SOP3_cnt:4;
	u32 debug_U_Tx_EOP3_cnt:4;
	u32 debug_U_Tx_SOP2_cnt:4;
	u32 debug_U_Tx_EOP2_cnt:4;
	u32 debug_U_Tx_SOP1_cnt:4;
	u32 debug_U_Tx_EOP1_cnt:4;
	u32 debug_U_Tx_SOP0_cnt:4;
	u32 debug_U_Tx_EOP0_cnt:4;

	/*indx7*/
	u32 reserved6:32;

	/*indx8*/
	u32  debug_PC_Rsp_SOP1_cnt:4;
	u32  debug_PC_Rsp_EOP1_cnt:4;
	u32  debug_PC_Rsp_SOP0_cnt:4;
	u32  debug_PC_Rsp_EOP0_cnt:4;
	u32  debug_PC_Req_SOP1_cnt:4;
	u32  debug_PC_Req_EOP1_cnt:4;
	u32  debug_PC_Req_SOP0_cnt:4;
	u32  debug_PC_Req_EOP0_cnt:4;

	/*indx9*/
	u32 reserved7:32;

	/*indx10*/
	u32  debug_PD_Req_SOP3_cnt:4;
	u32  debug_PD_Req_EOP3_cnt:4;
	u32  debug_PD_Req_SOP2_cnt:4;
	u32  debug_PD_Req_EOP2_cnt:4;
	u32  debug_PD_Req_SOP1_cnt:4;
	u32  debug_PD_Req_EOP1_cnt:4;
	u32  debug_PD_Req_SOP0_cnt:4;
	u32  debug_PD_Req_EOP0_cnt:4;

	/*indx11*/
	u32 reserved8:32;

	/*indx12*/
	u32  debug_PD_Rsp_SOP3_cnt:4;
	u32  debug_PD_Rsp_EOP3_cnt:4;
	u32  debug_PD_Rsp_SOP2_cnt:4;
	u32  debug_PD_Rsp_EOP2_cnt:4;
	u32  debug_PD_Rsp_SOP1_cnt:4;
	u32  debug_PD_Rsp_EOP1_cnt:4;
	u32  debug_PD_Rsp_SOP0_cnt:4;
	u32  debug_PD_Rsp_EOP0_cnt:4;

	/*indx13*/
	u32 reserved9:32;

	/*indx14*/
	u32  debug_CPLSW_TP_Rx_SOP1_cnt:4;
	u32  debug_CPLSW_TP_Rx_EOP1_cnt:4;
	u32  debug_CPLSW_TP_Rx_SOP0_cnt:4;
	u32  debug_CPLSW_TP_Rx_EOP0_cnt:4;
	u32  debug_CPLSW_CIM_SOP1_cnt:4;
	u32  debug_CPLSW_CIM_EOP1_cnt:4;
	u32  debug_CPLSW_CIM_SOP0_cnt:4;
	u32  debug_CPLSW_CIM_EOP0_cnt:4;

	/*indx15*/
	u32 reserved10:32;

	/*indx16*/
	u32  debug_PD_Req_Rd3_cnt:4;
	u32  debug_PD_Req_Rd2_cnt:4;
	u32  debug_PD_Req_Rd1_cnt:4;
	u32  debug_PD_Req_Rd0_cnt:4;
	u32  debug_PD_Req_Int3_cnt:4;
	u32  debug_PD_Req_Int2_cnt:4;
	u32  debug_PD_Req_Int1_cnt:4;
	u32  debug_PD_Req_Int0_cnt:4;

};

struct tp_mib_type tp_mib[] = {
	{"tp_mib_mac_in_err_0", 0x0},
	{"tp_mib_mac_in_err_1", 0x1},
	{"tp_mib_mac_in_err_2", 0x2},
	{"tp_mib_mac_in_err_3", 0x3},
	{"tp_mib_hdr_in_err_0", 0x4},
	{"tp_mib_hdr_in_err_1", 0x5},
	{"tp_mib_hdr_in_err_2", 0x6},
	{"tp_mib_hdr_in_err_3", 0x7},
	{"tp_mib_tcp_in_err_0", 0x8},
	{"tp_mib_tcp_in_err_1", 0x9},
	{"tp_mib_tcp_in_err_2", 0xa},
	{"tp_mib_tcp_in_err_3", 0xb},
	{"tp_mib_tcp_out_rst", 0xc},
	{"tp_mib_tcp_in_seg_hi", 0x10},
	{"tp_mib_tcp_in_seg_lo", 0x11},
	{"tp_mib_tcp_out_seg_hi", 0x12},
	{"tp_mib_tcp_out_seg_lo", 0x13},
	{"tp_mib_tcp_rxt_seg_hi", 0x14},
	{"tp_mib_tcp_rxt_seg_lo", 0x15},
	{"tp_mib_tnl_cng_drop_0", 0x18},
	{"tp_mib_tnl_cng_drop_1", 0x19},
	{"tp_mib_tnl_cng_drop_2", 0x1a},
	{"tp_mib_tnl_cng_drop_3", 0x1b},
	{"tp_mib_ofd_chn_drop_0", 0x1c},
	{"tp_mib_ofd_chn_drop_1", 0x1d},
	{"tp_mib_ofd_chn_drop_2", 0x1e},
	{"tp_mib_ofd_chn_drop_3", 0x1f},
	{"tp_mib_tnl_out_pkt_0", 0x20},
	{"tp_mib_tnl_out_pkt_1", 0x21},
	{"tp_mib_tnl_out_pkt_2", 0x22},
	{"tp_mib_tnl_out_pkt_3", 0x23},
	{"tp_mib_tnl_in_pkt_0", 0x24},
	{"tp_mib_tnl_in_pkt_1", 0x25},
	{"tp_mib_tnl_in_pkt_2", 0x26},
	{"tp_mib_tnl_in_pkt_3", 0x27},
	{"tp_mib_tcp_v6in_err_0", 0x28},
	{"tp_mib_tcp_v6in_err_1", 0x29},
	{"tp_mib_tcp_v6in_err_2", 0x2a},
	{"tp_mib_tcp_v6in_err_3", 0x2b},
	{"tp_mib_tcp_v6out_rst", 0x2c},
	{"tp_mib_tcp_v6in_seg_hi", 0x30},
	{"tp_mib_tcp_v6in_seg_lo", 0x31},
	{"tp_mib_tcp_v6out_seg_hi", 0x32},
	{"tp_mib_tcp_v6out_seg_lo", 0x33},
	{"tp_mib_tcp_v6rxt_seg_hi", 0x34},
	{"tp_mib_tcp_v6rxt_seg_lo", 0x35},
	{"tp_mib_ofd_arp_drop", 0x36},
	{"tp_mib_ofd_dfr_drop", 0x37},
	{"tp_mib_cpl_in_req_0", 0x38},
	{"tp_mib_cpl_in_req_1", 0x39},
	{"tp_mib_cpl_in_req_2", 0x3a},
	{"tp_mib_cpl_in_req_3", 0x3b},
	{"tp_mib_cpl_out_rsp_0", 0x3c},
	{"tp_mib_cpl_out_rsp_1", 0x3d},
	{"tp_mib_cpl_out_rsp_2", 0x3e},
	{"tp_mib_cpl_out_rsp_3", 0x3f},
	{"tp_mib_tnl_lpbk_0", 0x40},
	{"tp_mib_tnl_lpbk_1", 0x41},
	{"tp_mib_tnl_lpbk_2", 0x42},
	{"tp_mib_tnl_lpbk_3", 0x43},
	{"tp_mib_tnl_drop_0", 0x44},
	{"tp_mib_tnl_drop_1", 0x45},
	{"tp_mib_tnl_drop_2", 0x46},
	{"tp_mib_tnl_drop_3", 0x47},
	{"tp_mib_fcoe_ddp_0", 0x48},
	{"tp_mib_fcoe_ddp_1", 0x49},
	{"tp_mib_fcoe_ddp_2", 0x4a},
	{"tp_mib_fcoe_ddp_3", 0x4b},
	{"tp_mib_fcoe_drop_0", 0x4c},
	{"tp_mib_fcoe_drop_1", 0x4d},
	{"tp_mib_fcoe_drop_2", 0x4e},
	{"tp_mib_fcoe_drop_3", 0x4f},
	{"tp_mib_fcoe_byte_0_hi", 0x50},
	{"tp_mib_fcoe_byte_0_lo", 0x51},
	{"tp_mib_fcoe_byte_1_hi", 0x52},
	{"tp_mib_fcoe_byte_1_lo", 0x53},
	{"tp_mib_fcoe_byte_2_hi", 0x54},
	{"tp_mib_fcoe_byte_2_lo", 0x55},
	{"tp_mib_fcoe_byte_3_hi", 0x56},
	{"tp_mib_fcoe_byte_3_lo", 0x57},
	{"tp_mib_ofd_vln_drop_0", 0x58},
	{"tp_mib_ofd_vln_drop_1", 0x59},
	{"tp_mib_ofd_vln_drop_2", 0x5a},
	{"tp_mib_ofd_vln_drop_3", 0x5b},
	{"tp_mib_usm_pkts", 0x5c},
	{"tp_mib_usm_drop", 0x5d},
	{"tp_mib_usm_bytes_hi", 0x5e},
	{"tp_mib_usm_bytes_lo", 0x5f},
	{"tp_mib_tid_del", 0x60},
	{"tp_mib_tid_inv", 0x61},
	{"tp_mib_tid_act", 0x62},
	{"tp_mib_tid_pas", 0x63},
	{"tp_mib_rqe_dfr_mod", 0x64},
	{"tp_mib_rqe_dfr_pkt", 0x65}
};

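/*
 * Capture all 16 SGE debug index pairs: for each A_SGE_DEBUG_INDEX value
 * the HIGH word is stored in the even array slot and the LOW word in the
 * odd slot, after nibble-reversal, so the caller can overlay
 * struct sge_debug_reg_data on the array.  Always returns 0.
 */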
static u32
read_sge_debug_data(struct cudbg_init *pdbg_init, u32 *sge_dbg_reg)
{
	struct adapter *padap = pdbg_init->adap;
	u32 value;
	int i = 0;

	for (i = 0; i <= 15; i++) {
		t4_write_reg(padap, A_SGE_DEBUG_INDEX, (u32)i);
		value = t4_read_reg(padap, A_SGE_DEBUG_DATA_LOW);
		/*printf("LOW	 0x%08x\n", value);*/
		sge_dbg_reg[(i << 1) | 1] = HTONL_NIBBLE(value);
		value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH);
		/*printf("HIGH	 0x%08x\n", value);*/
		sge_dbg_reg[(i << 1)] = HTONL_NIBBLE(value);
	}
	return 0;
}

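/*
 * Read every TP MIB counter listed in tp_mib[] (TP_MIB_SIZE entries) via
 * t4_tp_mib_read() and hand back a pointer to the table cast as
 * struct tp_mib_data, so callers can reference counters by name.
 * Always returns 0.
 */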
static u32
read_tp_mib_data(struct cudbg_init *pdbg_init,
		 struct tp_mib_data **ppTp_Mib)
{
	struct adapter *padap = pdbg_init->adap;
	u32 i = 0;

	for (i = 0; i < TP_MIB_SIZE; i++) {
		t4_tp_mib_read(padap, &tp_mib[i].value, 1,
				  (u32)tp_mib[i].addr, true);
	}
	*ppTp_Mib = (struct tp_mib_data *)&tp_mib[0];

	return 0;
}

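/*
 * T5 collector: grab a wtp_data scratch buffer, walk the TX path
 * (SGE -> PCIe -> core -> ULP_TX -> TP -> MPS -> MAC -> wire) and the RX
 * path (wire -> MAC -> MPS -> TP -> CPL switch -> SGE -> PCIe) sampling
 * SOP/EOP counters and drop/error totals at each stage, then compress
 * the result into dbg_buff.
 */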
static int
t5_wtp_data(struct cudbg_init *pdbg_init,
	    struct cudbg_buffer *dbg_buff,
	    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct sge_debug_reg_data *sge_dbg_reg = NULL;
	struct cudbg_buffer scratch_buff;
	struct tp_mib_data *ptp_mib = NULL;
	struct wtp_data *wtp;
	u32 Sge_Dbg[32] = {0};
	u32 value = 0;
	u32 i = 0;
	u32 drop = 0;
	u32 err = 0;
	u32 offset;
	int rc = 0;

	rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);

	if (rc)
		goto err;

	offset = scratch_buff.offset;
	wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);

	read_sge_debug_data(pdbg_init, Sge_Dbg);
	read_tp_mib_data(pdbg_init, &ptp_mib);

	sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];
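	/*
	 * Sge_Dbg[] now holds the 16 HIGH/LOW debug word pairs captured
	 * above; the cast lets the path walk below pick out individual
	 * 4-bit SOP/EOP counters by name rather than by array index and
	 * shift.
	 */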

	/*#######################################################################*/
	/*# TX PATH, starting from pcie*/
	/*#######################################################################*/

	/* Get Requests of commands from SGE to PCIE*/

	wtp->sge_pcie_cmd_req.sop[0] =	sge_dbg_reg->debug_PC_Req_SOP0_cnt;
	wtp->sge_pcie_cmd_req.sop[1] =	sge_dbg_reg->debug_PC_Req_SOP1_cnt;

	wtp->sge_pcie_cmd_req.eop[0] =	sge_dbg_reg->debug_PC_Req_EOP0_cnt;
	wtp->sge_pcie_cmd_req.eop[1] =	sge_dbg_reg->debug_PC_Req_EOP1_cnt;

	/* Get Requests of commands from PCIE to core*/
	value = t4_read_reg(padap, A_PCIE_CMDR_REQ_CNT);

	wtp->pcie_core_cmd_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_cmd_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	/* there is no EOP for this, so we fake it.*/
	wtp->pcie_core_cmd_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_cmd_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/

	/* Get DMA stats*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
		wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
		wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
	}

	/* Get SGE debug data high index 6*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_6);
	wtp->sge_debug_data_high_index_6.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_index_6.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[1] = ((value >> 8) & 0x0F);
	wtp->sge_debug_data_high_index_6.sop[2] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[2] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_index_6.sop[3] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[3] = ((value >> 24) & 0x0F);

	/* Get SGE debug data high index 3*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_3);
	wtp->sge_debug_data_high_index_3.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_index_3.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[1] = ((value >> 8) & 0x0F);
	wtp->sge_debug_data_high_index_3.sop[2] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[2] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_index_3.sop[3] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[3] = ((value >> 24) & 0x0F);

	/* Get ULP SE CNT CHx*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
		wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
		wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
	}

	/* Get MAC PORTx PKT COUNT*/
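	/*
	 * 0x3081c is a raw MAC port 0 register offset and ((i * 4) << 12)
	 * steps 0x4000 per port, which presumably matches the
	 * T5_PORT_STRIDE used with the named MAC registers later in this
	 * function.
	 */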
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
		wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
		wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
		wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
		wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
	}

	/* Get mac portx aFramesTransmittedok*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, 0x30a80 + ((i * 4) << 12));
		wtp->mac_portx_aframestra_ok.sop[i] = (value & 0xFF);
		wtp->mac_portx_aframestra_ok.eop[i] = (value & 0xFF);
	}

	/* Get command responses from core to PCIE*/
	value = t4_read_reg(padap, A_PCIE_CMDR_RSP_CNT);

	wtp->core_pcie_cmd_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->core_pcie_cmd_rsp.sop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/

	wtp->core_pcie_cmd_rsp.eop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->core_pcie_cmd_rsp.eop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/

	/*Get command Responses from PCIE to SGE*/
	wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
	wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;

	wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
	wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;

	/* Get commands sent from SGE to CIM/uP*/
	wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
	wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;

	wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
	wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;

	/* Get Requests of data from PCIE by SGE*/
	wtp->utx_sge_dma_req.sop[0] = sge_dbg_reg->debug_UD_Rx_SOP0_cnt;
	wtp->utx_sge_dma_req.sop[1] = sge_dbg_reg->debug_UD_Rx_SOP1_cnt;
	wtp->utx_sge_dma_req.sop[2] = sge_dbg_reg->debug_UD_Rx_SOP2_cnt;
	wtp->utx_sge_dma_req.sop[3] = sge_dbg_reg->debug_UD_Rx_SOP3_cnt;

	wtp->utx_sge_dma_req.eop[0] = sge_dbg_reg->debug_UD_Rx_EOP0_cnt;
	wtp->utx_sge_dma_req.eop[1] = sge_dbg_reg->debug_UD_Rx_EOP1_cnt;
	wtp->utx_sge_dma_req.eop[2] = sge_dbg_reg->debug_UD_Rx_EOP2_cnt;
	wtp->utx_sge_dma_req.eop[3] = sge_dbg_reg->debug_UD_Rx_EOP3_cnt;

	/* Get Requests of data from PCIE by SGE*/
	wtp->sge_pcie_dma_req.sop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
	wtp->sge_pcie_dma_req.sop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
	wtp->sge_pcie_dma_req.sop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
	wtp->sge_pcie_dma_req.sop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;
	/*no EOP's, so fake it.*/
	wtp->sge_pcie_dma_req.eop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
	wtp->sge_pcie_dma_req.eop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
	wtp->sge_pcie_dma_req.eop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
	wtp->sge_pcie_dma_req.eop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;

	/* Get Requests of data from PCIE to core*/
	value = t4_read_reg(padap, A_PCIE_DMAR_REQ_CNT);

	wtp->pcie_core_dma_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dma_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dma_req.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dma_req.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
	/* There is no eop so fake it.*/
	wtp->pcie_core_dma_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dma_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dma_req.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dma_req.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	/* Get data responses from core to PCIE*/
	value = t4_read_reg(padap, A_PCIE_DMAR_RSP_SOP_CNT);

	wtp->core_pcie_dma_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->core_pcie_dma_rsp.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->core_pcie_dma_rsp.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->core_pcie_dma_rsp.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	value = t4_read_reg(padap, A_PCIE_DMAR_RSP_EOP_CNT);

	wtp->core_pcie_dma_rsp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->core_pcie_dma_rsp.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->core_pcie_dma_rsp.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->core_pcie_dma_rsp.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	/* Get PCIE_DATA to SGE*/
	wtp->pcie_sge_dma_rsp.sop[0] = sge_dbg_reg->debug_PD_Rsp_SOP0_cnt;
	wtp->pcie_sge_dma_rsp.sop[1] = sge_dbg_reg->debug_PD_Rsp_SOP1_cnt;
	wtp->pcie_sge_dma_rsp.sop[2] = sge_dbg_reg->debug_PD_Rsp_SOP2_cnt;
	wtp->pcie_sge_dma_rsp.sop[3] = sge_dbg_reg->debug_PD_Rsp_SOP3_cnt;

	wtp->pcie_sge_dma_rsp.eop[0] = sge_dbg_reg->debug_PD_Rsp_EOP0_cnt;
	wtp->pcie_sge_dma_rsp.eop[1] = sge_dbg_reg->debug_PD_Rsp_EOP1_cnt;
	wtp->pcie_sge_dma_rsp.eop[2] = sge_dbg_reg->debug_PD_Rsp_EOP2_cnt;
	wtp->pcie_sge_dma_rsp.eop[3] = sge_dbg_reg->debug_PD_Rsp_EOP3_cnt;

	/*Get SGE to ULP_TX*/
	wtp->sge_utx.sop[0] = sge_dbg_reg->debug_U_Tx_SOP0_cnt;
	wtp->sge_utx.sop[1] = sge_dbg_reg->debug_U_Tx_SOP1_cnt;
	wtp->sge_utx.sop[2] = sge_dbg_reg->debug_U_Tx_SOP2_cnt;
	wtp->sge_utx.sop[3] = sge_dbg_reg->debug_U_Tx_SOP3_cnt;

	wtp->sge_utx.eop[0] = sge_dbg_reg->debug_U_Tx_EOP0_cnt;
	wtp->sge_utx.eop[1] = sge_dbg_reg->debug_U_Tx_EOP1_cnt;
	wtp->sge_utx.eop[2] = sge_dbg_reg->debug_U_Tx_EOP2_cnt;
	wtp->sge_utx.eop[3] = sge_dbg_reg->debug_U_Tx_EOP3_cnt;

	/* Get ULP_TX to TP*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, (A_ULP_TX_SE_CNT_CH0 + (i*4)));

		wtp->utx_tp.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
		wtp->utx_tp.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
	}

	/* Get TP_DBG_CSIDE registers*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
			       true);

		wtp->utx_tpcside.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->utx_tpcside.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
		wtp->tpcside_rxpld.sop[i] = ((value >> 20) & 0xF);/*bits 20:23*/
		wtp->tpcside_rxpld.eop[i] = ((value >> 16) & 0xF);/*bits 16:19*/
		wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
		wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
		wtp->tpcside_rxcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->tpcside_rxcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
	}

	/* TP_DBG_ESIDE*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
		wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
		wtp->tpeside_pm.sop[i]	= ((value >> 20) & 0xF); /*bits 20:23*/
		wtp->tpeside_pm.eop[i]	= ((value >> 16) & 0xF); /*bits 16:19*/
		wtp->mps_tpeside.sop[i] = ((value >> 12) & 0xF); /*bits 12:15*/
		wtp->mps_tpeside.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
		wtp->tpeside_pld.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->tpeside_pld.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/

	}

	/*PCIE CMD STAT2*/
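	/*
	 * 0x5988 (and 0x598c below) are raw PCIe register offsets, sampled
	 * per channel with a 0x10 stride; they appear to be the same
	 * CMD_STAT2/CMD_STAT3 counters that the T6 path reads by name
	 * (A_PCIE_T5_CMD_STAT2/3).
	 */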
	for (i = 0; i < 3; i++) {
		value = t4_read_reg(padap, 0x5988 + (i * 0x10));
		wtp->pcie_cmd_stat2.sop[i] = value & 0xFF;
		wtp->pcie_cmd_stat2.eop[i] = value & 0xFF;
	}

	/*PCIE cmd stat3*/
	for (i = 0; i < 3; i++) {
		value = t4_read_reg(padap, 0x598c + (i * 0x10));
		wtp->pcie_cmd_stat3.sop[i] = value & 0xFF;
		wtp->pcie_cmd_stat3.eop[i] = value & 0xFF;
	}

	/* ULP_RX input/output*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));

		wtp->pmrx_ulprx.sop[i]	  = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->pmrx_ulprx.eop[i]	  = ((value >> 0) & 0xF); /*bits 0:3*/
		wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
	}

	/* Get the MPS input from TP*/
	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
		wtp->tp_mps.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->tp_mps.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								    */
		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								    */
	}
	drop  = ptp_mib->TP_MIB_OFD_ARP_DROP.value;
	drop += ptp_mib->TP_MIB_OFD_DFR_DROP.value;

	drop += ptp_mib->TP_MIB_TNL_DROP_0.value;
	drop += ptp_mib->TP_MIB_TNL_DROP_1.value;
	drop += ptp_mib->TP_MIB_TNL_DROP_2.value;
	drop += ptp_mib->TP_MIB_TNL_DROP_3.value;

	wtp->tp_mps.drops = drop;

	/* Get the MPS output to the MACs*/
	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
		wtp->mps_xgm.sop[(i*2)]     = ((value >> 8) & 0xFF);/*bit 8:15*/
		wtp->mps_xgm.eop[(i*2)]     = ((value >> 0) & 0xFF);/*bit 0:7*/
		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								     */
		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								     */
	}
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MPS_PORT_STAT_TX_PORT_DROP_L) +
				(i * T5_PORT_STRIDE)));
		drop += value;
	}
	wtp->mps_xgm.drops = (drop & 0xFF);

	/*
	 * Get the SOP/EOP counters into and out of MAC. [JHANEL] I think
	 * this is clear on read, so you have to read both TX and RX paths
	 * at the same time.
	 */
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MAC_PORT_PKT_COUNT) +
				(i * T5_PORT_STRIDE)));

		wtp->tx_xgm_xgm.sop[i] = ((value >> 24) & 0xFF); /*bit 24:31*/
		wtp->tx_xgm_xgm.eop[i] = ((value >> 16) & 0xFF); /*bit 16:23*/
		wtp->rx_xgm_xgm.sop[i] = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->rx_xgm_xgm.eop[i] = ((value >> 0) & 0xFF); /*bit 0:7*/
	}

	/* Get the MAC's output to the wire*/
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MAC_PORT_AFRAMESTRANSMITTEDOK) +
				(i * T5_PORT_STRIDE)));
		wtp->xgm_wire.sop[i] = (value);
		wtp->xgm_wire.eop[i] = (value); /* No EOP for XGMAC, so fake
						   it.*/
	}

	/*########################################################################*/
	/*# RX PATH, starting from wire*/
	/*########################################################################*/

	/* Add up the wire input to the MAC*/
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MAC_PORT_AFRAMESRECEIVEDOK) +
				(i * T5_PORT_STRIDE)));

		wtp->wire_xgm.sop[i] = (value);
		wtp->wire_xgm.eop[i] = (value); /* No EOP for XGMAC, so fake
						   it.*/
	}

	/* Already read the rx_xgm_xgm when reading TX path.*/

	/* Add up SOP/EOPs on all 8 MPS buffer channels*/
	drop = 0;
	for (i = 0; i < 8; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));

		wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
		wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
	}
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
		/* typo in JHANEL's code.*/
		drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
	}
	wtp->xgm_mps.cls_drop = drop & 0xFF;

	/* Add up the overflow drops on all 4 ports.*/
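	/*
	 * Each drop/truncate statistic is a 64-bit _L/_H pair; the extra
	 * t4_read_reg() calls below whose results are discarded presumably
	 * pick up the high halves, so only the low 32 bits of each counter
	 * are accumulated here (the MPS error loop further down follows
	 * the same pattern).
	 */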
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));

		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));

		value = t4_read_reg(padap,
			T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
			(i * T5_PORT_STRIDE));
		drop += value;
	}
	wtp->xgm_mps.drop = (drop & 0xFF);

	/* Add up the MPS errors that should result in dropped packets*/
	err = 0;
	for (i = 0; i < 4; i++) {

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG((A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE) + 4)));
	}
	wtp->xgm_mps.err = (err & 0xFF);

	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_OUT01 + (i << 2)));

		wtp->mps_tp.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->mps_tp.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->mps_tp.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								    */
		wtp->mps_tp.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								    */
	}
	drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_2.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_3.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_2.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_3.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_2.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_3.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_2.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_3.value;
	drop += ptp_mib->TP_MIB_USM_DROP.value;

	wtp->mps_tp.drops = drop;

	/* Get TP_DBG_CSIDE_TX registers*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
			       true);

		wtp->tpcside_csw.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->tpcside_csw.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
		wtp->tpcside_pm.sop[i]	  = ((value >> 20) & 0xF);/*bits 20:23*/
		wtp->tpcside_pm.eop[i]	  = ((value >> 16) & 0xF);/*bits 16:19*/
		wtp->tpcside_uturn.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
		wtp->tpcside_uturn.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
		wtp->tpcside_txcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->tpcside_txcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
	}

	/* TP to CPL_SWITCH*/
	wtp->tp_csw.sop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP0_cnt;
	wtp->tp_csw.sop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP1_cnt;

	wtp->tp_csw.eop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP0_cnt;
	wtp->tp_csw.eop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP1_cnt;

	/* TP/CPL_SWITCH to SGE*/
	wtp->csw_sge.sop[0] = sge_dbg_reg->debug_T_Rx_SOP0_cnt;
	wtp->csw_sge.sop[1] = sge_dbg_reg->debug_T_Rx_SOP1_cnt;

	wtp->csw_sge.eop[0] = sge_dbg_reg->debug_T_Rx_EOP0_cnt;
	wtp->csw_sge.eop[1] = sge_dbg_reg->debug_T_Rx_EOP1_cnt;

	wtp->sge_pcie.sop[0] = sge_dbg_reg->debug_PD_Req_SOP0_cnt;
	wtp->sge_pcie.sop[1] = sge_dbg_reg->debug_PD_Req_SOP1_cnt;
	wtp->sge_pcie.sop[2] = sge_dbg_reg->debug_PD_Req_SOP2_cnt;
	wtp->sge_pcie.sop[3] = sge_dbg_reg->debug_PD_Req_SOP3_cnt;

	wtp->sge_pcie.eop[0] = sge_dbg_reg->debug_PD_Req_EOP0_cnt;
	wtp->sge_pcie.eop[1] = sge_dbg_reg->debug_PD_Req_EOP1_cnt;
	wtp->sge_pcie.eop[2] = sge_dbg_reg->debug_PD_Req_EOP2_cnt;
	wtp->sge_pcie.eop[3] = sge_dbg_reg->debug_PD_Req_EOP3_cnt;

	wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
	wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
	wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
	wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;
	/* NO EOP, so fake it.*/
	wtp->sge_pcie_ints.eop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
	wtp->sge_pcie_ints.eop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
	wtp->sge_pcie_ints.eop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
	wtp->sge_pcie_ints.eop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;

	/*Get PCIE DMA1 STAT2*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
		wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2_core.sop[i] += value & 0x0F;
		wtp->pcie_dma1_stat2_core.eop[i] += value & 0x0F;
	}

	/* Get mac porrx aFramesTransmittedok*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, 0x30a88 + ((i * 4) << 12));
		wtp->mac_porrx_aframestra_ok.sop[i] = (value & 0xFF);
		wtp->mac_porrx_aframestra_ok.eop[i] = (value & 0xFF);
	}

	/*Get SGE debug data high index 7*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
	wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[2] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[2] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[3] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[3] = ((value >> 24) & 0x0F);

	/*Get SGE debug data high index 1*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
	wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);

	/*Get TP debug CSIDE Tx registers*/
	for (i = 0; i < 2; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
			       true);

		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31
								   */
		wtp->utx_tpcside_tx.eop[i] = ((value >> 24) & 0xF);
	}

	/*Get SGE debug data high index 9*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
	wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);
	wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_work_req_pkt.sop[1] = ((value >> 12) & 0x0F);

	/*Get LE DB response count*/
	value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
	wtp->le_db_rsp_cnt.sop = value & 0xF;
	wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;

	/*Get TP debug Eside PKTx*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
		wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
	}

	/* Get data responses from core to PCIE*/
	value = t4_read_reg(padap, A_PCIE_DMAW_SOP_CNT);

	wtp->pcie_core_dmaw.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmaw.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmaw.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmaw.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	value = t4_read_reg(padap, A_PCIE_DMAW_EOP_CNT);

	wtp->pcie_core_dmaw.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmaw.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmaw.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmaw.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	value = t4_read_reg(padap, A_PCIE_DMAI_CNT);

	wtp->pcie_core_dmai.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmai.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmai.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmai.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
	/* no eop for interrupts, just fake it.*/
	wtp->pcie_core_dmai.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmai.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmai.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmai.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

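	/*
	 * Emit the compression header for this entity, then compress the
	 * populated wtp_data into the output buffer; the scratch buffer is
	 * released on both the success and error paths.
	 */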
	rc = write_compression_hdr(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

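/*
 * T6 collector: same walk as t5_wtp_data() above, but only two ports /
 * channels are sampled in most loops and several counters live at
 * different (T6) register locations.
 */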
static int
t6_wtp_data(struct cudbg_init *pdbg_init,
	    struct cudbg_buffer *dbg_buff,
	    struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct sge_debug_reg_data *sge_dbg_reg = NULL;
	struct cudbg_buffer scratch_buff;
	struct tp_mib_data *ptp_mib = NULL;
	struct wtp_data *wtp;
	u32 Sge_Dbg[32] = {0};
	u32 value = 0;
	u32 i = 0;
	u32 drop = 0;
	u32 err = 0;
	u32 offset;
	int rc = 0;

	rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);

	if (rc)
		goto err;

	offset = scratch_buff.offset;
	wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);

	read_sge_debug_data(pdbg_init, Sge_Dbg);
	read_tp_mib_data(pdbg_init, &ptp_mib);

	sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];

	/*# TX PATH*/

	/*PCIE CMD STAT2*/
	value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT2);
	wtp->pcie_cmd_stat2.sop[0] = value & 0xFF;
	wtp->pcie_cmd_stat2.eop[0] = value & 0xFF;

	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
	wtp->sge_pcie_cmd_req.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_pcie_cmd_req.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_pcie_cmd_req.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_pcie_cmd_req.eop[1] = ((value >> 24) & 0x0F);

	value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT3);
	wtp->pcie_cmd_stat3.sop[0] = value & 0xFF;
	wtp->pcie_cmd_stat3.eop[0] = value & 0xFF;

	/*Get command Responses from PCIE to SGE*/
	wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
	wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
	wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;
	wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;

	/* Get commands sent from SGE to CIM/uP*/
	wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
	wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;

	wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
	wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;

	/*Get SGE debug data high index 9*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
	wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_work_req_pkt.eop[0] = ((value >> 0) & 0x0F);

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
		wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2_core.sop[i] = value & 0x0F;
		wtp->pcie_dma1_stat2_core.eop[i] = value & 0x0F;
	}

	/* Get DMA0 stats3*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
		wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
		wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
	}

	/* Get ULP SE CNT CHx*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
		wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
		wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
	}

	/* Get TP_DBG_CSIDE registers*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
			       true);

		wtp->utx_tpcside.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->utx_tpcside.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
		wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
		wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
	}

	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
		wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
	}

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
		wtp->tp_mps.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->tp_mps.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								    */
		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								    */
	}

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
		wtp->mps_xgm.sop[(i*2)]     = ((value >> 8) & 0xFF);/*bit 8:15*/
		wtp->mps_xgm.eop[(i*2)]     = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								     */
		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								     */
	}

	/* Get MAC PORTx PKT COUNT*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
		wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
		wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
		wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
		wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
	}

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30f20 + ((i * 4) << 12));
		wtp->mac_portx_aframestra_ok.sop[i] = value & 0xff;
		wtp->mac_portx_aframestra_ok.eop[i] = value & 0xff;
	}

	/*MAC_PORT_MTIP_1G10G_TX_etherStatsPkts*/

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30f60 + ((i * 4) << 12));
		wtp->mac_portx_etherstatspkts.sop[i] = value & 0xff;
		wtp->mac_portx_etherstatspkts.eop[i] = value & 0xff;
	}

	/*RX path*/

	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
	wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);

	/*Get SGE debug data high index 1*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
	wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);

	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
	wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);

	wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);

	for (i = 0; i < 2; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
			       true);

		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31
								   */
		wtp->utx_tpcside_tx.eop[i]   = ((value >> 24) & 0xF);
	}

	/*ULP_RX input/output*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));

		wtp->pmrx_ulprx.sop[i]	  = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->pmrx_ulprx.eop[i]	  = ((value >> 0) & 0xF); /*bits 0:3*/
		wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
	}

	/*Get LE DB response count*/
	value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
	wtp->le_db_rsp_cnt.sop = value & 0xF;
	wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;

	/*Get TP debug Eside PKTx*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
		wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
	}

	drop = 0;
	/*MPS_RX_SE_CNT_OUT01*/
	value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_OUT01 + (i << 2)));
	wtp->mps_tp.sop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->mps_tp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->mps_tp.sop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/
	wtp->mps_tp.eop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/

	drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
	drop += ptp_mib->TP_MIB_USM_DROP.value;

	wtp->mps_tp.drops = drop;

	drop = 0;
	for (i = 0; i < 8; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));

		wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
		wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
	}
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
		drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
	}
	wtp->xgm_mps.cls_drop = drop & 0xFF;

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30e20 + ((i * 4) << 12));
		wtp->mac_porrx_aframestra_ok.sop[i] = value & 0xff;
		wtp->mac_porrx_aframestra_ok.eop[i] = value & 0xff;
	}

	/*MAC_PORT_MTIP_1G10G_RX_etherStatsPkts*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30e60 + ((i * 4) << 12));
		wtp->mac_porrx_etherstatspkts.sop[i] = value & 0xff;
		wtp->mac_porrx_etherstatspkts.eop[i] = value & 0xff;
	}

	wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
	wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
	wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
	wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;

	/* Add up the overflow drops on both ports.*/
	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));

		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
			(i * T5_PORT_STRIDE)));
		drop += value;
	}
	wtp->xgm_mps.drop = (drop & 0xFF);

	/* Add up the MPS errors that should result in dropped packets*/
	err = 0;
	for (i = 0; i < 2; i++) {

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
				     (i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE) + 4));
	}
	wtp->xgm_mps.err = (err & 0xFF);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

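/*
 * Entry point for the WTP entity: dispatch on the chip revision.  Chips
 * other than T5/T6 are not handled and return -1.
 */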
int
collect_wtp_data(struct cudbg_init *pdbg_init,
		 struct cudbg_buffer *dbg_buff,
		 struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc = -1;

	if (is_t5(padap->params.chip))
		rc = t5_wtp_data(pdbg_init, dbg_buff, cudbg_err);
	else if (is_t6(padap->params.chip))
		rc = t6_wtp_data(pdbg_init, dbg_buff, cudbg_err);

	return rc;
}