/*-
 * Copyright (c) 2017 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/param.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "cudbg.h"
#include "cudbg_lib_common.h"
#include "cudbg_entity.h"

int collect_wtp_data(struct cudbg_init *pdbg_init,
		     struct cudbg_buffer *dbg_buff,
		     struct cudbg_error *cudbg_err);
/*SGE_DEBUG Registers.*/
#define TP_MIB_SIZE	    0x5e

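/*
 * Per-interface SOP/EOP counters captured from the SGE debug interface.
 * Each "indx" block below overlays one 32-bit word of the buffer filled in
 * by read_sge_debug_data(); every 4-bit field is the SOP or EOP event count
 * for the SGE interface it is named after.
 */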
struct sge_debug_reg_data {
	/*indx0*/
	u32 reserved1:4;
	u32 reserved2:4;
	u32 debug_uP_SOP_cnt:4;
	u32 debug_uP_EOP_cnt:4;
	u32 debug_CIM_SOP1_cnt:4;
	u32 debug_CIM_EOP1_cnt:4;
	u32 debug_CIM_SOP0_cnt:4;
	u32 debug_CIM_EOP0_cnt:4;

	/*indx1*/
	u32 reserved3:32;

	/*indx2*/
	u32 debug_T_Rx_SOP1_cnt:4;
	u32 debug_T_Rx_EOP1_cnt:4;
	u32 debug_T_Rx_SOP0_cnt:4;
	u32 debug_T_Rx_EOP0_cnt:4;
	u32 debug_U_Rx_SOP1_cnt:4;
	u32 debug_U_Rx_EOP1_cnt:4;
	u32 debug_U_Rx_SOP0_cnt:4;
	u32 debug_U_Rx_EOP0_cnt:4;

	/*indx3*/
	u32 reserved4:32;

	/*indx4*/
	u32 debug_UD_Rx_SOP3_cnt:4;
	u32 debug_UD_Rx_EOP3_cnt:4;
	u32 debug_UD_Rx_SOP2_cnt:4;
	u32 debug_UD_Rx_EOP2_cnt:4;
	u32 debug_UD_Rx_SOP1_cnt:4;
	u32 debug_UD_Rx_EOP1_cnt:4;
	u32 debug_UD_Rx_SOP0_cnt:4;
	u32 debug_UD_Rx_EOP0_cnt:4;

	/*indx5*/
	u32 reserved5:32;

	/*indx6*/
	u32 debug_U_Tx_SOP3_cnt:4;
	u32 debug_U_Tx_EOP3_cnt:4;
	u32 debug_U_Tx_SOP2_cnt:4;
	u32 debug_U_Tx_EOP2_cnt:4;
	u32 debug_U_Tx_SOP1_cnt:4;
	u32 debug_U_Tx_EOP1_cnt:4;
	u32 debug_U_Tx_SOP0_cnt:4;
	u32 debug_U_Tx_EOP0_cnt:4;

	/*indx7*/
	u32 reserved6:32;

	/*indx8*/
	u32  debug_PC_Rsp_SOP1_cnt:4;
	u32  debug_PC_Rsp_EOP1_cnt:4;
	u32  debug_PC_Rsp_SOP0_cnt:4;
	u32  debug_PC_Rsp_EOP0_cnt:4;
	u32  debug_PC_Req_SOP1_cnt:4;
	u32  debug_PC_Req_EOP1_cnt:4;
	u32  debug_PC_Req_SOP0_cnt:4;
	u32  debug_PC_Req_EOP0_cnt:4;

	/*indx9*/
	u32 reserved7:32;

	/*indx10*/
	u32  debug_PD_Req_SOP3_cnt:4;
	u32  debug_PD_Req_EOP3_cnt:4;
	u32  debug_PD_Req_SOP2_cnt:4;
	u32  debug_PD_Req_EOP2_cnt:4;
	u32  debug_PD_Req_SOP1_cnt:4;
	u32  debug_PD_Req_EOP1_cnt:4;
	u32  debug_PD_Req_SOP0_cnt:4;
	u32  debug_PD_Req_EOP0_cnt:4;

	/*indx11*/
	u32 reserved8:32;

	/*indx12*/
	u32  debug_PD_Rsp_SOP3_cnt:4;
	u32  debug_PD_Rsp_EOP3_cnt:4;
	u32  debug_PD_Rsp_SOP2_cnt:4;
	u32  debug_PD_Rsp_EOP2_cnt:4;
	u32  debug_PD_Rsp_SOP1_cnt:4;
	u32  debug_PD_Rsp_EOP1_cnt:4;
	u32  debug_PD_Rsp_SOP0_cnt:4;
	u32  debug_PD_Rsp_EOP0_cnt:4;

	/*indx13*/
	u32 reserved9:32;

	/*indx14*/
	u32  debug_CPLSW_TP_Rx_SOP1_cnt:4;
	u32  debug_CPLSW_TP_Rx_EOP1_cnt:4;
	u32  debug_CPLSW_TP_Rx_SOP0_cnt:4;
	u32  debug_CPLSW_TP_Rx_EOP0_cnt:4;
	u32  debug_CPLSW_CIM_SOP1_cnt:4;
	u32  debug_CPLSW_CIM_EOP1_cnt:4;
	u32  debug_CPLSW_CIM_SOP0_cnt:4;
	u32  debug_CPLSW_CIM_EOP0_cnt:4;

	/*indx15*/
	u32 reserved10:32;

	/*indx16*/
	u32  debug_PD_Req_Rd3_cnt:4;
	u32  debug_PD_Req_Rd2_cnt:4;
	u32  debug_PD_Req_Rd1_cnt:4;
	u32  debug_PD_Req_Rd0_cnt:4;
	u32  debug_PD_Req_Int3_cnt:4;
	u32  debug_PD_Req_Int2_cnt:4;
	u32  debug_PD_Req_Int1_cnt:4;
	u32  debug_PD_Req_Int0_cnt:4;

};

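/*
 * TP MIB counters of interest, as {name, addr} pairs.  read_tp_mib_data()
 * below fills in the corresponding .value field of each entry.
 */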
struct tp_mib_type tp_mib[] = {
	{"tp_mib_mac_in_err_0", 0x0},
	{"tp_mib_mac_in_err_1", 0x1},
	{"tp_mib_mac_in_err_2", 0x2},
	{"tp_mib_mac_in_err_3", 0x3},
	{"tp_mib_hdr_in_err_0", 0x4},
	{"tp_mib_hdr_in_err_1", 0x5},
	{"tp_mib_hdr_in_err_2", 0x6},
	{"tp_mib_hdr_in_err_3", 0x7},
	{"tp_mib_tcp_in_err_0", 0x8},
	{"tp_mib_tcp_in_err_1", 0x9},
	{"tp_mib_tcp_in_err_2", 0xa},
	{"tp_mib_tcp_in_err_3", 0xb},
	{"tp_mib_tcp_out_rst", 0xc},
	{"tp_mib_tcp_in_seg_hi", 0x10},
	{"tp_mib_tcp_in_seg_lo", 0x11},
	{"tp_mib_tcp_out_seg_hi", 0x12},
	{"tp_mib_tcp_out_seg_lo", 0x13},
	{"tp_mib_tcp_rxt_seg_hi", 0x14},
	{"tp_mib_tcp_rxt_seg_lo", 0x15},
	{"tp_mib_tnl_cng_drop_0", 0x18},
	{"tp_mib_tnl_cng_drop_1", 0x19},
	{"tp_mib_tnl_cng_drop_2", 0x1a},
	{"tp_mib_tnl_cng_drop_3", 0x1b},
	{"tp_mib_ofd_chn_drop_0", 0x1c},
	{"tp_mib_ofd_chn_drop_1", 0x1d},
	{"tp_mib_ofd_chn_drop_2", 0x1e},
	{"tp_mib_ofd_chn_drop_3", 0x1f},
	{"tp_mib_tnl_out_pkt_0", 0x20},
	{"tp_mib_tnl_out_pkt_1", 0x21},
	{"tp_mib_tnl_out_pkt_2", 0x22},
	{"tp_mib_tnl_out_pkt_3", 0x23},
	{"tp_mib_tnl_in_pkt_0", 0x24},
	{"tp_mib_tnl_in_pkt_1", 0x25},
	{"tp_mib_tnl_in_pkt_2", 0x26},
	{"tp_mib_tnl_in_pkt_3", 0x27},
	{"tp_mib_tcp_v6in_err_0", 0x28},
	{"tp_mib_tcp_v6in_err_1", 0x29},
	{"tp_mib_tcp_v6in_err_2", 0x2a},
	{"tp_mib_tcp_v6in_err_3", 0x2b},
	{"tp_mib_tcp_v6out_rst", 0x2c},
	{"tp_mib_tcp_v6in_seg_hi", 0x30},
	{"tp_mib_tcp_v6in_seg_lo", 0x31},
	{"tp_mib_tcp_v6out_seg_hi", 0x32},
	{"tp_mib_tcp_v6out_seg_lo", 0x33},
	{"tp_mib_tcp_v6rxt_seg_hi", 0x34},
	{"tp_mib_tcp_v6rxt_seg_lo", 0x35},
	{"tp_mib_ofd_arp_drop", 0x36},
	{"tp_mib_ofd_dfr_drop", 0x37},
	{"tp_mib_cpl_in_req_0", 0x38},
	{"tp_mib_cpl_in_req_1", 0x39},
	{"tp_mib_cpl_in_req_2", 0x3a},
	{"tp_mib_cpl_in_req_3", 0x3b},
	{"tp_mib_cpl_out_rsp_0", 0x3c},
	{"tp_mib_cpl_out_rsp_1", 0x3d},
	{"tp_mib_cpl_out_rsp_2", 0x3e},
	{"tp_mib_cpl_out_rsp_3", 0x3f},
	{"tp_mib_tnl_lpbk_0", 0x40},
	{"tp_mib_tnl_lpbk_1", 0x41},
	{"tp_mib_tnl_lpbk_2", 0x42},
	{"tp_mib_tnl_lpbk_3", 0x43},
	{"tp_mib_tnl_drop_0", 0x44},
	{"tp_mib_tnl_drop_1", 0x45},
	{"tp_mib_tnl_drop_2", 0x46},
	{"tp_mib_tnl_drop_3", 0x47},
	{"tp_mib_fcoe_ddp_0", 0x48},
	{"tp_mib_fcoe_ddp_1", 0x49},
	{"tp_mib_fcoe_ddp_2", 0x4a},
	{"tp_mib_fcoe_ddp_3", 0x4b},
	{"tp_mib_fcoe_drop_0", 0x4c},
	{"tp_mib_fcoe_drop_1", 0x4d},
	{"tp_mib_fcoe_drop_2", 0x4e},
	{"tp_mib_fcoe_drop_3", 0x4f},
	{"tp_mib_fcoe_byte_0_hi", 0x50},
	{"tp_mib_fcoe_byte_0_lo", 0x51},
	{"tp_mib_fcoe_byte_1_hi", 0x52},
	{"tp_mib_fcoe_byte_1_lo", 0x53},
	{"tp_mib_fcoe_byte_2_hi", 0x54},
	{"tp_mib_fcoe_byte_2_lo", 0x55},
	{"tp_mib_fcoe_byte_3_hi", 0x56},
	{"tp_mib_fcoe_byte_3_lo", 0x57},
	{"tp_mib_ofd_vln_drop_0", 0x58},
	{"tp_mib_ofd_vln_drop_1", 0x59},
	{"tp_mib_ofd_vln_drop_2", 0x5a},
	{"tp_mib_ofd_vln_drop_3", 0x5b},
	{"tp_mib_usm_pkts", 0x5c},
	{"tp_mib_usm_drop", 0x5d},
	{"tp_mib_usm_bytes_hi", 0x5e},
	{"tp_mib_usm_bytes_lo", 0x5f},
	{"tp_mib_tid_del", 0x60},
	{"tp_mib_tid_inv", 0x61},
	{"tp_mib_tid_act", 0x62},
	{"tp_mib_tid_pas", 0x63},
	{"tp_mib_rqe_dfr_mod", 0x64},
	{"tp_mib_rqe_dfr_pkt", 0x65}
};

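/*
 * Walk SGE debug indices 0-15 and capture both debug data words for each
 * index into sge_dbg_reg[] (high word at the even slot, low word at the odd
 * slot), passing each word through HTONL_NIBBLE so the 4-bit counters line
 * up with the bitfields in struct sge_debug_reg_data above.
 */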
static u32 read_sge_debug_data(struct cudbg_init *pdbg_init, u32 *sge_dbg_reg)
{
	struct adapter *padap = pdbg_init->adap;
	u32 value;
	int i = 0;

	for (i = 0; i <= 15; i++) {
		t4_write_reg(padap, A_SGE_DEBUG_INDEX, (u32)i);
		value = t4_read_reg(padap, A_SGE_DEBUG_DATA_LOW);
		/*printf("LOW	 0x%08x\n", value);*/
		sge_dbg_reg[(i << 1) | 1] = HTONL_NIBBLE(value);
		value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH);
		/*printf("HIGH	 0x%08x\n", value);*/
		sge_dbg_reg[(i << 1)] = HTONL_NIBBLE(value);
	}
	return 0;
}

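/*
 * Read every TP MIB counter listed in tp_mib[] into its .value field and
 * hand the caller a pointer to the array viewed as a struct tp_mib_data.
 */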
static u32 read_tp_mib_data(struct cudbg_init *pdbg_init,
			    struct tp_mib_data **ppTp_Mib)
{
	struct adapter *padap = pdbg_init->adap;
	u32 i = 0;

	for (i = 0; i < TP_MIB_SIZE; i++) {
		t4_tp_mib_read(padap, &tp_mib[i].value, 1,
				  (u32)tp_mib[i].addr, true);
	}
	*ppTp_Mib = (struct tp_mib_data *)&tp_mib[0];

	return 0;
}

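/*
 * Collect the T5 WTP dump: snapshot the SOP/EOP counters along the TX path
 * (PCIe/SGE through ULP_TX, TP and MPS out to the MACs) and the RX path
 * (wire/MACs through MPS, TP and the CPL switch back to the SGE and PCIe),
 * together with the relevant TP MIB drop counters, into a wtp_data record
 * that is then compressed into the debug buffer.
 */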
static int t5_wtp_data(struct cudbg_init *pdbg_init,
		       struct cudbg_buffer *dbg_buff,
		       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct sge_debug_reg_data *sge_dbg_reg = NULL;
	struct cudbg_buffer scratch_buff;
	struct tp_mib_data *ptp_mib = NULL;
	struct wtp_data *wtp;
	u32 Sge_Dbg[32] = {0};
	u32 value = 0;
	u32 i = 0;
	u32 drop = 0;
	u32 err = 0;
	u32 offset;
	int rc = 0;

	rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);

	if (rc)
		goto err;

	offset = scratch_buff.offset;
	wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);

	read_sge_debug_data(pdbg_init, Sge_Dbg);
	read_tp_mib_data(pdbg_init, &ptp_mib);

	sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];

	/*#######################################################################*/
	/*# TX PATH, starting from pcie*/
	/*#######################################################################*/

	/* Get Requests of commands from SGE to PCIE*/

	wtp->sge_pcie_cmd_req.sop[0] =	sge_dbg_reg->debug_PC_Req_SOP0_cnt;
	wtp->sge_pcie_cmd_req.sop[1] =	sge_dbg_reg->debug_PC_Req_SOP1_cnt;

	wtp->sge_pcie_cmd_req.eop[0] =	sge_dbg_reg->debug_PC_Req_EOP0_cnt;
	wtp->sge_pcie_cmd_req.eop[1] =	sge_dbg_reg->debug_PC_Req_EOP1_cnt;

	/* Get Requests of commands from PCIE to core*/
	value = t4_read_reg(padap, A_PCIE_CMDR_REQ_CNT);

	wtp->pcie_core_cmd_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_cmd_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	/* there is no EOP for this, so we fake it.*/
	wtp->pcie_core_cmd_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_cmd_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/

	/* Get DMA stats*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
		wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
		wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
	}

	/* Get SGE debug data high index 6*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_6);
	wtp->sge_debug_data_high_index_6.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_index_6.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[1] = ((value >> 8) & 0x0F);
	wtp->sge_debug_data_high_index_6.sop[2] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[2] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_index_6.sop[3] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_index_6.eop[3] = ((value >> 24) & 0x0F);

	/* Get SGE debug data high index 3*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_3);
	wtp->sge_debug_data_high_index_3.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_index_3.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[1] = ((value >> 8) & 0x0F);
	wtp->sge_debug_data_high_index_3.sop[2] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[2] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_index_3.sop[3] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_index_3.eop[3] = ((value >> 24) & 0x0F);

	/* Get ULP SE CNT CHx*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
		wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
		wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
	}

	/* Get MAC PORTx PKT COUNT*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
		wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
		wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
		wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
		wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
	}

	/* Get mac portx aFramesTransmittedok*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, 0x30a80 + ((i * 4) << 12));
		wtp->mac_portx_aframestra_ok.sop[i] = (value & 0xFF);
		wtp->mac_portx_aframestra_ok.eop[i] = (value & 0xFF);
	}

	/* Get command responses from core to PCIE*/
	value = t4_read_reg(padap, A_PCIE_CMDR_RSP_CNT);

	wtp->core_pcie_cmd_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->core_pcie_cmd_rsp.sop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/

	wtp->core_pcie_cmd_rsp.eop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->core_pcie_cmd_rsp.eop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/

	/*Get command Responses from PCIE to SGE*/
	wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
	wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;

	wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
	wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;

	/* Get commands sent from SGE to CIM/uP*/
	wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
	wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;

	wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
	wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;

	/* Get Requests of data from PCIE by SGE*/
	wtp->utx_sge_dma_req.sop[0] = sge_dbg_reg->debug_UD_Rx_SOP0_cnt;
	wtp->utx_sge_dma_req.sop[1] = sge_dbg_reg->debug_UD_Rx_SOP1_cnt;
	wtp->utx_sge_dma_req.sop[2] = sge_dbg_reg->debug_UD_Rx_SOP2_cnt;
	wtp->utx_sge_dma_req.sop[3] = sge_dbg_reg->debug_UD_Rx_SOP3_cnt;

	wtp->utx_sge_dma_req.eop[0] = sge_dbg_reg->debug_UD_Rx_EOP0_cnt;
	wtp->utx_sge_dma_req.eop[1] = sge_dbg_reg->debug_UD_Rx_EOP1_cnt;
	wtp->utx_sge_dma_req.eop[2] = sge_dbg_reg->debug_UD_Rx_EOP2_cnt;
	wtp->utx_sge_dma_req.eop[3] = sge_dbg_reg->debug_UD_Rx_EOP3_cnt;

	/* Get Requests of data from PCIE by SGE*/
	wtp->sge_pcie_dma_req.sop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
	wtp->sge_pcie_dma_req.sop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
	wtp->sge_pcie_dma_req.sop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
	wtp->sge_pcie_dma_req.sop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;
	/*no EOP's, so fake it.*/
	wtp->sge_pcie_dma_req.eop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
	wtp->sge_pcie_dma_req.eop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
	wtp->sge_pcie_dma_req.eop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
	wtp->sge_pcie_dma_req.eop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;

	/* Get Requests of data from PCIE to core*/
	value = t4_read_reg(padap, A_PCIE_DMAR_REQ_CNT);

	wtp->pcie_core_dma_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dma_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dma_req.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dma_req.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
	/* There is no eop so fake it.*/
	wtp->pcie_core_dma_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dma_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dma_req.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dma_req.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	/* Get data responses from core to PCIE*/
	value = t4_read_reg(padap, A_PCIE_DMAR_RSP_SOP_CNT);

	wtp->core_pcie_dma_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->core_pcie_dma_rsp.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->core_pcie_dma_rsp.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->core_pcie_dma_rsp.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	value = t4_read_reg(padap, A_PCIE_DMAR_RSP_EOP_CNT);

	wtp->core_pcie_dma_rsp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->core_pcie_dma_rsp.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->core_pcie_dma_rsp.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->core_pcie_dma_rsp.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	/* Get PCIE_DATA to SGE*/
	wtp->pcie_sge_dma_rsp.sop[0] = sge_dbg_reg->debug_PD_Rsp_SOP0_cnt;
	wtp->pcie_sge_dma_rsp.sop[1] = sge_dbg_reg->debug_PD_Rsp_SOP1_cnt;
	wtp->pcie_sge_dma_rsp.sop[2] = sge_dbg_reg->debug_PD_Rsp_SOP2_cnt;
	wtp->pcie_sge_dma_rsp.sop[3] = sge_dbg_reg->debug_PD_Rsp_SOP3_cnt;

	wtp->pcie_sge_dma_rsp.eop[0] = sge_dbg_reg->debug_PD_Rsp_EOP0_cnt;
	wtp->pcie_sge_dma_rsp.eop[1] = sge_dbg_reg->debug_PD_Rsp_EOP1_cnt;
	wtp->pcie_sge_dma_rsp.eop[2] = sge_dbg_reg->debug_PD_Rsp_EOP2_cnt;
	wtp->pcie_sge_dma_rsp.eop[3] = sge_dbg_reg->debug_PD_Rsp_EOP3_cnt;

	/*Get SGE to ULP_TX*/
	wtp->sge_utx.sop[0] = sge_dbg_reg->debug_U_Tx_SOP0_cnt;
	wtp->sge_utx.sop[1] = sge_dbg_reg->debug_U_Tx_SOP1_cnt;
	wtp->sge_utx.sop[2] = sge_dbg_reg->debug_U_Tx_SOP2_cnt;
	wtp->sge_utx.sop[3] = sge_dbg_reg->debug_U_Tx_SOP3_cnt;

	wtp->sge_utx.eop[0] = sge_dbg_reg->debug_U_Tx_EOP0_cnt;
	wtp->sge_utx.eop[1] = sge_dbg_reg->debug_U_Tx_EOP1_cnt;
	wtp->sge_utx.eop[2] = sge_dbg_reg->debug_U_Tx_EOP2_cnt;
	wtp->sge_utx.eop[3] = sge_dbg_reg->debug_U_Tx_EOP3_cnt;

	/* Get ULP_TX to TP*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, (A_ULP_TX_SE_CNT_CH0 + (i*4)));

		wtp->utx_tp.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
		wtp->utx_tp.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
	}

	/* Get TP_DBG_CSIDE registers*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
			       true);

		wtp->utx_tpcside.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->utx_tpcside.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
		wtp->tpcside_rxpld.sop[i] = ((value >> 20) & 0xF);/*bits 20:23*/
		wtp->tpcside_rxpld.eop[i] = ((value >> 16) & 0xF);/*bits 16:19*/
		wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
		wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
		wtp->tpcside_rxcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->tpcside_rxcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
	}

	/* TP_DBG_ESIDE*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
		wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
		wtp->tpeside_pm.sop[i]	= ((value >> 20) & 0xF); /*bits 20:23*/
		wtp->tpeside_pm.eop[i]	= ((value >> 16) & 0xF); /*bits 16:19*/
		wtp->mps_tpeside.sop[i] = ((value >> 12) & 0xF); /*bits 12:15*/
		wtp->mps_tpeside.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
		wtp->tpeside_pld.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->tpeside_pld.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/

	}

	/*PCIE CMD STAT2*/
	for (i = 0; i < 3; i++) {
		value = t4_read_reg(padap, 0x5988 + (i * 0x10));
		wtp->pcie_cmd_stat2.sop[i] = value & 0xFF;
		wtp->pcie_cmd_stat2.eop[i] = value & 0xFF;
	}

	/*PCIE cmd stat3*/
	for (i = 0; i < 3; i++) {
		value = t4_read_reg(padap, 0x598c + (i * 0x10));
		wtp->pcie_cmd_stat3.sop[i] = value & 0xFF;
		wtp->pcie_cmd_stat3.eop[i] = value & 0xFF;
	}

	/* ULP_RX input/output*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));

		wtp->pmrx_ulprx.sop[i]	  = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->pmrx_ulprx.eop[i]	  = ((value >> 0) & 0xF); /*bits 0:3*/
		wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
	}

	/* Get the MPS input from TP*/
	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
		wtp->tp_mps.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->tp_mps.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								    */
		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								    */
	}
	drop  = ptp_mib->TP_MIB_OFD_ARP_DROP.value;
	drop += ptp_mib->TP_MIB_OFD_DFR_DROP.value;

	drop += ptp_mib->TP_MIB_TNL_DROP_0.value;
	drop += ptp_mib->TP_MIB_TNL_DROP_1.value;
	drop += ptp_mib->TP_MIB_TNL_DROP_2.value;
	drop += ptp_mib->TP_MIB_TNL_DROP_3.value;

	wtp->tp_mps.drops = drop;

	/* Get the MPS output to the MAC's*/
	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
		wtp->mps_xgm.sop[(i*2)]     = ((value >> 8) & 0xFF);/*bit 8:15*/
		wtp->mps_xgm.eop[(i*2)]     = ((value >> 0) & 0xFF);/*bit 0:7*/
		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								     */
		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								     */
	}
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MPS_PORT_STAT_TX_PORT_DROP_L) +
				(i * T5_PORT_STRIDE)));
		drop += value;
	}
	wtp->mps_xgm.drops = (drop & 0xFF);

	/* Get the SOP/EOP counters into and out of MAC. [JHANEL] I think this
	 * is clear on read, so you have to read both the TX and RX paths at
	 * the same time.*/
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MAC_PORT_PKT_COUNT) +
				(i * T5_PORT_STRIDE)));

		wtp->tx_xgm_xgm.sop[i] = ((value >> 24) & 0xFF); /*bit 24:31*/
		wtp->tx_xgm_xgm.eop[i] = ((value >> 16) & 0xFF); /*bit 16:23*/
		wtp->rx_xgm_xgm.sop[i] = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->rx_xgm_xgm.eop[i] = ((value >> 0) & 0xFF); /*bit 0:7*/
	}

	/* Get the MAC's output to the wire*/
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MAC_PORT_AFRAMESTRANSMITTEDOK) +
				(i * T5_PORT_STRIDE)));
		wtp->xgm_wire.sop[i] = (value);
		wtp->xgm_wire.eop[i] = (value); /* No EOP for XGMAC, so fake
						   it.*/
	}

	/*########################################################################*/
	/*# RX PATH, starting from wire*/
	/*########################################################################*/

	/* Add up the wire input to the MAC*/
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				(T5_PORT0_REG(A_MAC_PORT_AFRAMESRECEIVEDOK) +
				(i * T5_PORT_STRIDE)));

		wtp->wire_xgm.sop[i] = (value);
		wtp->wire_xgm.eop[i] = (value); /* No EOP for XGMAC, so fake
						   it.*/
	}

	/* Already read the rx_xgm_xgm when reading TX path.*/

	/* Add up SOP/EOP's on all 8 MPS buffer channels*/
	drop = 0;
	for (i = 0; i < 8; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));

		wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
		wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
	}
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
		/* typo in JHANEL's code.*/
		drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
	}
	wtp->xgm_mps.cls_drop = drop & 0xFF;

	/* Add up the overflow drops on all 4 ports.*/
	drop = 0;
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));

		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));

		value = t4_read_reg(padap,
			T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
			(i * T5_PORT_STRIDE));
		drop += value;
	}
	wtp->xgm_mps.drop = (drop & 0xFF);

	/* Add up the MPS errors that should result in dropped packets*/
	err = 0;
	for (i = 0; i < 4; i++) {

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG((A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE) + 4)));
	}
	wtp->xgm_mps.err = (err & 0xFF);

	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_OUT01 + (i << 2)));

		wtp->mps_tp.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->mps_tp.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->mps_tp.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								    */
		wtp->mps_tp.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								    */
	}
	drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_2.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_3.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_2.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_3.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_2.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_3.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_2.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_3.value;
	drop += ptp_mib->TP_MIB_USM_DROP.value;

	wtp->mps_tp.drops = drop;

	/* Get TP_DBG_CSIDE_TX registers*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
			       true);

		wtp->tpcside_csw.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->tpcside_csw.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
		wtp->tpcside_pm.sop[i]	  = ((value >> 20) & 0xF);/*bits 20:23*/
		wtp->tpcside_pm.eop[i]	  = ((value >> 16) & 0xF);/*bits 16:19*/
		wtp->tpcside_uturn.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
		wtp->tpcside_uturn.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
		wtp->tpcside_txcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->tpcside_txcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
	}

	/* TP to CPL_SWITCH*/
	wtp->tp_csw.sop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP0_cnt;
	wtp->tp_csw.sop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP1_cnt;

	wtp->tp_csw.eop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP0_cnt;
	wtp->tp_csw.eop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP1_cnt;

	/* TP/CPL_SWITCH to SGE*/
	wtp->csw_sge.sop[0] = sge_dbg_reg->debug_T_Rx_SOP0_cnt;
	wtp->csw_sge.sop[1] = sge_dbg_reg->debug_T_Rx_SOP1_cnt;

	wtp->csw_sge.eop[0] = sge_dbg_reg->debug_T_Rx_EOP0_cnt;
	wtp->csw_sge.eop[1] = sge_dbg_reg->debug_T_Rx_EOP1_cnt;

	wtp->sge_pcie.sop[0] = sge_dbg_reg->debug_PD_Req_SOP0_cnt;
	wtp->sge_pcie.sop[1] = sge_dbg_reg->debug_PD_Req_SOP1_cnt;
	wtp->sge_pcie.sop[2] = sge_dbg_reg->debug_PD_Req_SOP2_cnt;
	wtp->sge_pcie.sop[3] = sge_dbg_reg->debug_PD_Req_SOP3_cnt;

	wtp->sge_pcie.eop[0] = sge_dbg_reg->debug_PD_Req_EOP0_cnt;
	wtp->sge_pcie.eop[1] = sge_dbg_reg->debug_PD_Req_EOP1_cnt;
	wtp->sge_pcie.eop[2] = sge_dbg_reg->debug_PD_Req_EOP2_cnt;
	wtp->sge_pcie.eop[3] = sge_dbg_reg->debug_PD_Req_EOP3_cnt;

	wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
	wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
	wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
	wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;
	/* NO EOP, so fake it.*/
	wtp->sge_pcie_ints.eop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
	wtp->sge_pcie_ints.eop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
	wtp->sge_pcie_ints.eop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
	wtp->sge_pcie_ints.eop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;

	/*Get PCIE DMA1 STAT2*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
		wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2_core.sop[i] += value & 0x0F;
		wtp->pcie_dma1_stat2_core.eop[i] += value & 0x0F;
	}

	/* Get mac porrx aFramesTransmittedok*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, 0x30a88 + ((i * 4) << 12));
		wtp->mac_porrx_aframestra_ok.sop[i] = (value & 0xFF);
		wtp->mac_porrx_aframestra_ok.eop[i] = (value & 0xFF);
	}

	/*Get SGE debug data high index 7*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
	wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[2] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[2] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[3] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[3] = ((value >> 24) & 0x0F);

	/*Get SGE debug data high index 1*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
	wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);

	/*Get TP debug CSIDE Tx registers*/
	for (i = 0; i < 2; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
			       true);

		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31
								   */
		wtp->utx_tpcside_tx.eop[i] = ((value >> 24) & 0xF);
	}

	/*Get SGE debug data high index 9*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
	wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);
	wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_work_req_pkt.sop[1] = ((value >> 12) & 0x0F);

	/*Get LE DB response count*/
	value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
	wtp->le_db_rsp_cnt.sop = value & 0xF;
	wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;

	/*Get TP debug Eside PKTx*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
		wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
	}

	/* Get data responses from core to PCIE*/
	value = t4_read_reg(padap, A_PCIE_DMAW_SOP_CNT);

	wtp->pcie_core_dmaw.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmaw.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmaw.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmaw.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	value = t4_read_reg(padap, A_PCIE_DMAW_EOP_CNT);

	wtp->pcie_core_dmaw.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmaw.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmaw.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmaw.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	value = t4_read_reg(padap, A_PCIE_DMAI_CNT);

	wtp->pcie_core_dmai.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmai.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmai.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmai.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
	/* no eop for interrupts, just fake it.*/
	wtp->pcie_core_dmai.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->pcie_core_dmai.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->pcie_core_dmai.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
	wtp->pcie_core_dmai.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/

	rc = write_compression_hdr(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

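/*
 * Collect the T6 WTP dump.  Same idea as t5_wtp_data() above, but using the
 * T6 register layout and only two ports/channels in the per-port loops.
 */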
static int t6_wtp_data(struct cudbg_init *pdbg_init,
		       struct cudbg_buffer *dbg_buff,
		       struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	struct sge_debug_reg_data *sge_dbg_reg = NULL;
	struct cudbg_buffer scratch_buff;
	struct tp_mib_data *ptp_mib = NULL;
	struct wtp_data *wtp;
	u32 Sge_Dbg[32] = {0};
	u32 value = 0;
	u32 i = 0;
	u32 drop = 0;
	u32 err = 0;
	u32 offset;
	int rc = 0;

	rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);

	if (rc)
		goto err;

	offset = scratch_buff.offset;
	wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);

	read_sge_debug_data(pdbg_init, Sge_Dbg);
	read_tp_mib_data(pdbg_init, &ptp_mib);

	sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];

	/*# TX PATH*/

	/*PCIE CMD STAT2*/
	value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT2);
	wtp->pcie_cmd_stat2.sop[0] = value & 0xFF;
	wtp->pcie_cmd_stat2.eop[0] = value & 0xFF;

	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
	wtp->sge_pcie_cmd_req.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_pcie_cmd_req.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_pcie_cmd_req.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_pcie_cmd_req.eop[1] = ((value >> 24) & 0x0F);

	value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT3);
	wtp->pcie_cmd_stat3.sop[0] = value & 0xFF;
	wtp->pcie_cmd_stat3.eop[0] = value & 0xFF;

	/*Get command Responses from PCIE to SGE*/
	wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
	wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
	wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;
	wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;

	/* Get commands sent from SGE to CIM/uP*/
	wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
	wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;

	wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
	wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;

	/*Get SGE debug data high index 9*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
	wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_work_req_pkt.eop[0] = ((value >> 0) & 0x0F);

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
		wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
		wtp->pcie_dma1_stat2_core.sop[i] = value & 0x0F;
		wtp->pcie_dma1_stat2_core.eop[i] = value & 0x0F;
	}

	/* Get DMA0 stats3*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
		wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
		wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
	}

	/* Get ULP SE CNT CHx*/
	for (i = 0; i < 4; i++) {
		value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
		wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
		wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
	}

	/* Get TP_DBG_CSIDE registers*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
			       true);

		wtp->utx_tpcside.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->utx_tpcside.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
		wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
		wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
	}

	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);


		wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
		wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
	}

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
		wtp->tp_mps.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
		wtp->tp_mps.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								    */
		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								    */
	}

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
		wtp->mps_xgm.sop[(i*2)]     = ((value >> 8) & 0xFF);/*bit 8:15*/
		wtp->mps_xgm.eop[(i*2)]     = ((value >> 0) & 0xFF); /*bit 0:7*/
		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
								     */
		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
								     */
	}

	/* Get MAC PORTx PKT COUNT*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
		wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
		wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
		wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
		wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
	}

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30f20 + ((i * 4) << 12));
		wtp->mac_portx_aframestra_ok.sop[i] = value & 0xff;
		wtp->mac_portx_aframestra_ok.eop[i] = value & 0xff;
	}

	/*MAC_PORT_MTIP_1G10G_TX_etherStatsPkts*/

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30f60 + ((i * 4) << 12));
		wtp->mac_portx_etherstatspkts.sop[i] = value & 0xff;
		wtp->mac_portx_etherstatspkts.eop[i] = value & 0xff;
	}

	/*RX path*/

	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
	wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
	wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
	wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);

	/*Get SGE debug data high index 1*/
	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
	wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
	wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);

	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
	wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
	wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);

	wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
	wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);

	for (i = 0; i < 2; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
			       true);

		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31
								   */
		wtp->utx_tpcside_tx.eop[i]   = ((value >> 24) & 0xF);
	}

	/*ULP_RX input/output*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));

		wtp->pmrx_ulprx.sop[i]	  = ((value >> 4) & 0xF); /*bits 4:7*/
		wtp->pmrx_ulprx.eop[i]	  = ((value >> 0) & 0xF); /*bits 0:3*/
		wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
		wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
	}

	/*Get LE DB response count*/
	value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
	wtp->le_db_rsp_cnt.sop = value & 0xF;
	wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;

	/*Get TP debug Eside PKTx*/
	for (i = 0; i < 4; i++) {
		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
			       true);

		wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
		wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
	}

	drop = 0;
	/*MPS_RX_SE_CNT_OUT01*/
	value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_OUT01 + (i << 2)));
	wtp->mps_tp.sop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
	wtp->mps_tp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
	wtp->mps_tp.sop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/
	wtp->mps_tp.eop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/

	drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
	drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
	drop += ptp_mib->TP_MIB_USM_DROP.value;

	wtp->mps_tp.drops = drop;

	drop = 0;
	for (i = 0; i < 8; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));

		wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
		wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
	}
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
		drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
	}
	wtp->xgm_mps.cls_drop = drop & 0xFF;

	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30e20 + ((i * 4) << 12));
		wtp->mac_porrx_aframestra_ok.sop[i] = value & 0xff;
		wtp->mac_porrx_aframestra_ok.eop[i] = value & 0xff;
	}

	/*MAC_PORT_MTIP_1G10G_RX_etherStatsPkts*/
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap, 0x30e60 + ((i * 4) << 12));
		wtp->mac_porrx_etherstatspkts.sop[i] = value & 0xff;
		wtp->mac_porrx_etherstatspkts.eop[i] = value & 0xff;
	}

	wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
	wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
	wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
	wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;

	/* Add up the overflow drops on all 4 ports.*/
	drop = 0;
	for (i = 0; i < 2; i++) {
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     (i << 2)));

		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
				     (i << 3)));
		drop += value;
		value = t4_read_reg(padap,
				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     (i << 3)));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
			(i * T5_PORT_STRIDE)));
		drop += value;
	}
	wtp->xgm_mps.drop = (drop & 0xFF);

	/* Add up the MPS errors that should result in dropped packets*/
	err = 0;
	for (i = 0; i < 2; i++) {

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
				     (i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
			(i * T5_PORT_STRIDE) + 4));

		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE)));
		err += value;
		value = t4_read_reg(padap,
			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
			(i * T5_PORT_STRIDE) + 4));
	}
	wtp->xgm_mps.err = (err & 0xFF);

	rc = write_compression_hdr(&scratch_buff, dbg_buff);

	if (rc)
		goto err1;

	rc = compress_buff(&scratch_buff, dbg_buff);

err1:
	release_scratch_buff(&scratch_buff, dbg_buff);
err:
	return rc;
}

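/*
 * Entry point for the WTP entity: dispatch to the T5 or T6 collector based
 * on the adapter type; other chips are not supported and return an error.
 */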
int collect_wtp_data(struct cudbg_init *pdbg_init,
		     struct cudbg_buffer *dbg_buff,
		     struct cudbg_error *cudbg_err)
{
	struct adapter *padap = pdbg_init->adap;
	int rc = -1;

	if (is_t5(padap))
		rc = t5_wtp_data(pdbg_init, dbg_buff, cudbg_err);
	else if (is_t6(padap))
		rc = t6_wtp_data(pdbg_init, dbg_buff, cudbg_err);

	return rc;
}