xref: /freebsd/sys/dev/cxgbe/cudbg/cudbg_wtp.c (revision fdafd315ad0d0f28a11b9fb4476a9ab059c62b92)
1 /*-
2  * Copyright (c) 2017 Chelsio Communications, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/param.h>
29 
30 #include "common/common.h"
31 #include "common/t4_regs.h"
32 #include "cudbg.h"
33 #include "cudbg_lib_common.h"
34 #include "cudbg_entity.h"
35 
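/*
 * cudbg "WTP" entity: snapshots the SOP/EOP (start-of-packet / end-of-packet)
 * counters along the TX and RX datapaths of T5/T6 adapters, presumably so a
 * post-processing tool can see where packets are stalling or being dropped.
 */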
36 int collect_wtp_data(struct cudbg_init *pdbg_init,
37 		     struct cudbg_buffer *dbg_buff,
38 		     struct cudbg_error *cudbg_err);
39 /* Number of entries in the tp_mib[] table below. */
40 #define TP_MIB_SIZE	    0x5e
41 
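/*
 * Overlay for the SGE_DEBUG words captured by read_sge_debug_data() below.
 * Each "indx<N>" group corresponds to one 32-bit word of the 16 debug-index
 * reads (A_SGE_DEBUG_DATA_HIGH in the even slot, A_SGE_DEBUG_DATA_LOW in the
 * odd slot), and each named field is a 4-bit SOP/EOP counter for one channel
 * of the interface named in the field.
 */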
42 struct sge_debug_reg_data {
43 	/*indx0*/
44 	u32 reserved1:4;
45 	u32 reserved2:4;
46 	u32 debug_uP_SOP_cnt:4;
47 	u32 debug_uP_EOP_cnt:4;
48 	u32 debug_CIM_SOP1_cnt:4;
49 	u32 debug_CIM_EOP1_cnt:4;
50 	u32 debug_CIM_SOP0_cnt:4;
51 	u32 debug_CIM_EOP0_cnt:4;
52 
53 	/*indx1*/
54 	u32 reserved3:32;
55 
56 	/*indx2*/
57 	u32 debug_T_Rx_SOP1_cnt:4;
58 	u32 debug_T_Rx_EOP1_cnt:4;
59 	u32 debug_T_Rx_SOP0_cnt:4;
60 	u32 debug_T_Rx_EOP0_cnt:4;
61 	u32 debug_U_Rx_SOP1_cnt:4;
62 	u32 debug_U_Rx_EOP1_cnt:4;
63 	u32 debug_U_Rx_SOP0_cnt:4;
64 	u32 debug_U_Rx_EOP0_cnt:4;
65 
66 	/*indx3*/
67 	u32 reserved4:32;
68 
69 	/*indx4*/
70 	u32 debug_UD_Rx_SOP3_cnt:4;
71 	u32 debug_UD_Rx_EOP3_cnt:4;
72 	u32 debug_UD_Rx_SOP2_cnt:4;
73 	u32 debug_UD_Rx_EOP2_cnt:4;
74 	u32 debug_UD_Rx_SOP1_cnt:4;
75 	u32 debug_UD_Rx_EOP1_cnt:4;
76 	u32 debug_UD_Rx_SOP0_cnt:4;
77 	u32 debug_UD_Rx_EOP0_cnt:4;
78 
79 	/*indx5*/
80 	u32 reserved5:32;
81 
82 	/*indx6*/
83 	u32 debug_U_Tx_SOP3_cnt:4;
84 	u32 debug_U_Tx_EOP3_cnt:4;
85 	u32 debug_U_Tx_SOP2_cnt:4;
86 	u32 debug_U_Tx_EOP2_cnt:4;
87 	u32 debug_U_Tx_SOP1_cnt:4;
88 	u32 debug_U_Tx_EOP1_cnt:4;
89 	u32 debug_U_Tx_SOP0_cnt:4;
90 	u32 debug_U_Tx_EOP0_cnt:4;
91 
92 	/*indx7*/
93 	u32 reserved6:32;
94 
95 	/*indx8*/
96 	u32  debug_PC_Rsp_SOP1_cnt:4;
97 	u32  debug_PC_Rsp_EOP1_cnt:4;
98 	u32  debug_PC_Rsp_SOP0_cnt:4;
99 	u32  debug_PC_Rsp_EOP0_cnt:4;
100 	u32  debug_PC_Req_SOP1_cnt:4;
101 	u32  debug_PC_Req_EOP1_cnt:4;
102 	u32  debug_PC_Req_SOP0_cnt:4;
103 	u32  debug_PC_Req_EOP0_cnt:4;
104 
105 	/*indx9*/
106 	u32 reserved7:32;
107 
108 	/*indx10*/
109 	u32  debug_PD_Req_SOP3_cnt:4;
110 	u32  debug_PD_Req_EOP3_cnt:4;
111 	u32  debug_PD_Req_SOP2_cnt:4;
112 	u32  debug_PD_Req_EOP2_cnt:4;
113 	u32  debug_PD_Req_SOP1_cnt:4;
114 	u32  debug_PD_Req_EOP1_cnt:4;
115 	u32  debug_PD_Req_SOP0_cnt:4;
116 	u32  debug_PD_Req_EOP0_cnt:4;
117 
118 	/*indx11*/
119 	u32 reserved8:32;
120 
121 	/*indx12*/
122 	u32  debug_PD_Rsp_SOP3_cnt:4;
123 	u32  debug_PD_Rsp_EOP3_cnt:4;
124 	u32  debug_PD_Rsp_SOP2_cnt:4;
125 	u32  debug_PD_Rsp_EOP2_cnt:4;
126 	u32  debug_PD_Rsp_SOP1_cnt:4;
127 	u32  debug_PD_Rsp_EOP1_cnt:4;
128 	u32  debug_PD_Rsp_SOP0_cnt:4;
129 	u32  debug_PD_Rsp_EOP0_cnt:4;
130 
131 	/*indx13*/
132 	u32 reserved9:32;
133 
134 	/*indx14*/
135 	u32  debug_CPLSW_TP_Rx_SOP1_cnt:4;
136 	u32  debug_CPLSW_TP_Rx_EOP1_cnt:4;
137 	u32  debug_CPLSW_TP_Rx_SOP0_cnt:4;
138 	u32  debug_CPLSW_TP_Rx_EOP0_cnt:4;
139 	u32  debug_CPLSW_CIM_SOP1_cnt:4;
140 	u32  debug_CPLSW_CIM_EOP1_cnt:4;
141 	u32  debug_CPLSW_CIM_SOP0_cnt:4;
142 	u32  debug_CPLSW_CIM_EOP0_cnt:4;
143 
144 	/*indx15*/
145 	u32 reserved10:32;
146 
147 	/*indx16*/
148 	u32  debug_PD_Req_Rd3_cnt:4;
149 	u32  debug_PD_Req_Rd2_cnt:4;
150 	u32  debug_PD_Req_Rd1_cnt:4;
151 	u32  debug_PD_Req_Rd0_cnt:4;
152 	u32  debug_PD_Req_Int3_cnt:4;
153 	u32  debug_PD_Req_Int2_cnt:4;
154 	u32  debug_PD_Req_Int1_cnt:4;
155 	u32  debug_PD_Req_Int0_cnt:4;
156 
157 };
158 
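/*
 * TP MIB counters gathered for the WTP report: each entry pairs a counter
 * name with the TP MIB index that read_tp_mib_data() passes to
 * t4_tp_mib_read().
 */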
159 struct tp_mib_type tp_mib[] = {
160 	{"tp_mib_mac_in_err_0", 0x0},
161 	{"tp_mib_mac_in_err_1", 0x1},
162 	{"tp_mib_mac_in_err_2", 0x2},
163 	{"tp_mib_mac_in_err_3", 0x3},
164 	{"tp_mib_hdr_in_err_0", 0x4},
165 	{"tp_mib_hdr_in_err_1", 0x5},
166 	{"tp_mib_hdr_in_err_2", 0x6},
167 	{"tp_mib_hdr_in_err_3", 0x7},
168 	{"tp_mib_tcp_in_err_0", 0x8},
169 	{"tp_mib_tcp_in_err_1", 0x9},
170 	{"tp_mib_tcp_in_err_2", 0xa},
171 	{"tp_mib_tcp_in_err_3", 0xb},
172 	{"tp_mib_tcp_out_rst", 0xc},
173 	{"tp_mib_tcp_in_seg_hi", 0x10},
174 	{"tp_mib_tcp_in_seg_lo", 0x11},
175 	{"tp_mib_tcp_out_seg_hi", 0x12},
176 	{"tp_mib_tcp_out_seg_lo", 0x13},
177 	{"tp_mib_tcp_rxt_seg_hi", 0x14},
178 	{"tp_mib_tcp_rxt_seg_lo", 0x15},
179 	{"tp_mib_tnl_cng_drop_0", 0x18},
180 	{"tp_mib_tnl_cng_drop_1", 0x19},
181 	{"tp_mib_tnl_cng_drop_2", 0x1a},
182 	{"tp_mib_tnl_cng_drop_3", 0x1b},
183 	{"tp_mib_ofd_chn_drop_0", 0x1c},
184 	{"tp_mib_ofd_chn_drop_1", 0x1d},
185 	{"tp_mib_ofd_chn_drop_2", 0x1e},
186 	{"tp_mib_ofd_chn_drop_3", 0x1f},
187 	{"tp_mib_tnl_out_pkt_0", 0x20},
188 	{"tp_mib_tnl_out_pkt_1", 0x21},
189 	{"tp_mib_tnl_out_pkt_2", 0x22},
190 	{"tp_mib_tnl_out_pkt_3", 0x23},
191 	{"tp_mib_tnl_in_pkt_0", 0x24},
192 	{"tp_mib_tnl_in_pkt_1", 0x25},
193 	{"tp_mib_tnl_in_pkt_2", 0x26},
194 	{"tp_mib_tnl_in_pkt_3", 0x27},
195 	{"tp_mib_tcp_v6in_err_0", 0x28},
196 	{"tp_mib_tcp_v6in_err_1", 0x29},
197 	{"tp_mib_tcp_v6in_err_2", 0x2a},
198 	{"tp_mib_tcp_v6in_err_3", 0x2b},
199 	{"tp_mib_tcp_v6out_rst", 0x2c},
200 	{"tp_mib_tcp_v6in_seg_hi", 0x30},
201 	{"tp_mib_tcp_v6in_seg_lo", 0x31},
202 	{"tp_mib_tcp_v6out_seg_hi", 0x32},
203 	{"tp_mib_tcp_v6out_seg_lo", 0x33},
204 	{"tp_mib_tcp_v6rxt_seg_hi", 0x34},
205 	{"tp_mib_tcp_v6rxt_seg_lo", 0x35},
206 	{"tp_mib_ofd_arp_drop", 0x36},
207 	{"tp_mib_ofd_dfr_drop", 0x37},
208 	{"tp_mib_cpl_in_req_0", 0x38},
209 	{"tp_mib_cpl_in_req_1", 0x39},
210 	{"tp_mib_cpl_in_req_2", 0x3a},
211 	{"tp_mib_cpl_in_req_3", 0x3b},
212 	{"tp_mib_cpl_out_rsp_0", 0x3c},
213 	{"tp_mib_cpl_out_rsp_1", 0x3d},
214 	{"tp_mib_cpl_out_rsp_2", 0x3e},
215 	{"tp_mib_cpl_out_rsp_3", 0x3f},
216 	{"tp_mib_tnl_lpbk_0", 0x40},
217 	{"tp_mib_tnl_lpbk_1", 0x41},
218 	{"tp_mib_tnl_lpbk_2", 0x42},
219 	{"tp_mib_tnl_lpbk_3", 0x43},
220 	{"tp_mib_tnl_drop_0", 0x44},
221 	{"tp_mib_tnl_drop_1", 0x45},
222 	{"tp_mib_tnl_drop_2", 0x46},
223 	{"tp_mib_tnl_drop_3", 0x47},
224 	{"tp_mib_fcoe_ddp_0", 0x48},
225 	{"tp_mib_fcoe_ddp_1", 0x49},
226 	{"tp_mib_fcoe_ddp_2", 0x4a},
227 	{"tp_mib_fcoe_ddp_3", 0x4b},
228 	{"tp_mib_fcoe_drop_0", 0x4c},
229 	{"tp_mib_fcoe_drop_1", 0x4d},
230 	{"tp_mib_fcoe_drop_2", 0x4e},
231 	{"tp_mib_fcoe_drop_3", 0x4f},
232 	{"tp_mib_fcoe_byte_0_hi", 0x50},
233 	{"tp_mib_fcoe_byte_0_lo", 0x51},
234 	{"tp_mib_fcoe_byte_1_hi", 0x52},
235 	{"tp_mib_fcoe_byte_1_lo", 0x53},
236 	{"tp_mib_fcoe_byte_2_hi", 0x54},
237 	{"tp_mib_fcoe_byte_2_lo", 0x55},
238 	{"tp_mib_fcoe_byte_3_hi", 0x56},
239 	{"tp_mib_fcoe_byte_3_lo", 0x57},
240 	{"tp_mib_ofd_vln_drop_0", 0x58},
241 	{"tp_mib_ofd_vln_drop_1", 0x59},
242 	{"tp_mib_ofd_vln_drop_2", 0x5a},
243 	{"tp_mib_ofd_vln_drop_3", 0x5b},
244 	{"tp_mib_usm_pkts", 0x5c},
245 	{"tp_mib_usm_drop", 0x5d},
246 	{"tp_mib_usm_bytes_hi", 0x5e},
247 	{"tp_mib_usm_bytes_lo", 0x5f},
248 	{"tp_mib_tid_del", 0x60},
249 	{"tp_mib_tid_inv", 0x61},
250 	{"tp_mib_tid_act", 0x62},
251 	{"tp_mib_tid_pas", 0x63},
252 	{"tp_mib_rqe_dfr_mod", 0x64},
253 	{"tp_mib_rqe_dfr_pkt", 0x65}
254 };
255 
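/*
 * Latch all 16 SGE debug indices into the caller's 32-word array so it can
 * be overlaid with struct sge_debug_reg_data.  HTONL_NIBBLE (from the cudbg
 * headers) reorders the nibbles of each raw register value so the 4-bit
 * counters line up with the bit-field layout above.
 */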
256 static u32 read_sge_debug_data(struct cudbg_init *pdbg_init, u32 *sge_dbg_reg)
257 {
258 	struct adapter *padap = pdbg_init->adap;
259 	u32 value;
260 	int i = 0;
261 
262 	for (i = 0; i <= 15; i++) {
263 		t4_write_reg(padap, A_SGE_DEBUG_INDEX, (u32)i);
264 		value = t4_read_reg(padap, A_SGE_DEBUG_DATA_LOW);
265 		/*printf("LOW	 0x%08x\n", value);*/
266 		sge_dbg_reg[(i << 1) | 1] = HTONL_NIBBLE(value);
267 		value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH);
268 		/*printf("HIGH	 0x%08x\n", value);*/
269 		sge_dbg_reg[(i << 1)] = HTONL_NIBBLE(value);
270 	}
271 	return 0;
272 }
273 
274 static u32 read_tp_mib_data(struct cudbg_init *pdbg_init,
275 			    struct tp_mib_data **ppTp_Mib)
276 {
277 	struct adapter *padap = pdbg_init->adap;
278 	u32 i = 0;
279 
280 	for (i = 0; i < TP_MIB_SIZE; i++) {
281 		t4_tp_mib_read(padap, &tp_mib[i].value, 1,
282 				  (u32)tp_mib[i].addr, true);
283 	}
284 	*ppTp_Mib = (struct tp_mib_data *)&tp_mib[0];
285 
286 	return 0;
287 }
288 
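/*
 * T5 WTP collector: walks the TX path from the PCIe command/DMA interfaces
 * through SGE, ULP_TX, TP and MPS out to the MACs, then the RX path back in
 * from the wire, recording per-channel SOP/EOP counters (plus drop and error
 * totals) at each hop into a wtp_data scratch buffer that is then compressed
 * into the debug buffer.
 */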
289 static int t5_wtp_data(struct cudbg_init *pdbg_init,
290 		       struct cudbg_buffer *dbg_buff,
291 		       struct cudbg_error *cudbg_err)
292 {
293 	struct adapter *padap = pdbg_init->adap;
294 	struct sge_debug_reg_data *sge_dbg_reg = NULL;
295 	struct cudbg_buffer scratch_buff;
296 	struct tp_mib_data *ptp_mib = NULL;
297 	struct wtp_data *wtp;
298 	u32 Sge_Dbg[32] = {0};
299 	u32 value = 0;
300 	u32 i = 0;
301 	u32 drop = 0;
302 	u32 err = 0;
303 	u32 offset;
304 	int rc = 0;
305 
306 	rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);
307 
308 	if (rc)
309 		goto err;
310 
311 	offset = scratch_buff.offset;
312 	wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);
313 
314 	read_sge_debug_data(pdbg_init, Sge_Dbg);
315 	read_tp_mib_data(pdbg_init, &ptp_mib);
316 
317 	sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];
318 
319 	/*#######################################################################*/
320 	/*# TX PATH, starting from pcie*/
321 	/*#######################################################################*/
322 
323 	/* Get Requests of commands from SGE to PCIE*/
324 
325 	wtp->sge_pcie_cmd_req.sop[0] =	sge_dbg_reg->debug_PC_Req_SOP0_cnt;
326 	wtp->sge_pcie_cmd_req.sop[1] =	sge_dbg_reg->debug_PC_Req_SOP1_cnt;
327 
328 	wtp->sge_pcie_cmd_req.eop[0] =	sge_dbg_reg->debug_PC_Req_EOP0_cnt;
329 	wtp->sge_pcie_cmd_req.eop[1] =	sge_dbg_reg->debug_PC_Req_EOP1_cnt;
330 
331 	/* Get Requests of commands from PCIE to core*/
332 	value = t4_read_reg(padap, A_PCIE_CMDR_REQ_CNT);
333 
334 	wtp->pcie_core_cmd_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
335 	wtp->pcie_core_cmd_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
336 	/* there is no EOP for this, so we fake it.*/
337 	wtp->pcie_core_cmd_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
338 	wtp->pcie_core_cmd_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
339 
340 	/* Get DMA stats*/
341 	for (i = 0; i < 4; i++) {
342 		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
343 		wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
344 		wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
345 	}
346 
347 	/* Get SGE debug data high index 6*/
348 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_6);
349 	wtp->sge_debug_data_high_index_6.sop[0] = ((value >> 4) & 0x0F);
350 	wtp->sge_debug_data_high_index_6.eop[0] = ((value >> 0) & 0x0F);
351 	wtp->sge_debug_data_high_index_6.sop[1] = ((value >> 12) & 0x0F);
352 	wtp->sge_debug_data_high_index_6.eop[1] = ((value >> 8) & 0x0F);
353 	wtp->sge_debug_data_high_index_6.sop[2] = ((value >> 20) & 0x0F);
354 	wtp->sge_debug_data_high_index_6.eop[2] = ((value >> 16) & 0x0F);
355 	wtp->sge_debug_data_high_index_6.sop[3] = ((value >> 28) & 0x0F);
356 	wtp->sge_debug_data_high_index_6.eop[3] = ((value >> 24) & 0x0F);
357 
358 	/* Get SGE debug data high index 3*/
359 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_3);
360 	wtp->sge_debug_data_high_index_3.sop[0] = ((value >> 4) & 0x0F);
361 	wtp->sge_debug_data_high_index_3.eop[0] = ((value >> 0) & 0x0F);
362 	wtp->sge_debug_data_high_index_3.sop[1] = ((value >> 12) & 0x0F);
363 	wtp->sge_debug_data_high_index_3.eop[1] = ((value >> 8) & 0x0F);
364 	wtp->sge_debug_data_high_index_3.sop[2] = ((value >> 20) & 0x0F);
365 	wtp->sge_debug_data_high_index_3.eop[2] = ((value >> 16) & 0x0F);
366 	wtp->sge_debug_data_high_index_3.sop[3] = ((value >> 28) & 0x0F);
367 	wtp->sge_debug_data_high_index_3.eop[3] = ((value >> 24) & 0x0F);
368 
369 	/* Get ULP SE CNT CHx*/
370 	for (i = 0; i < 4; i++) {
371 		value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
372 		wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
373 		wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
374 	}
375 
376 	/* Get MAC PORTx PKT COUNT*/
377 	for (i = 0; i < 4; i++) {
378 		value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
379 		wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
380 		wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
381 		wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
382 		wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
383 	}
384 
385 	/* Get mac portx aFramesTransmittedok*/
386 	for (i = 0; i < 4; i++) {
387 		value = t4_read_reg(padap, 0x30a80 + ((i * 4) << 12));
388 		wtp->mac_portx_aframestra_ok.sop[i] = (value & 0xFF);
389 		wtp->mac_portx_aframestra_ok.eop[i] = (value & 0xFF);
390 	}
391 
392 	/* Get command responses from core to PCIE*/
393 	value = t4_read_reg(padap, A_PCIE_CMDR_RSP_CNT);
394 
395 	wtp->core_pcie_cmd_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
396 	wtp->core_pcie_cmd_rsp.sop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/
397 
398 	wtp->core_pcie_cmd_rsp.eop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
399 	wtp->core_pcie_cmd_rsp.eop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/
400 
401 	/*Get command Responses from PCIE to SGE*/
402 	wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
403 	wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;
404 
405 	wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
406 	wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;
407 
408 	/* Get commands sent from SGE to CIM/uP*/
409 	wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
410 	wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;
411 
412 	wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
413 	wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;
414 
415 	/* Get Requests of data from PCIE by SGE*/
416 	wtp->utx_sge_dma_req.sop[0] = sge_dbg_reg->debug_UD_Rx_SOP0_cnt;
417 	wtp->utx_sge_dma_req.sop[1] = sge_dbg_reg->debug_UD_Rx_SOP1_cnt;
418 	wtp->utx_sge_dma_req.sop[2] = sge_dbg_reg->debug_UD_Rx_SOP2_cnt;
419 	wtp->utx_sge_dma_req.sop[3] = sge_dbg_reg->debug_UD_Rx_SOP3_cnt;
420 
421 	wtp->utx_sge_dma_req.eop[0] = sge_dbg_reg->debug_UD_Rx_EOP0_cnt;
422 	wtp->utx_sge_dma_req.eop[1] = sge_dbg_reg->debug_UD_Rx_EOP1_cnt;
423 	wtp->utx_sge_dma_req.eop[2] = sge_dbg_reg->debug_UD_Rx_EOP2_cnt;
424 	wtp->utx_sge_dma_req.eop[3] = sge_dbg_reg->debug_UD_Rx_EOP3_cnt;
425 
426 	/* Get Requests of data from PCIE by SGE*/
427 	wtp->sge_pcie_dma_req.sop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
428 	wtp->sge_pcie_dma_req.sop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
429 	wtp->sge_pcie_dma_req.sop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
430 	wtp->sge_pcie_dma_req.sop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;
431 	/*no EOP's, so fake it.*/
432 	wtp->sge_pcie_dma_req.eop[0] = sge_dbg_reg->debug_PD_Req_Rd0_cnt;
433 	wtp->sge_pcie_dma_req.eop[1] = sge_dbg_reg->debug_PD_Req_Rd1_cnt;
434 	wtp->sge_pcie_dma_req.eop[2] = sge_dbg_reg->debug_PD_Req_Rd2_cnt;
435 	wtp->sge_pcie_dma_req.eop[3] = sge_dbg_reg->debug_PD_Req_Rd3_cnt;
436 
437 	/* Get Requests of data from PCIE to core*/
438 	value = t4_read_reg(padap, A_PCIE_DMAR_REQ_CNT);
439 
440 	wtp->pcie_core_dma_req.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
441 	wtp->pcie_core_dma_req.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
442 	wtp->pcie_core_dma_req.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
443 	wtp->pcie_core_dma_req.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
444 	/* There is no eop so fake it.*/
445 	wtp->pcie_core_dma_req.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
446 	wtp->pcie_core_dma_req.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
447 	wtp->pcie_core_dma_req.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
448 	wtp->pcie_core_dma_req.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
449 
450 	/* Get data responses from core to PCIE*/
451 	value = t4_read_reg(padap, A_PCIE_DMAR_RSP_SOP_CNT);
452 
453 	wtp->core_pcie_dma_rsp.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
454 	wtp->core_pcie_dma_rsp.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
455 	wtp->core_pcie_dma_rsp.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
456 	wtp->core_pcie_dma_rsp.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
457 
458 	value = t4_read_reg(padap, A_PCIE_DMAR_RSP_EOP_CNT);
459 
460 	wtp->core_pcie_dma_rsp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
461 	wtp->core_pcie_dma_rsp.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
462 	wtp->core_pcie_dma_rsp.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
463 	wtp->core_pcie_dma_rsp.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
464 
465 	/* Get PCIE_DATA to SGE*/
466 	wtp->pcie_sge_dma_rsp.sop[0] = sge_dbg_reg->debug_PD_Rsp_SOP0_cnt;
467 	wtp->pcie_sge_dma_rsp.sop[1] = sge_dbg_reg->debug_PD_Rsp_SOP1_cnt;
468 	wtp->pcie_sge_dma_rsp.sop[2] = sge_dbg_reg->debug_PD_Rsp_SOP2_cnt;
469 	wtp->pcie_sge_dma_rsp.sop[3] = sge_dbg_reg->debug_PD_Rsp_SOP3_cnt;
470 
471 	wtp->pcie_sge_dma_rsp.eop[0] = sge_dbg_reg->debug_PD_Rsp_EOP0_cnt;
472 	wtp->pcie_sge_dma_rsp.eop[1] = sge_dbg_reg->debug_PD_Rsp_EOP1_cnt;
473 	wtp->pcie_sge_dma_rsp.eop[2] = sge_dbg_reg->debug_PD_Rsp_EOP2_cnt;
474 	wtp->pcie_sge_dma_rsp.eop[3] = sge_dbg_reg->debug_PD_Rsp_EOP3_cnt;
475 
476 	/*Get SGE to ULP_TX*/
477 	wtp->sge_utx.sop[0] = sge_dbg_reg->debug_U_Tx_SOP0_cnt;
478 	wtp->sge_utx.sop[1] = sge_dbg_reg->debug_U_Tx_SOP1_cnt;
479 	wtp->sge_utx.sop[2] = sge_dbg_reg->debug_U_Tx_SOP2_cnt;
480 	wtp->sge_utx.sop[3] = sge_dbg_reg->debug_U_Tx_SOP3_cnt;
481 
482 	wtp->sge_utx.eop[0] = sge_dbg_reg->debug_U_Tx_EOP0_cnt;
483 	wtp->sge_utx.eop[1] = sge_dbg_reg->debug_U_Tx_EOP1_cnt;
484 	wtp->sge_utx.eop[2] = sge_dbg_reg->debug_U_Tx_EOP2_cnt;
485 	wtp->sge_utx.eop[3] = sge_dbg_reg->debug_U_Tx_EOP3_cnt;
486 
487 	/* Get ULP_TX to TP*/
488 	for (i = 0; i < 4; i++) {
489 		value = t4_read_reg(padap, (A_ULP_TX_SE_CNT_CH0 + (i*4)));
490 
491 		wtp->utx_tp.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
492 		wtp->utx_tp.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
493 	}
494 
495 	/* Get TP_DBG_CSIDE registers*/
496 	for (i = 0; i < 4; i++) {
497 		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
498 			       true);
499 
500 		wtp->utx_tpcside.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
501 		wtp->utx_tpcside.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
502 		wtp->tpcside_rxpld.sop[i] = ((value >> 20) & 0xF);/*bits 20:23*/
503 		wtp->tpcside_rxpld.eop[i] = ((value >> 16) & 0xF);/*bits 16:19*/
504 		wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
505 		wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
506 		wtp->tpcside_rxcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
507 		wtp->tpcside_rxcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
508 	}
509 
510 	/* TP_DBG_ESIDE*/
511 	for (i = 0; i < 4; i++) {
512 		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
513 			       true);
514 
515 		wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
516 		wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
517 		wtp->tpeside_pm.sop[i]	= ((value >> 20) & 0xF); /*bits 20:23*/
518 		wtp->tpeside_pm.eop[i]	= ((value >> 16) & 0xF); /*bits 16:19*/
519 		wtp->mps_tpeside.sop[i] = ((value >> 12) & 0xF); /*bits 12:15*/
520 		wtp->mps_tpeside.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
521 		wtp->tpeside_pld.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
522 		wtp->tpeside_pld.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
523 
524 	}
525 
526 	/*PCIE CMD STAT2*/
527 	for (i = 0; i < 3; i++) {
528 		value = t4_read_reg(padap, 0x5988 + (i * 0x10));
529 		wtp->pcie_cmd_stat2.sop[i] = value & 0xFF;
530 		wtp->pcie_cmd_stat2.eop[i] = value & 0xFF;
531 	}
532 
533 	/*PCIE cmd stat3*/
534 	for (i = 0; i < 3; i++) {
535 		value = t4_read_reg(padap, 0x598c + (i * 0x10));
536 		wtp->pcie_cmd_stat3.sop[i] = value & 0xFF;
537 		wtp->pcie_cmd_stat3.eop[i] = value & 0xFF;
538 	}
539 
540 	/* ULP_RX input/output*/
541 	for (i = 0; i < 2; i++) {
542 		value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));
543 
544 		wtp->pmrx_ulprx.sop[i]	  = ((value >> 4) & 0xF); /*bits 4:7*/
545 		wtp->pmrx_ulprx.eop[i]	  = ((value >> 0) & 0xF); /*bits 0:3*/
546 		wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
547 		wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
548 	}
549 
550 	/* Get the MPS input from TP*/
551 	drop = 0;
552 	for (i = 0; i < 2; i++) {
553 		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
554 		wtp->tp_mps.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
555 		wtp->tp_mps.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
556 		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
557 								    */
558 		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
559 								    */
560 	}
561 	drop  = ptp_mib->TP_MIB_OFD_ARP_DROP.value;
562 	drop += ptp_mib->TP_MIB_OFD_DFR_DROP.value;
563 
564 	drop += ptp_mib->TP_MIB_TNL_DROP_0.value;
565 	drop += ptp_mib->TP_MIB_TNL_DROP_1.value;
566 	drop += ptp_mib->TP_MIB_TNL_DROP_2.value;
567 	drop += ptp_mib->TP_MIB_TNL_DROP_3.value;
568 
569 	wtp->tp_mps.drops = drop;
570 
571 	/* Get the MPS output to the MACs*/
572 	drop = 0;
573 	for (i = 0; i < 2; i++) {
574 		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
575 		wtp->mps_xgm.sop[(i*2)]     = ((value >> 8) & 0xFF);/*bit 8:15*/
576 		wtp->mps_xgm.eop[(i*2)]     = ((value >> 0) & 0xFF);/*bit 0:7*/
577 		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
578 								     */
579 		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
580 								     */
581 	}
582 	for (i = 0; i < 4; i++) {
583 		value = t4_read_reg(padap,
584 				(T5_PORT0_REG(A_MPS_PORT_STAT_TX_PORT_DROP_L) +
585 				(i * T5_PORT_STRIDE)));
586 		drop += value;
587 	}
588 	wtp->mps_xgm.drops = (drop & 0xFF);
589 
590 	/* Get the SOP/EOP counters into and out of MAC. [JHANEL] I think this
591 	 * is clear on read, so you have to read both the TX and RX paths at
592 	 * the same time.
593 	 */
594 	drop = 0;
595 	for (i = 0; i < 4; i++) {
596 		value = t4_read_reg(padap,
597 				(T5_PORT0_REG(A_MAC_PORT_PKT_COUNT) +
598 				(i * T5_PORT_STRIDE)));
599 
600 		wtp->tx_xgm_xgm.sop[i] = ((value >> 24) & 0xFF); /*bit 24:31*/
601 		wtp->tx_xgm_xgm.eop[i] = ((value >> 16) & 0xFF); /*bit 16:23*/
602 		wtp->rx_xgm_xgm.sop[i] = ((value >> 8) & 0xFF); /*bit 8:15*/
603 		wtp->rx_xgm_xgm.eop[i] = ((value >> 0) & 0xFF); /*bit 0:7*/
604 	}
605 
606 	/* Get the MACs' output to the wire*/
607 	drop = 0;
608 	for (i = 0; i < 4; i++) {
609 		value = t4_read_reg(padap,
610 				(T5_PORT0_REG(A_MAC_PORT_AFRAMESTRANSMITTEDOK) +
611 				(i * T5_PORT_STRIDE)));
612 		wtp->xgm_wire.sop[i] = (value);
613 		wtp->xgm_wire.eop[i] = (value); /* No EOP for XGMAC, so fake
614 						   it.*/
615 	}
616 
617 	/*########################################################################*/
618 	/*# RX PATH, starting from wire*/
619 	/*########################################################################*/
620 
621 	/* Add up the wire input to the MAC*/
622 	drop = 0;
623 	for (i = 0; i < 4; i++) {
624 		value = t4_read_reg(padap,
625 				(T5_PORT0_REG(A_MAC_PORT_AFRAMESRECEIVEDOK) +
626 				(i * T5_PORT_STRIDE)));
627 
628 		wtp->wire_xgm.sop[i] = (value);
629 		wtp->wire_xgm.eop[i] = (value); /* No EOP for XGMAC, so fake
630 						   it.*/
631 	}
632 
633 	/* Already read the rx_xgm_xgm when reading TX path.*/
634 
635 	/* Add up SOP/EOP's on all 8 MPS buffer channels*/
636 	drop = 0;
637 	for (i = 0; i < 8; i++) {
638 		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));
639 
640 		wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
641 		wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
642 	}
643 	for (i = 0; i < 4; i++) {
644 		value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
645 		/* typo in JHANEL's code.*/
646 		drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
647 	}
648 	wtp->xgm_mps.cls_drop = drop & 0xFF;
649 
650 	/* Add up the overflow drops on all 4 ports.*/
651 	drop = 0;
652 	for (i = 0; i < 4; i++) {
653 		value = t4_read_reg(padap,
654 				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
655 				     (i << 3)));
656 		drop += value;
657 		value = t4_read_reg(padap,
658 				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
659 				     (i << 2)));
660 		value = t4_read_reg(padap,
661 				    (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
662 				     (i << 3)));
663 		drop += value;
664 		value = t4_read_reg(padap,
665 				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
666 				     (i << 2)));
667 
668 		value = t4_read_reg(padap,
669 				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
670 				     (i << 3)));
671 		drop += value;
672 		value = t4_read_reg(padap,
673 				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
674 				     (i << 3)));
675 		value = t4_read_reg(padap,
676 				    (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
677 				     (i << 3)));
678 		drop += value;
679 		value = t4_read_reg(padap,
680 				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
681 				     (i << 3)));
682 
683 		value = t4_read_reg(padap,
684 			T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
685 			(i * T5_PORT_STRIDE));
686 		drop += value;
687 	}
688 	wtp->xgm_mps.drop = (drop & 0xFF);
689 
690 	/* Add up the MPS errors that should result in dropped packets*/
691 	err = 0;
692 	for (i = 0; i < 4; i++) {
693 
694 		value = t4_read_reg(padap,
695 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
696 			(i * T5_PORT_STRIDE)));
697 		err += value;
698 		value = t4_read_reg(padap,
699 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
700 			(i * T5_PORT_STRIDE) + 4));
701 
702 		value = t4_read_reg(padap,
703 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
704 			(i * T5_PORT_STRIDE)));
705 		err += value;
706 		value = t4_read_reg(padap,
707 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
708 			(i * T5_PORT_STRIDE) + 4));
709 
710 		value = t4_read_reg(padap,
711 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
712 			(i * T5_PORT_STRIDE)));
713 		err += value;
714 		value = t4_read_reg(padap,
715 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
716 			(i * T5_PORT_STRIDE) + 4));
717 
718 		value = t4_read_reg(padap,
719 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
720 			(i * T5_PORT_STRIDE)));
721 		err += value;
722 		value = t4_read_reg(padap,
723 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
724 			(i * T5_PORT_STRIDE) + 4));
725 
726 		value = t4_read_reg(padap,
727 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
728 			(i * T5_PORT_STRIDE)));
729 		err += value;
730 		value = t4_read_reg(padap,
731 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
732 			(i * T5_PORT_STRIDE) + 4));
733 
734 		value = t4_read_reg(padap,
735 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
736 			(i * T5_PORT_STRIDE)));
737 		err += value;
738 		value = t4_read_reg(padap,
739 			(T5_PORT0_REG((A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
740 			(i * T5_PORT_STRIDE) + 4)));
741 	}
742 	wtp->xgm_mps.err = (err & 0xFF);
743 
744 	drop = 0;
745 	for (i = 0; i < 2; i++) {
746 		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_OUT01 + (i << 2)));
747 
748 		wtp->mps_tp.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
749 		wtp->mps_tp.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
750 		wtp->mps_tp.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
751 								    */
752 		wtp->mps_tp.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
753 								    */
754 	}
755 	drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
756 	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
757 	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_2.value;
758 	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_3.value;
759 	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
760 	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
761 	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_2.value;
762 	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_3.value;
763 	drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
764 	drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
765 	drop += ptp_mib->TP_MIB_FCOE_DROP_2.value;
766 	drop += ptp_mib->TP_MIB_FCOE_DROP_3.value;
767 	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
768 	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
769 	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_2.value;
770 	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_3.value;
771 	drop += ptp_mib->TP_MIB_USM_DROP.value;
772 
773 	wtp->mps_tp.drops = drop;
774 
775 	/* Get TP_DBG_CSIDE_TX registers*/
776 	for (i = 0; i < 4; i++) {
777 		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
778 			       true);
779 
780 		wtp->tpcside_csw.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
781 		wtp->tpcside_csw.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
782 		wtp->tpcside_pm.sop[i]	  = ((value >> 20) & 0xF);/*bits 20:23*/
783 		wtp->tpcside_pm.eop[i]	  = ((value >> 16) & 0xF);/*bits 16:19*/
784 		wtp->tpcside_uturn.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
785 		wtp->tpcside_uturn.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
786 		wtp->tpcside_txcpl.sop[i] = ((value >> 4) & 0xF); /*bits 4:7*/
787 		wtp->tpcside_txcpl.eop[i] = ((value >> 0) & 0xF); /*bits 0:3*/
788 	}
789 
790 	/* TP to CPL_SWITCH*/
791 	wtp->tp_csw.sop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP0_cnt;
792 	wtp->tp_csw.sop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_SOP1_cnt;
793 
794 	wtp->tp_csw.eop[0] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP0_cnt;
795 	wtp->tp_csw.eop[1] = sge_dbg_reg->debug_CPLSW_TP_Rx_EOP1_cnt;
796 
797 	/* TP/CPL_SWITCH to SGE*/
798 	wtp->csw_sge.sop[0] = sge_dbg_reg->debug_T_Rx_SOP0_cnt;
799 	wtp->csw_sge.sop[1] = sge_dbg_reg->debug_T_Rx_SOP1_cnt;
800 
801 	wtp->csw_sge.eop[0] = sge_dbg_reg->debug_T_Rx_EOP0_cnt;
802 	wtp->csw_sge.eop[1] = sge_dbg_reg->debug_T_Rx_EOP1_cnt;
803 
804 	wtp->sge_pcie.sop[0] = sge_dbg_reg->debug_PD_Req_SOP0_cnt;
805 	wtp->sge_pcie.sop[1] = sge_dbg_reg->debug_PD_Req_SOP1_cnt;
806 	wtp->sge_pcie.sop[2] = sge_dbg_reg->debug_PD_Req_SOP2_cnt;
807 	wtp->sge_pcie.sop[3] = sge_dbg_reg->debug_PD_Req_SOP3_cnt;
808 
809 	wtp->sge_pcie.eop[0] = sge_dbg_reg->debug_PD_Req_EOP0_cnt;
810 	wtp->sge_pcie.eop[1] = sge_dbg_reg->debug_PD_Req_EOP1_cnt;
811 	wtp->sge_pcie.eop[2] = sge_dbg_reg->debug_PD_Req_EOP2_cnt;
812 	wtp->sge_pcie.eop[3] = sge_dbg_reg->debug_PD_Req_EOP3_cnt;
813 
814 	wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
815 	wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
816 	wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
817 	wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;
818 	/* NO EOP, so fake it.*/
819 	wtp->sge_pcie_ints.eop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
820 	wtp->sge_pcie_ints.eop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
821 	wtp->sge_pcie_ints.eop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
822 	wtp->sge_pcie_ints.eop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;
823 
824 	/*Get PCIE DMA1 STAT2*/
825 	for (i = 0; i < 4; i++) {
826 		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
827 		wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
828 		wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
829 		wtp->pcie_dma1_stat2_core.sop[i] += value & 0x0F;
830 		wtp->pcie_dma1_stat2_core.eop[i] += value & 0x0F;
831 	}
832 
833 	/* Get mac porrx aFramesTransmittedok*/
834 	for (i = 0; i < 4; i++) {
835 		value = t4_read_reg(padap, 0x30a88 + ((i * 4) << 12));
836 		wtp->mac_porrx_aframestra_ok.sop[i] = (value & 0xFF);
837 		wtp->mac_porrx_aframestra_ok.eop[i] = (value & 0xFF);
838 	}
839 
840 	/*Get SGE debug data high index 7*/
841 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
842 	wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
843 	wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
844 	wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
845 	wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);
846 	wtp->sge_debug_data_high_indx7.sop[2] = ((value >> 20) & 0x0F);
847 	wtp->sge_debug_data_high_indx7.eop[2] = ((value >> 16) & 0x0F);
848 	wtp->sge_debug_data_high_indx7.sop[3] = ((value >> 28) & 0x0F);
849 	wtp->sge_debug_data_high_indx7.eop[3] = ((value >> 24) & 0x0F);
850 
851 	/*Get SGE debug data high index 1*/
852 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
853 	wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
854 	wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
855 	wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
856 	wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);
857 
858 	/*Get TP debug CSIDE Tx registers*/
859 	for (i = 0; i < 2; i++) {
860 		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
861 			       true);
862 
863 		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31
864 								   */
865 		wtp->utx_tpcside_tx.eop[i] = ((value >> 24) & 0xF);
866 	}
867 
868 	/*Get SGE debug data high index 9*/
869 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
870 	wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
871 	wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);
872 	wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
873 	wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);
874 	wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
875 	wtp->sge_work_req_pkt.sop[1] = ((value >> 12) & 0x0F);
876 
877 	/*Get LE DB response count*/
878 	value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
879 	wtp->le_db_rsp_cnt.sop = value & 0xF;
880 	wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;
881 
882 	/*Get TP debug Eside PKTx*/
883 	for (i = 0; i < 4; i++) {
884 		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
885 			       true);
886 
887 		wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
888 		wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
889 	}
890 
891 	/* Get DMA write SOP/EOP counts from core to PCIE*/
892 	value = t4_read_reg(padap, A_PCIE_DMAW_SOP_CNT);
893 
894 	wtp->pcie_core_dmaw.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
895 	wtp->pcie_core_dmaw.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
896 	wtp->pcie_core_dmaw.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
897 	wtp->pcie_core_dmaw.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
898 
899 	value = t4_read_reg(padap, A_PCIE_DMAW_EOP_CNT);
900 
901 	wtp->pcie_core_dmaw.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
902 	wtp->pcie_core_dmaw.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
903 	wtp->pcie_core_dmaw.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
904 	wtp->pcie_core_dmaw.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
905 
906 	value = t4_read_reg(padap, A_PCIE_DMAI_CNT);
907 
908 	wtp->pcie_core_dmai.sop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
909 	wtp->pcie_core_dmai.sop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
910 	wtp->pcie_core_dmai.sop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
911 	wtp->pcie_core_dmai.sop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
912 	/* no eop for interrupts, just fake it.*/
913 	wtp->pcie_core_dmai.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
914 	wtp->pcie_core_dmai.eop[1] = ((value >> 8) & 0xFF); /*bit 8:15*/
915 	wtp->pcie_core_dmai.eop[2] = ((value >> 16) & 0xFF); /*bit 16:23*/
916 	wtp->pcie_core_dmai.eop[3] = ((value >> 24) & 0xFF); /*bit 24:31*/
917 
918 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
919 
920 	if (rc)
921 		goto err1;
922 
923 	rc = compress_buff(&scratch_buff, dbg_buff);
924 
925 err1:
926 	release_scratch_buff(&scratch_buff, dbg_buff);
927 err:
928 	return rc;
929 }
930 
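/*
 * T6 WTP collector: same flow as t5_wtp_data(), with the register set and
 * the channel/port counts (two rather than four in most loops) adjusted for
 * T6 hardware.
 */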
931 static int t6_wtp_data(struct cudbg_init *pdbg_init,
932 		       struct cudbg_buffer *dbg_buff,
933 		       struct cudbg_error *cudbg_err)
934 {
935 	struct adapter *padap = pdbg_init->adap;
936 	struct sge_debug_reg_data *sge_dbg_reg = NULL;
937 	struct cudbg_buffer scratch_buff;
938 	struct tp_mib_data *ptp_mib = NULL;
939 	struct wtp_data *wtp;
940 	u32 Sge_Dbg[32] = {0};
941 	u32 value = 0;
942 	u32 i = 0;
943 	u32 drop = 0;
944 	u32 err = 0;
945 	u32 offset;
946 	int rc = 0;
947 
948 	rc = get_scratch_buff(dbg_buff, sizeof(struct wtp_data), &scratch_buff);
949 
950 	if (rc)
951 		goto err;
952 
953 	offset = scratch_buff.offset;
954 	wtp = (struct wtp_data *)((char *)scratch_buff.data + offset);
955 
956 	read_sge_debug_data(pdbg_init, Sge_Dbg);
957 	read_tp_mib_data(pdbg_init, &ptp_mib);
958 
959 	sge_dbg_reg = (struct sge_debug_reg_data *) &Sge_Dbg[0];
960 
961 	/*# TX PATH*/
962 
963 	/*PCIE CMD STAT2*/
964 	value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT2);
965 	wtp->pcie_cmd_stat2.sop[0] = value & 0xFF;
966 	wtp->pcie_cmd_stat2.eop[0] = value & 0xFF;
967 
968 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
969 	wtp->sge_pcie_cmd_req.sop[0] = ((value >> 20) & 0x0F);
970 	wtp->sge_pcie_cmd_req.eop[0] = ((value >> 16) & 0x0F);
971 	wtp->sge_pcie_cmd_req.sop[1] = ((value >> 28) & 0x0F);
972 	wtp->sge_pcie_cmd_req.eop[1] = ((value >> 24) & 0x0F);
973 
974 	value = t4_read_reg(padap, A_PCIE_T5_CMD_STAT3);
975 	wtp->pcie_cmd_stat3.sop[0] = value & 0xFF;
976 	wtp->pcie_cmd_stat3.eop[0] = value & 0xFF;
977 
978 	/*Get command Responses from PCIE to SGE*/
979 	wtp->pcie_sge_cmd_rsp.sop[0] = sge_dbg_reg->debug_PC_Rsp_SOP0_cnt;
980 	wtp->pcie_sge_cmd_rsp.eop[0] = sge_dbg_reg->debug_PC_Rsp_EOP0_cnt;
981 	wtp->pcie_sge_cmd_rsp.sop[1] = sge_dbg_reg->debug_PC_Rsp_SOP1_cnt;
982 	wtp->pcie_sge_cmd_rsp.eop[1] = sge_dbg_reg->debug_PC_Rsp_EOP1_cnt;
983 
984 	/* Get commands sent from SGE to CIM/uP*/
985 	wtp->sge_cim.sop[0] = sge_dbg_reg->debug_CIM_SOP0_cnt;
986 	wtp->sge_cim.sop[1] = sge_dbg_reg->debug_CIM_SOP1_cnt;
987 
988 	wtp->sge_cim.eop[0] = sge_dbg_reg->debug_CIM_EOP0_cnt;
989 	wtp->sge_cim.eop[1] = sge_dbg_reg->debug_CIM_EOP1_cnt;
990 
991 	/*Get SGE debug data high index 9*/
992 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
993 	wtp->sge_work_req_pkt.sop[0] = ((value >> 4) & 0x0F);
994 	wtp->sge_work_req_pkt.eop[0] = ((value >> 0) & 0x0F);
995 
996 	for (i = 0; i < 2; i++) {
997 		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT2 + (i * 0x10));
998 		wtp->pcie_dma1_stat2.sop[i] = ((value >> 8) & 0x0F);
999 		wtp->pcie_dma1_stat2.eop[i] = ((value >> 8) & 0x0F);
1000 		wtp->pcie_dma1_stat2_core.sop[i] = value & 0x0F;
1001 		wtp->pcie_dma1_stat2_core.eop[i] = value & 0x0F;
1002 	}
1003 
1004 	/* Get DMA0 stats3*/
1005 	for (i = 0; i < 2; i++) {
1006 		value = t4_read_reg(padap, A_PCIE_T5_DMA_STAT3 + (i * 0x10));
1007 		wtp->pcie_t5_dma_stat3.sop[i] = value & 0xFF;
1008 		wtp->pcie_t5_dma_stat3.eop[i] = ((value >> 16) & 0xFF);
1009 	}
1010 
1011 	/* Get ULP SE CNT CHx*/
1012 	for (i = 0; i < 4; i++) {
1013 		value = t4_read_reg(padap, A_ULP_TX_SE_CNT_CH0 + (i * 4));
1014 		wtp->ulp_se_cnt_chx.sop[i] = ((value >> 28) & 0x0F);
1015 		wtp->ulp_se_cnt_chx.eop[i] = ((value >> 24) & 0x0F);
1016 	}
1017 
1018 	/* Get TP_DBG_CSIDE registers*/
1019 	for (i = 0; i < 4; i++) {
1020 		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_RX0 + i),
1021 			       true);
1022 
1023 		wtp->utx_tpcside.sop[i]   = ((value >> 28) & 0xF);/*bits 28:31*/
1024 		wtp->utx_tpcside.eop[i]   = ((value >> 24) & 0xF);/*bits 24:27*/
1025 		wtp->tpcside_rxarb.sop[i] = ((value >> 12) & 0xF);/*bits 12:15*/
1026 		wtp->tpcside_rxarb.eop[i] = ((value >> 8) & 0xF); /*bits 8:11*/
1027 	}
1028 
1029 	for (i = 0; i < 4; i++) {
1030 		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
1031 			       true);
1032 
1033 
1034 		wtp->tpeside_mps.sop[i] = ((value >> 28) & 0xF); /*bits 28:31*/
1035 		wtp->tpeside_mps.eop[i] = ((value >> 24) & 0xF); /*bits 24:27*/
1036 	}
1037 
1038 	for (i = 0; i < 2; i++) {
1039 		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_TP01 + (i << 2)));
1040 		wtp->tp_mps.sop[(i*2)]	   = ((value >> 8) & 0xFF); /*bit 8:15*/
1041 		wtp->tp_mps.eop[(i*2)]	   = ((value >> 0) & 0xFF); /*bit 0:7*/
1042 		wtp->tp_mps.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
1043 								    */
1044 		wtp->tp_mps.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
1045 								    */
1046 	}
1047 
1048 	for (i = 0; i < 2; i++) {
1049 		value = t4_read_reg(padap, (A_MPS_TX_SE_CNT_MAC01 + (i << 2)));
1050 		wtp->mps_xgm.sop[(i*2)]     = ((value >> 8) & 0xFF);/*bit 8:15*/
1051 		wtp->mps_xgm.eop[(i*2)]     = ((value >> 0) & 0xFF); /*bit 0:7*/
1052 		wtp->mps_xgm.sop[(i*2) + 1] = ((value >> 24) & 0xFF);/*bit 24:31
1053 								     */
1054 		wtp->mps_xgm.eop[(i*2) + 1] = ((value >> 16) & 0xFF);/*bit 16:23
1055 								     */
1056 	}
1057 
1058 	/* Get MAC PORTx PKT COUNT*/
1059 	for (i = 0; i < 2; i++) {
1060 		value = t4_read_reg(padap, 0x3081c + ((i * 4) << 12));
1061 		wtp->mac_portx_pkt_count.sop[i] = ((value >> 24) & 0xFF);
1062 		wtp->mac_portx_pkt_count.eop[i] = ((value >> 16) & 0xFF);
1063 		wtp->mac_porrx_pkt_count.sop[i] = ((value >> 8) & 0xFF);
1064 		wtp->mac_porrx_pkt_count.eop[i] = ((value >> 0) & 0xFF);
1065 	}
1066 
1067 	for (i = 0; i < 2; i++) {
1068 		value = t4_read_reg(padap, 0x30f20 + ((i * 4) << 12));
1069 		wtp->mac_portx_aframestra_ok.sop[i] = value & 0xff;
1070 		wtp->mac_portx_aframestra_ok.eop[i] = value & 0xff;
1071 	}
1072 
1073 	/*MAC_PORT_MTIP_1G10G_TX_etherStatsPkts*/
1074 
1075 	for (i = 0; i < 2; i++) {
1076 		value = t4_read_reg(padap, 0x30f60 + ((i * 4) << 12));
1077 		wtp->mac_portx_etherstatspkts.sop[i] = value & 0xff;
1078 		wtp->mac_portx_etherstatspkts.eop[i] = value & 0xff;
1079 	}
1080 
1081 	/*RX path*/
1082 
1083 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_7);
1084 	wtp->sge_debug_data_high_indx7.sop[0] = ((value >> 4) & 0x0F);
1085 	wtp->sge_debug_data_high_indx7.eop[0] = ((value >> 0) & 0x0F);
1086 	wtp->sge_debug_data_high_indx7.sop[1] = ((value >> 12) & 0x0F);
1087 	wtp->sge_debug_data_high_indx7.eop[1] = ((value >> 8) & 0x0F);
1088 
1089 	/*Get SGE debug data high index 1*/
1090 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_1);
1091 	wtp->sge_debug_data_high_indx1.sop[0] = ((value >> 20) & 0x0F);
1092 	wtp->sge_debug_data_high_indx1.eop[0] = ((value >> 16) & 0x0F);
1093 	wtp->sge_debug_data_high_indx1.sop[1] = ((value >> 28) & 0x0F);
1094 	wtp->sge_debug_data_high_indx1.eop[1] = ((value >> 24) & 0x0F);
1095 
1096 	value = t4_read_reg(padap, A_SGE_DEBUG_DATA_HIGH_INDEX_9);
1097 	wtp->sge_debug_data_high_indx9.sop[0] = ((value >> 20) & 0x0F);
1098 	wtp->sge_debug_data_high_indx9.sop[1] = ((value >> 28) & 0x0F);
1099 
1100 	wtp->sge_debug_data_high_indx9.eop[0] = ((value >> 16) & 0x0F);
1101 	wtp->sge_debug_data_high_indx9.eop[1] = ((value >> 24) & 0x0F);
1102 
1103 	for (i = 0; i < 2; i++) {
1104 		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_CSIDE_TX0 + i),
1105 			       true);
1106 
1107 		wtp->utx_tpcside_tx.sop[i] = ((value >> 28) & 0xF);/*bits 28:31
1108 								   */
1109 		wtp->utx_tpcside_tx.eop[i]   = ((value >> 24) & 0xF);
1110 	}
1111 
1112 	/*ULP_RX input/output*/
1113 	for (i = 0; i < 2; i++) {
1114 		value = t4_read_reg(padap, (A_ULP_RX_SE_CNT_CH0 + (i*4)));
1115 
1116 		wtp->pmrx_ulprx.sop[i]	  = ((value >> 4) & 0xF); /*bits 4:7*/
1117 		wtp->pmrx_ulprx.eop[i]	  = ((value >> 0) & 0xF); /*bits 0:3*/
1118 		wtp->ulprx_tpcside.sop[i] = ((value >> 28) & 0xF);/*bits 28:31*/
1119 		wtp->ulprx_tpcside.eop[i] = ((value >> 24) & 0xF);/*bits 24:27*/
1120 	}
1121 
1122 	/*Get LE DB response count*/
1123 	value = t4_read_reg(padap, A_LE_DB_REQ_RSP_CNT);
1124 	wtp->le_db_rsp_cnt.sop = value & 0xF;
1125 	wtp->le_db_rsp_cnt.eop = (value >> 16) & 0xF;
1126 
1127 	/*Get TP debug Eside PKTx*/
1128 	for (i = 0; i < 4; i++) {
1129 		t4_tp_pio_read(padap, &value, 1, (u32)(A_TP_DBG_ESIDE_PKT0 + i),
1130 			       true);
1131 
1132 		wtp->tp_dbg_eside_pktx.sop[i] = ((value >> 12) & 0xF);
1133 		wtp->tp_dbg_eside_pktx.eop[i] = ((value >> 8) & 0xF);
1134 	}
1135 
1136 	drop = 0;
1137 	/*MPS_RX_SE_CNT_OUT01*/
1138 	value = t4_read_reg(padap, A_MPS_RX_SE_CNT_OUT01);
1139 	wtp->mps_tp.sop[0] = ((value >> 8) & 0xFF); /*bit 8:15*/
1140 	wtp->mps_tp.eop[0] = ((value >> 0) & 0xFF); /*bit 0:7*/
1141 	wtp->mps_tp.sop[1] = ((value >> 24) & 0xFF); /*bit 24:31*/
1142 	wtp->mps_tp.eop[1] = ((value >> 16) & 0xFF); /*bit 16:23*/
1143 
1144 	drop = ptp_mib->TP_MIB_TNL_CNG_DROP_0.value;
1145 	drop += ptp_mib->TP_MIB_TNL_CNG_DROP_1.value;
1146 	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_0.value;
1147 	drop += ptp_mib->TP_MIB_OFD_CHN_DROP_1.value;
1148 	drop += ptp_mib->TP_MIB_FCOE_DROP_0.value;
1149 	drop += ptp_mib->TP_MIB_FCOE_DROP_1.value;
1150 	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_0.value;
1151 	drop += ptp_mib->TP_MIB_OFD_VLN_DROP_1.value;
1152 	drop += ptp_mib->TP_MIB_USM_DROP.value;
1153 
1154 	wtp->mps_tp.drops = drop;
1155 
1156 	drop = 0;
1157 	for (i = 0; i < 8; i++) {
1158 		value = t4_read_reg(padap, (A_MPS_RX_SE_CNT_IN0 + (i << 2)));
1159 
1160 		wtp->xgm_mps.sop[i] = ((value >> 8) & 0xFF); /*bits 8:15*/
1161 		wtp->xgm_mps.eop[i] = ((value >> 0) & 0xFF); /*bits 0:7*/
1162 	}
1163 	for (i = 0; i < 2; i++) {
1164 		value = t4_read_reg(padap, (A_MPS_RX_CLS_DROP_CNT0 + (i << 2)));
1165 		drop += (value & 0xFFFF) + ((value >> 16) & 0xFFFF);
1166 	}
1167 	wtp->xgm_mps.cls_drop = drop & 0xFF;
1168 
1169 	for (i = 0; i < 2; i++) {
1170 		value = t4_read_reg(padap, 0x30e20 + ((i * 4) << 12));
1171 		wtp->mac_porrx_aframestra_ok.sop[i] = value & 0xff;
1172 		wtp->mac_porrx_aframestra_ok.eop[i] = value & 0xff;
1173 	}
1174 
1175 	/*MAC_PORT_MTIP_1G10G_RX_etherStatsPkts*/
1176 	for (i = 0; i < 2; i++) {
1177 		value = t4_read_reg(padap, 0x30e60 + ((i * 4) << 12));
1178 		wtp->mac_porrx_etherstatspkts.sop[i] = value & 0xff;
1179 		wtp->mac_porrx_etherstatspkts.eop[i] = value & 0xff;
1180 	}
1181 
1182 	wtp->sge_pcie_ints.sop[0] = sge_dbg_reg->debug_PD_Req_Int0_cnt;
1183 	wtp->sge_pcie_ints.sop[1] = sge_dbg_reg->debug_PD_Req_Int1_cnt;
1184 	wtp->sge_pcie_ints.sop[2] = sge_dbg_reg->debug_PD_Req_Int2_cnt;
1185 	wtp->sge_pcie_ints.sop[3] = sge_dbg_reg->debug_PD_Req_Int3_cnt;
1186 
1187 	/* Add up the overflow drops on both ports.*/
1188 	drop = 0;
1189 	for (i = 0; i < 2; i++) {
1190 		value = t4_read_reg(padap,
1191 				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
1192 				     (i << 3)));
1193 		drop += value;
1194 		value = t4_read_reg(padap,
1195 				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
1196 				     (i << 2)));
1197 		value = t4_read_reg(padap,
1198 				    (A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L +
1199 				     (i << 3)));
1200 		drop += value;
1201 		value = t4_read_reg(padap,
1202 				    (A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
1203 				     (i << 2)));
1204 
1205 		value = t4_read_reg(padap,
1206 				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
1207 				     (i << 3)));
1208 		drop += value;
1209 		value = t4_read_reg(padap,
1210 				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
1211 				     (i << 3)));
1212 		value = t4_read_reg(padap,
1213 				    (A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L +
1214 				     (i << 3)));
1215 		drop += value;
1216 		value = t4_read_reg(padap,
1217 				    (A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
1218 				     (i << 3)));
1219 
1220 		value = t4_read_reg(padap,
1221 			(T5_PORT0_REG(A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES) +
1222 			(i * T5_PORT_STRIDE)));
1223 		drop += value;
1224 	}
1225 	wtp->xgm_mps.drop = (drop & 0xFF);
1226 
1227 	/* Add up the MPS errors that should result in dropped packets*/
1228 	err = 0;
1229 	for (i = 0; i < 2; i++) {
1230 
1231 		value = t4_read_reg(padap,
1232 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
1233 			(i * T5_PORT_STRIDE)));
1234 		err += value;
1235 		value = t4_read_reg(padap,
1236 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L) +
1237 			(i * T5_PORT_STRIDE) + 4));
1238 
1239 		value = t4_read_reg(padap,
1240 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
1241 			(i * T5_PORT_STRIDE)));
1242 		err += value;
1243 		value = t4_read_reg(padap,
1244 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L) +
1245 			(i * T5_PORT_STRIDE) + 4));
1246 
1247 		value = t4_read_reg(padap,
1248 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
1249 				     (i * T5_PORT_STRIDE)));
1250 		err += value;
1251 		value = t4_read_reg(padap,
1252 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L) +
1253 			(i * T5_PORT_STRIDE) + 4));
1254 
1255 		value = t4_read_reg(padap,
1256 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
1257 			(i * T5_PORT_STRIDE)));
1258 		err += value;
1259 		value = t4_read_reg(padap,
1260 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L) +
1261 			(i * T5_PORT_STRIDE) + 4));
1262 
1263 		value = t4_read_reg(padap,
1264 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
1265 			(i * T5_PORT_STRIDE)));
1266 		err += value;
1267 		value = t4_read_reg(padap,
1268 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L) +
1269 			(i * T5_PORT_STRIDE) + 4));
1270 
1271 		value = t4_read_reg(padap,
1272 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
1273 			(i * T5_PORT_STRIDE)));
1274 		err += value;
1275 		value = t4_read_reg(padap,
1276 			(T5_PORT0_REG(A_MPS_PORT_STAT_RX_PORT_LESS_64B_L) +
1277 			(i * T5_PORT_STRIDE) + 4));
1278 	}
1279 	wtp->xgm_mps.err = (err & 0xFF);
1280 
1281 	rc = write_compression_hdr(&scratch_buff, dbg_buff);
1282 
1283 	if (rc)
1284 		goto err1;
1285 
1286 	rc = compress_buff(&scratch_buff, dbg_buff);
1287 
1288 err1:
1289 	release_scratch_buff(&scratch_buff, dbg_buff);
1290 err:
1291 	return rc;
1292 }
1293 
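/*
 * WTP entity entry point: dispatch to the chip-specific collector; returns
 * -1 for adapters that are neither T5 nor T6.
 */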
1294 int collect_wtp_data(struct cudbg_init *pdbg_init,
1295 		     struct cudbg_buffer *dbg_buff,
1296 		     struct cudbg_error *cudbg_err)
1297 {
1298 	struct adapter *padap = pdbg_init->adap;
1299 	int rc = -1;
1300 
1301 	if (is_t5(padap))
1302 		rc = t5_wtp_data(pdbg_init, dbg_buff, cudbg_err);
1303 	else if (is_t6(padap))
1304 		rc = t6_wtp_data(pdbg_init, dbg_buff, cudbg_err);
1305 
1306 	return rc;
1307 }
1308