xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3 
4 #include <linux/device.h>
5 #include <linux/sched/clock.h>
6 #include <linux/string_choices.h>
7 
8 #include "hclge_debugfs.h"
9 #include "hclge_err.h"
10 #include "hclge_main.h"
11 #include "hclge_regs.h"
12 #include "hclge_tm.h"
13 #include "hnae3.h"
14 
15 #define hclge_seq_file_to_hdev(s)	\
16 		(((struct hnae3_ae_dev *)hnae3_seq_file_to_ae_dev(s))->priv)
17 
18 static const char * const hclge_mac_state_str[] = {
19 	"TO_ADD", "TO_DEL", "ACTIVE"
20 };
21 
22 static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
23 
24 static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
25 	{false, "Reserved"},
26 	{true,	"BP_CPU_STATE"},
27 	{true,	"DFX_MSIX_INFO_NIC_0"},
28 	{true,	"DFX_MSIX_INFO_NIC_1"},
29 	{true,	"DFX_MSIX_INFO_NIC_2"},
30 	{true,	"DFX_MSIX_INFO_NIC_3"},
31 
32 	{true,	"DFX_MSIX_INFO_ROC_0"},
33 	{true,	"DFX_MSIX_INFO_ROC_1"},
34 	{true,	"DFX_MSIX_INFO_ROC_2"},
35 	{true,	"DFX_MSIX_INFO_ROC_3"},
36 	{false, "Reserved"},
37 	{false, "Reserved"},
38 };
39 
40 static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
41 	{false, "Reserved"},
42 	{true,	"SSU_ETS_PORT_STATUS"},
43 	{true,	"SSU_ETS_TCG_STATUS"},
44 	{false, "Reserved"},
45 	{false, "Reserved"},
46 	{true,	"SSU_BP_STATUS_0"},
47 
48 	{true,	"SSU_BP_STATUS_1"},
49 	{true,	"SSU_BP_STATUS_2"},
50 	{true,	"SSU_BP_STATUS_3"},
51 	{true,	"SSU_BP_STATUS_4"},
52 	{true,	"SSU_BP_STATUS_5"},
53 	{true,	"SSU_MAC_TX_PFC_IND"},
54 
55 	{true,	"MAC_SSU_RX_PFC_IND"},
56 	{true,	"BTMP_AGEING_ST_B0"},
57 	{true,	"BTMP_AGEING_ST_B1"},
58 	{true,	"BTMP_AGEING_ST_B2"},
59 	{false, "Reserved"},
60 	{false, "Reserved"},
61 
62 	{true,	"FULL_DROP_NUM"},
63 	{true,	"PART_DROP_NUM"},
64 	{true,	"PPP_KEY_DROP_NUM"},
65 	{true,	"PPP_RLT_DROP_NUM"},
66 	{true,	"LO_PRI_UNICAST_RLT_DROP_NUM"},
67 	{true,	"HI_PRI_MULTICAST_RLT_DROP_NUM"},
68 
69 	{true,	"LO_PRI_MULTICAST_RLT_DROP_NUM"},
70 	{true,	"NCSI_PACKET_CURR_BUFFER_CNT"},
71 	{true,	"BTMP_AGEING_RLS_CNT_BANK0"},
72 	{true,	"BTMP_AGEING_RLS_CNT_BANK1"},
73 	{true,	"BTMP_AGEING_RLS_CNT_BANK2"},
74 	{true,	"SSU_MB_RD_RLT_DROP_CNT"},
75 
76 	{true,	"SSU_PPP_MAC_KEY_NUM_L"},
77 	{true,	"SSU_PPP_MAC_KEY_NUM_H"},
78 	{true,	"SSU_PPP_HOST_KEY_NUM_L"},
79 	{true,	"SSU_PPP_HOST_KEY_NUM_H"},
80 	{true,	"PPP_SSU_MAC_RLT_NUM_L"},
81 	{true,	"PPP_SSU_MAC_RLT_NUM_H"},
82 
83 	{true,	"PPP_SSU_HOST_RLT_NUM_L"},
84 	{true,	"PPP_SSU_HOST_RLT_NUM_H"},
85 	{true,	"NCSI_RX_PACKET_IN_CNT_L"},
86 	{true,	"NCSI_RX_PACKET_IN_CNT_H"},
87 	{true,	"NCSI_TX_PACKET_OUT_CNT_L"},
88 	{true,	"NCSI_TX_PACKET_OUT_CNT_H"},
89 
90 	{true,	"SSU_KEY_DROP_NUM"},
91 	{true,	"MB_UNCOPY_NUM"},
92 	{true,	"RX_OQ_DROP_PKT_CNT"},
93 	{true,	"TX_OQ_DROP_PKT_CNT"},
94 	{true,	"BANK_UNBALANCE_DROP_CNT"},
95 	{true,	"BANK_UNBALANCE_RX_DROP_CNT"},
96 
97 	{true,	"NIC_L2_ERR_DROP_PKT_CNT"},
98 	{true,	"ROC_L2_ERR_DROP_PKT_CNT"},
99 	{true,	"NIC_L2_ERR_DROP_PKT_CNT_RX"},
100 	{true,	"ROC_L2_ERR_DROP_PKT_CNT_RX"},
101 	{true,	"RX_OQ_GLB_DROP_PKT_CNT"},
102 	{false, "Reserved"},
103 
104 	{true,	"LO_PRI_UNICAST_CUR_CNT"},
105 	{true,	"HI_PRI_MULTICAST_CUR_CNT"},
106 	{true,	"LO_PRI_MULTICAST_CUR_CNT"},
107 	{false, "Reserved"},
108 	{false, "Reserved"},
109 	{false, "Reserved"},
110 };
111 
112 static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
113 	{true,	"prt_id"},
114 	{true,	"PACKET_TC_CURR_BUFFER_CNT_0"},
115 	{true,	"PACKET_TC_CURR_BUFFER_CNT_1"},
116 	{true,	"PACKET_TC_CURR_BUFFER_CNT_2"},
117 	{true,	"PACKET_TC_CURR_BUFFER_CNT_3"},
118 	{true,	"PACKET_TC_CURR_BUFFER_CNT_4"},
119 
120 	{true,	"PACKET_TC_CURR_BUFFER_CNT_5"},
121 	{true,	"PACKET_TC_CURR_BUFFER_CNT_6"},
122 	{true,	"PACKET_TC_CURR_BUFFER_CNT_7"},
123 	{true,	"PACKET_CURR_BUFFER_CNT"},
124 	{false, "Reserved"},
125 	{false, "Reserved"},
126 
127 	{true,	"RX_PACKET_IN_CNT_L"},
128 	{true,	"RX_PACKET_IN_CNT_H"},
129 	{true,	"RX_PACKET_OUT_CNT_L"},
130 	{true,	"RX_PACKET_OUT_CNT_H"},
131 	{true,	"TX_PACKET_IN_CNT_L"},
132 	{true,	"TX_PACKET_IN_CNT_H"},
133 
134 	{true,	"TX_PACKET_OUT_CNT_L"},
135 	{true,	"TX_PACKET_OUT_CNT_H"},
136 	{true,	"ROC_RX_PACKET_IN_CNT_L"},
137 	{true,	"ROC_RX_PACKET_IN_CNT_H"},
138 	{true,	"ROC_TX_PACKET_OUT_CNT_L"},
139 	{true,	"ROC_TX_PACKET_OUT_CNT_H"},
140 
141 	{true,	"RX_PACKET_TC_IN_CNT_0_L"},
142 	{true,	"RX_PACKET_TC_IN_CNT_0_H"},
143 	{true,	"RX_PACKET_TC_IN_CNT_1_L"},
144 	{true,	"RX_PACKET_TC_IN_CNT_1_H"},
145 	{true,	"RX_PACKET_TC_IN_CNT_2_L"},
146 	{true,	"RX_PACKET_TC_IN_CNT_2_H"},
147 
148 	{true,	"RX_PACKET_TC_IN_CNT_3_L"},
149 	{true,	"RX_PACKET_TC_IN_CNT_3_H"},
150 	{true,	"RX_PACKET_TC_IN_CNT_4_L"},
151 	{true,	"RX_PACKET_TC_IN_CNT_4_H"},
152 	{true,	"RX_PACKET_TC_IN_CNT_5_L"},
153 	{true,	"RX_PACKET_TC_IN_CNT_5_H"},
154 
155 	{true,	"RX_PACKET_TC_IN_CNT_6_L"},
156 	{true,	"RX_PACKET_TC_IN_CNT_6_H"},
157 	{true,	"RX_PACKET_TC_IN_CNT_7_L"},
158 	{true,	"RX_PACKET_TC_IN_CNT_7_H"},
159 	{true,	"RX_PACKET_TC_OUT_CNT_0_L"},
160 	{true,	"RX_PACKET_TC_OUT_CNT_0_H"},
161 
162 	{true,	"RX_PACKET_TC_OUT_CNT_1_L"},
163 	{true,	"RX_PACKET_TC_OUT_CNT_1_H"},
164 	{true,	"RX_PACKET_TC_OUT_CNT_2_L"},
165 	{true,	"RX_PACKET_TC_OUT_CNT_2_H"},
166 	{true,	"RX_PACKET_TC_OUT_CNT_3_L"},
167 	{true,	"RX_PACKET_TC_OUT_CNT_3_H"},
168 
169 	{true,	"RX_PACKET_TC_OUT_CNT_4_L"},
170 	{true,	"RX_PACKET_TC_OUT_CNT_4_H"},
171 	{true,	"RX_PACKET_TC_OUT_CNT_5_L"},
172 	{true,	"RX_PACKET_TC_OUT_CNT_5_H"},
173 	{true,	"RX_PACKET_TC_OUT_CNT_6_L"},
174 	{true,	"RX_PACKET_TC_OUT_CNT_6_H"},
175 
176 	{true,	"RX_PACKET_TC_OUT_CNT_7_L"},
177 	{true,	"RX_PACKET_TC_OUT_CNT_7_H"},
178 	{true,	"TX_PACKET_TC_IN_CNT_0_L"},
179 	{true,	"TX_PACKET_TC_IN_CNT_0_H"},
180 	{true,	"TX_PACKET_TC_IN_CNT_1_L"},
181 	{true,	"TX_PACKET_TC_IN_CNT_1_H"},
182 
183 	{true,	"TX_PACKET_TC_IN_CNT_2_L"},
184 	{true,	"TX_PACKET_TC_IN_CNT_2_H"},
185 	{true,	"TX_PACKET_TC_IN_CNT_3_L"},
186 	{true,	"TX_PACKET_TC_IN_CNT_3_H"},
187 	{true,	"TX_PACKET_TC_IN_CNT_4_L"},
188 	{true,	"TX_PACKET_TC_IN_CNT_4_H"},
189 
190 	{true,	"TX_PACKET_TC_IN_CNT_5_L"},
191 	{true,	"TX_PACKET_TC_IN_CNT_5_H"},
192 	{true,	"TX_PACKET_TC_IN_CNT_6_L"},
193 	{true,	"TX_PACKET_TC_IN_CNT_6_H"},
194 	{true,	"TX_PACKET_TC_IN_CNT_7_L"},
195 	{true,	"TX_PACKET_TC_IN_CNT_7_H"},
196 
197 	{true,	"TX_PACKET_TC_OUT_CNT_0_L"},
198 	{true,	"TX_PACKET_TC_OUT_CNT_0_H"},
199 	{true,	"TX_PACKET_TC_OUT_CNT_1_L"},
200 	{true,	"TX_PACKET_TC_OUT_CNT_1_H"},
201 	{true,	"TX_PACKET_TC_OUT_CNT_2_L"},
202 	{true,	"TX_PACKET_TC_OUT_CNT_2_H"},
203 
204 	{true,	"TX_PACKET_TC_OUT_CNT_3_L"},
205 	{true,	"TX_PACKET_TC_OUT_CNT_3_H"},
206 	{true,	"TX_PACKET_TC_OUT_CNT_4_L"},
207 	{true,	"TX_PACKET_TC_OUT_CNT_4_H"},
208 	{true,	"TX_PACKET_TC_OUT_CNT_5_L"},
209 	{true,	"TX_PACKET_TC_OUT_CNT_5_H"},
210 
211 	{true,	"TX_PACKET_TC_OUT_CNT_6_L"},
212 	{true,	"TX_PACKET_TC_OUT_CNT_6_H"},
213 	{true,	"TX_PACKET_TC_OUT_CNT_7_L"},
214 	{true,	"TX_PACKET_TC_OUT_CNT_7_H"},
215 	{false, "Reserved"},
216 	{false, "Reserved"},
217 };
218 
219 static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
220 	{true,	"OQ_INDEX"},
221 	{true,	"QUEUE_CNT"},
222 	{false, "Reserved"},
223 	{false, "Reserved"},
224 	{false, "Reserved"},
225 	{false, "Reserved"},
226 };
227 
228 static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
229 	{true,	"prt_id"},
230 	{true,	"IGU_RX_ERR_PKT"},
231 	{true,	"IGU_RX_NO_SOF_PKT"},
232 	{true,	"EGU_TX_1588_SHORT_PKT"},
233 	{true,	"EGU_TX_1588_PKT"},
234 	{true,	"EGU_TX_ERR_PKT"},
235 
236 	{true,	"IGU_RX_OUT_L2_PKT"},
237 	{true,	"IGU_RX_OUT_L3_PKT"},
238 	{true,	"IGU_RX_OUT_L4_PKT"},
239 	{true,	"IGU_RX_IN_L2_PKT"},
240 	{true,	"IGU_RX_IN_L3_PKT"},
241 	{true,	"IGU_RX_IN_L4_PKT"},
242 
243 	{true,	"IGU_RX_EL3E_PKT"},
244 	{true,	"IGU_RX_EL4E_PKT"},
245 	{true,	"IGU_RX_L3E_PKT"},
246 	{true,	"IGU_RX_L4E_PKT"},
247 	{true,	"IGU_RX_ROCEE_PKT"},
248 	{true,	"IGU_RX_OUT_UDP0_PKT"},
249 
250 	{true,	"IGU_RX_IN_UDP0_PKT"},
251 	{true,	"IGU_MC_CAR_DROP_PKT_L"},
252 	{true,	"IGU_MC_CAR_DROP_PKT_H"},
253 	{true,	"IGU_BC_CAR_DROP_PKT_L"},
254 	{true,	"IGU_BC_CAR_DROP_PKT_H"},
255 	{false, "Reserved"},
256 
257 	{true,	"IGU_RX_OVERSIZE_PKT_L"},
258 	{true,	"IGU_RX_OVERSIZE_PKT_H"},
259 	{true,	"IGU_RX_UNDERSIZE_PKT_L"},
260 	{true,	"IGU_RX_UNDERSIZE_PKT_H"},
261 	{true,	"IGU_RX_OUT_ALL_PKT_L"},
262 	{true,	"IGU_RX_OUT_ALL_PKT_H"},
263 
264 	{true,	"IGU_TX_OUT_ALL_PKT_L"},
265 	{true,	"IGU_TX_OUT_ALL_PKT_H"},
266 	{true,	"IGU_RX_UNI_PKT_L"},
267 	{true,	"IGU_RX_UNI_PKT_H"},
268 	{true,	"IGU_RX_MULTI_PKT_L"},
269 	{true,	"IGU_RX_MULTI_PKT_H"},
270 
271 	{true,	"IGU_RX_BROAD_PKT_L"},
272 	{true,	"IGU_RX_BROAD_PKT_H"},
273 	{true,	"EGU_TX_OUT_ALL_PKT_L"},
274 	{true,	"EGU_TX_OUT_ALL_PKT_H"},
275 	{true,	"EGU_TX_UNI_PKT_L"},
276 	{true,	"EGU_TX_UNI_PKT_H"},
277 
278 	{true,	"EGU_TX_MULTI_PKT_L"},
279 	{true,	"EGU_TX_MULTI_PKT_H"},
280 	{true,	"EGU_TX_BROAD_PKT_L"},
281 	{true,	"EGU_TX_BROAD_PKT_H"},
282 	{true,	"IGU_TX_KEY_NUM_L"},
283 	{true,	"IGU_TX_KEY_NUM_H"},
284 
285 	{true,	"IGU_RX_NON_TUN_PKT_L"},
286 	{true,	"IGU_RX_NON_TUN_PKT_H"},
287 	{true,	"IGU_RX_TUN_PKT_L"},
288 	{true,	"IGU_RX_TUN_PKT_H"},
289 	{false,	"Reserved"},
290 	{false,	"Reserved"},
291 };
292 
293 static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
294 	{true, "tc_queue_num"},
295 	{true, "FSM_DFX_ST0"},
296 	{true, "FSM_DFX_ST1"},
297 	{true, "RPU_RX_PKT_DROP_CNT"},
298 	{true, "BUF_WAIT_TIMEOUT"},
299 	{true, "BUF_WAIT_TIMEOUT_QID"},
300 };
301 
302 static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
303 	{false, "Reserved"},
304 	{true,	"FIFO_DFX_ST0"},
305 	{true,	"FIFO_DFX_ST1"},
306 	{true,	"FIFO_DFX_ST2"},
307 	{true,	"FIFO_DFX_ST3"},
308 	{true,	"FIFO_DFX_ST4"},
309 
310 	{true,	"FIFO_DFX_ST5"},
311 	{false, "Reserved"},
312 	{false, "Reserved"},
313 	{false, "Reserved"},
314 	{false, "Reserved"},
315 	{false, "Reserved"},
316 };
317 
318 static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
319 	{false, "Reserved"},
320 	{true,	"NCSI_EGU_TX_FIFO_STS"},
321 	{true,	"NCSI_PAUSE_STATUS"},
322 	{true,	"NCSI_RX_CTRL_DMAC_ERR_CNT"},
323 	{true,	"NCSI_RX_CTRL_SMAC_ERR_CNT"},
324 	{true,	"NCSI_RX_CTRL_CKS_ERR_CNT"},
325 
326 	{true,	"NCSI_RX_CTRL_PKT_CNT"},
327 	{true,	"NCSI_RX_PT_DMAC_ERR_CNT"},
328 	{true,	"NCSI_RX_PT_SMAC_ERR_CNT"},
329 	{true,	"NCSI_RX_PT_PKT_CNT"},
330 	{true,	"NCSI_RX_FCS_ERR_CNT"},
331 	{true,	"NCSI_TX_CTRL_DMAC_ERR_CNT"},
332 
333 	{true,	"NCSI_TX_CTRL_SMAC_ERR_CNT"},
334 	{true,	"NCSI_TX_CTRL_PKT_CNT"},
335 	{true,	"NCSI_TX_PT_DMAC_ERR_CNT"},
336 	{true,	"NCSI_TX_PT_SMAC_ERR_CNT"},
337 	{true,	"NCSI_TX_PT_PKT_CNT"},
338 	{true,	"NCSI_TX_PT_PKT_TRUNC_CNT"},
339 
340 	{true,	"NCSI_TX_PT_PKT_ERR_CNT"},
341 	{true,	"NCSI_TX_CTRL_PKT_ERR_CNT"},
342 	{true,	"NCSI_RX_CTRL_PKT_TRUNC_CNT"},
343 	{true,	"NCSI_RX_CTRL_PKT_CFLIT_CNT"},
344 	{false, "Reserved"},
345 	{false, "Reserved"},
346 
347 	{true,	"NCSI_MAC_RX_OCTETS_OK"},
348 	{true,	"NCSI_MAC_RX_OCTETS_BAD"},
349 	{true,	"NCSI_MAC_RX_UC_PKTS"},
350 	{true,	"NCSI_MAC_RX_MC_PKTS"},
351 	{true,	"NCSI_MAC_RX_BC_PKTS"},
352 	{true,	"NCSI_MAC_RX_PKTS_64OCTETS"},
353 
354 	{true,	"NCSI_MAC_RX_PKTS_65TO127OCTETS"},
355 	{true,	"NCSI_MAC_RX_PKTS_128TO255OCTETS"},
356 	{true,	"NCSI_MAC_RX_PKTS_255TO511OCTETS"},
357 	{true,	"NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
358 	{true,	"NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
359 	{true,	"NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
360 
361 	{true,	"NCSI_MAC_RX_FCS_ERRORS"},
362 	{true,	"NCSI_MAC_RX_LONG_ERRORS"},
363 	{true,	"NCSI_MAC_RX_JABBER_ERRORS"},
364 	{true,	"NCSI_MAC_RX_RUNT_ERR_CNT"},
365 	{true,	"NCSI_MAC_RX_SHORT_ERR_CNT"},
366 	{true,	"NCSI_MAC_RX_FILT_PKT_CNT"},
367 
368 	{true,	"NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
369 	{true,	"NCSI_MAC_TX_OCTETS_OK"},
370 	{true,	"NCSI_MAC_TX_OCTETS_BAD"},
371 	{true,	"NCSI_MAC_TX_UC_PKTS"},
372 	{true,	"NCSI_MAC_TX_MC_PKTS"},
373 	{true,	"NCSI_MAC_TX_BC_PKTS"},
374 
375 	{true,	"NCSI_MAC_TX_PKTS_64OCTETS"},
376 	{true,	"NCSI_MAC_TX_PKTS_65TO127OCTETS"},
377 	{true,	"NCSI_MAC_TX_PKTS_128TO255OCTETS"},
378 	{true,	"NCSI_MAC_TX_PKTS_256TO511OCTETS"},
379 	{true,	"NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
380 	{true,	"NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
381 
382 	{true,	"NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
383 	{true,	"NCSI_MAC_TX_UNDERRUN"},
384 	{true,	"NCSI_MAC_TX_CRC_ERROR"},
385 	{true,	"NCSI_MAC_TX_PAUSE_FRAMES"},
386 	{true,	"NCSI_MAC_RX_PAD_PKTS"},
387 	{true,	"NCSI_MAC_RX_PAUSE_FRAMES"},
388 };
389 
390 static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
391 	{false, "Reserved"},
392 	{true,	"LGE_IGU_AFIFO_DFX_0"},
393 	{true,	"LGE_IGU_AFIFO_DFX_1"},
394 	{true,	"LGE_IGU_AFIFO_DFX_2"},
395 	{true,	"LGE_IGU_AFIFO_DFX_3"},
396 	{true,	"LGE_IGU_AFIFO_DFX_4"},
397 
398 	{true,	"LGE_IGU_AFIFO_DFX_5"},
399 	{true,	"LGE_IGU_AFIFO_DFX_6"},
400 	{true,	"LGE_IGU_AFIFO_DFX_7"},
401 	{true,	"LGE_EGU_AFIFO_DFX_0"},
402 	{true,	"LGE_EGU_AFIFO_DFX_1"},
403 	{true,	"LGE_EGU_AFIFO_DFX_2"},
404 
405 	{true,	"LGE_EGU_AFIFO_DFX_3"},
406 	{true,	"LGE_EGU_AFIFO_DFX_4"},
407 	{true,	"LGE_EGU_AFIFO_DFX_5"},
408 	{true,	"LGE_EGU_AFIFO_DFX_6"},
409 	{true,	"LGE_EGU_AFIFO_DFX_7"},
410 	{true,	"CGE_IGU_AFIFO_DFX_0"},
411 
412 	{true,	"CGE_IGU_AFIFO_DFX_1"},
413 	{true,	"CGE_EGU_AFIFO_DFX_0"},
414 	{true,	"CGE_EGU_AFIFO_DFX_1"},
415 	{false, "Reserved"},
416 	{false, "Reserved"},
417 	{false, "Reserved"},
418 };
419 
420 static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
421 	{false, "Reserved"},
422 	{true,	"DROP_FROM_PRT_PKT_CNT"},
423 	{true,	"DROP_FROM_HOST_PKT_CNT"},
424 	{true,	"DROP_TX_VLAN_PROC_CNT"},
425 	{true,	"DROP_MNG_CNT"},
426 	{true,	"DROP_FD_CNT"},
427 
428 	{true,	"DROP_NO_DST_CNT"},
429 	{true,	"DROP_MC_MBID_FULL_CNT"},
430 	{true,	"DROP_SC_FILTERED"},
431 	{true,	"PPP_MC_DROP_PKT_CNT"},
432 	{true,	"DROP_PT_CNT"},
433 	{true,	"DROP_MAC_ANTI_SPOOF_CNT"},
434 
435 	{true,	"DROP_IG_VFV_CNT"},
436 	{true,	"DROP_IG_PRTV_CNT"},
437 	{true,	"DROP_CNM_PFC_PAUSE_CNT"},
438 	{true,	"DROP_TORUS_TC_CNT"},
439 	{true,	"DROP_TORUS_LPBK_CNT"},
440 	{true,	"PPP_HFS_STS"},
441 
442 	{true,	"PPP_MC_RSLT_STS"},
443 	{true,	"PPP_P3U_STS"},
444 	{true,	"PPP_RSLT_DESCR_STS"},
445 	{true,	"PPP_UMV_STS_0"},
446 	{true,	"PPP_UMV_STS_1"},
447 	{true,	"PPP_VFV_STS"},
448 
449 	{true,	"PPP_GRO_KEY_CNT"},
450 	{true,	"PPP_GRO_INFO_CNT"},
451 	{true,	"PPP_GRO_DROP_CNT"},
452 	{true,	"PPP_GRO_OUT_CNT"},
453 	{true,	"PPP_GRO_KEY_MATCH_DATA_CNT"},
454 	{true,	"PPP_GRO_KEY_MATCH_TCAM_CNT"},
455 
456 	{true,	"PPP_GRO_INFO_MATCH_CNT"},
457 	{true,	"PPP_GRO_FREE_ENTRY_CNT"},
458 	{true,	"PPP_GRO_INNER_DFX_SIGNAL"},
459 	{false, "Reserved"},
460 	{false, "Reserved"},
461 	{false, "Reserved"},
462 
463 	{true,	"GET_RX_PKT_CNT_L"},
464 	{true,	"GET_RX_PKT_CNT_H"},
465 	{true,	"GET_TX_PKT_CNT_L"},
466 	{true,	"GET_TX_PKT_CNT_H"},
467 	{true,	"SEND_UC_PRT2HOST_PKT_CNT_L"},
468 	{true,	"SEND_UC_PRT2HOST_PKT_CNT_H"},
469 
470 	{true,	"SEND_UC_PRT2PRT_PKT_CNT_L"},
471 	{true,	"SEND_UC_PRT2PRT_PKT_CNT_H"},
472 	{true,	"SEND_UC_HOST2HOST_PKT_CNT_L"},
473 	{true,	"SEND_UC_HOST2HOST_PKT_CNT_H"},
474 	{true,	"SEND_UC_HOST2PRT_PKT_CNT_L"},
475 	{true,	"SEND_UC_HOST2PRT_PKT_CNT_H"},
476 
477 	{true,	"SEND_MC_FROM_PRT_CNT_L"},
478 	{true,	"SEND_MC_FROM_PRT_CNT_H"},
479 	{true,	"SEND_MC_FROM_HOST_CNT_L"},
480 	{true,	"SEND_MC_FROM_HOST_CNT_H"},
481 	{true,	"SSU_MC_RD_CNT_L"},
482 	{true,	"SSU_MC_RD_CNT_H"},
483 
484 	{true,	"SSU_MC_DROP_CNT_L"},
485 	{true,	"SSU_MC_DROP_CNT_H"},
486 	{true,	"SSU_MC_RD_PKT_CNT_L"},
487 	{true,	"SSU_MC_RD_PKT_CNT_H"},
488 	{true,	"PPP_MC_2HOST_PKT_CNT_L"},
489 	{true,	"PPP_MC_2HOST_PKT_CNT_H"},
490 
491 	{true,	"PPP_MC_2PRT_PKT_CNT_L"},
492 	{true,	"PPP_MC_2PRT_PKT_CNT_H"},
493 	{true,	"NTSNOS_PKT_CNT_L"},
494 	{true,	"NTSNOS_PKT_CNT_H"},
495 	{true,	"NTUP_PKT_CNT_L"},
496 	{true,	"NTUP_PKT_CNT_H"},
497 
498 	{true,	"NTLCL_PKT_CNT_L"},
499 	{true,	"NTLCL_PKT_CNT_H"},
500 	{true,	"NTTGT_PKT_CNT_L"},
501 	{true,	"NTTGT_PKT_CNT_H"},
502 	{true,	"RTNS_PKT_CNT_L"},
503 	{true,	"RTNS_PKT_CNT_H"},
504 
505 	{true,	"RTLPBK_PKT_CNT_L"},
506 	{true,	"RTLPBK_PKT_CNT_H"},
507 	{true,	"NR_PKT_CNT_L"},
508 	{true,	"NR_PKT_CNT_H"},
509 	{true,	"RR_PKT_CNT_L"},
510 	{true,	"RR_PKT_CNT_H"},
511 
512 	{true,	"MNG_TBL_HIT_CNT_L"},
513 	{true,	"MNG_TBL_HIT_CNT_H"},
514 	{true,	"FD_TBL_HIT_CNT_L"},
515 	{true,	"FD_TBL_HIT_CNT_H"},
516 	{true,	"FD_LKUP_CNT_L"},
517 	{true,	"FD_LKUP_CNT_H"},
518 
519 	{true,	"BC_HIT_CNT_L"},
520 	{true,	"BC_HIT_CNT_H"},
521 	{true,	"UM_TBL_UC_HIT_CNT_L"},
522 	{true,	"UM_TBL_UC_HIT_CNT_H"},
523 	{true,	"UM_TBL_MC_HIT_CNT_L"},
524 	{true,	"UM_TBL_MC_HIT_CNT_H"},
525 
526 	{true,	"UM_TBL_VMDQ1_HIT_CNT_L"},
527 	{true,	"UM_TBL_VMDQ1_HIT_CNT_H"},
528 	{true,	"MTA_TBL_HIT_CNT_L"},
529 	{true,	"MTA_TBL_HIT_CNT_H"},
530 	{true,	"FWD_BONDING_HIT_CNT_L"},
531 	{true,	"FWD_BONDING_HIT_CNT_H"},
532 
533 	{true,	"PROMIS_TBL_HIT_CNT_L"},
534 	{true,	"PROMIS_TBL_HIT_CNT_H"},
535 	{true,	"GET_TUNL_PKT_CNT_L"},
536 	{true,	"GET_TUNL_PKT_CNT_H"},
537 	{true,	"GET_BMC_PKT_CNT_L"},
538 	{true,	"GET_BMC_PKT_CNT_H"},
539 
540 	{true,	"SEND_UC_PRT2BMC_PKT_CNT_L"},
541 	{true,	"SEND_UC_PRT2BMC_PKT_CNT_H"},
542 	{true,	"SEND_UC_HOST2BMC_PKT_CNT_L"},
543 	{true,	"SEND_UC_HOST2BMC_PKT_CNT_H"},
544 	{true,	"SEND_UC_BMC2HOST_PKT_CNT_L"},
545 	{true,	"SEND_UC_BMC2HOST_PKT_CNT_H"},
546 
547 	{true,	"SEND_UC_BMC2PRT_PKT_CNT_L"},
548 	{true,	"SEND_UC_BMC2PRT_PKT_CNT_H"},
549 	{true,	"PPP_MC_2BMC_PKT_CNT_L"},
550 	{true,	"PPP_MC_2BMC_PKT_CNT_H"},
551 	{true,	"VLAN_MIRR_CNT_L"},
552 	{true,	"VLAN_MIRR_CNT_H"},
553 
554 	{true,	"IG_MIRR_CNT_L"},
555 	{true,	"IG_MIRR_CNT_H"},
556 	{true,	"EG_MIRR_CNT_L"},
557 	{true,	"EG_MIRR_CNT_H"},
558 	{true,	"RX_DEFAULT_HOST_HIT_CNT_L"},
559 	{true,	"RX_DEFAULT_HOST_HIT_CNT_H"},
560 
561 	{true,	"LAN_PAIR_CNT_L"},
562 	{true,	"LAN_PAIR_CNT_H"},
563 	{true,	"UM_TBL_MC_HIT_PKT_CNT_L"},
564 	{true,	"UM_TBL_MC_HIT_PKT_CNT_H"},
565 	{true,	"MTA_TBL_HIT_PKT_CNT_L"},
566 	{true,	"MTA_TBL_HIT_PKT_CNT_H"},
567 
568 	{true,	"PROMIS_TBL_HIT_PKT_CNT_L"},
569 	{true,	"PROMIS_TBL_HIT_PKT_CNT_H"},
570 	{false, "Reserved"},
571 	{false, "Reserved"},
572 	{false, "Reserved"},
573 	{false, "Reserved"},
574 };
575 
576 static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
577 	{false, "Reserved"},
578 	{true,	"FSM_DFX_ST0"},
579 	{true,	"FSM_DFX_ST1"},
580 	{true,	"FSM_DFX_ST2"},
581 	{true,	"FIFO_DFX_ST0"},
582 	{true,	"FIFO_DFX_ST1"},
583 
584 	{true,	"FIFO_DFX_ST2"},
585 	{true,	"FIFO_DFX_ST3"},
586 	{true,	"FIFO_DFX_ST4"},
587 	{true,	"FIFO_DFX_ST5"},
588 	{true,	"FIFO_DFX_ST6"},
589 	{true,	"FIFO_DFX_ST7"},
590 
591 	{true,	"FIFO_DFX_ST8"},
592 	{true,	"FIFO_DFX_ST9"},
593 	{true,	"FIFO_DFX_ST10"},
594 	{true,	"FIFO_DFX_ST11"},
595 	{true,	"Q_CREDIT_VLD_0"},
596 	{true,	"Q_CREDIT_VLD_1"},
597 
598 	{true,	"Q_CREDIT_VLD_2"},
599 	{true,	"Q_CREDIT_VLD_3"},
600 	{true,	"Q_CREDIT_VLD_4"},
601 	{true,	"Q_CREDIT_VLD_5"},
602 	{true,	"Q_CREDIT_VLD_6"},
603 	{true,	"Q_CREDIT_VLD_7"},
604 
605 	{true,	"Q_CREDIT_VLD_8"},
606 	{true,	"Q_CREDIT_VLD_9"},
607 	{true,	"Q_CREDIT_VLD_10"},
608 	{true,	"Q_CREDIT_VLD_11"},
609 	{true,	"Q_CREDIT_VLD_12"},
610 	{true,	"Q_CREDIT_VLD_13"},
611 
612 	{true,	"Q_CREDIT_VLD_14"},
613 	{true,	"Q_CREDIT_VLD_15"},
614 	{true,	"Q_CREDIT_VLD_16"},
615 	{true,	"Q_CREDIT_VLD_17"},
616 	{true,	"Q_CREDIT_VLD_18"},
617 	{true,	"Q_CREDIT_VLD_19"},
618 
619 	{true,	"Q_CREDIT_VLD_20"},
620 	{true,	"Q_CREDIT_VLD_21"},
621 	{true,	"Q_CREDIT_VLD_22"},
622 	{true,	"Q_CREDIT_VLD_23"},
623 	{true,	"Q_CREDIT_VLD_24"},
624 	{true,	"Q_CREDIT_VLD_25"},
625 
626 	{true,	"Q_CREDIT_VLD_26"},
627 	{true,	"Q_CREDIT_VLD_27"},
628 	{true,	"Q_CREDIT_VLD_28"},
629 	{true,	"Q_CREDIT_VLD_29"},
630 	{true,	"Q_CREDIT_VLD_30"},
631 	{true,	"Q_CREDIT_VLD_31"},
632 
633 	{true,	"GRO_BD_SERR_CNT"},
634 	{true,	"GRO_CONTEXT_SERR_CNT"},
635 	{true,	"RX_STASH_CFG_SERR_CNT"},
636 	{true,	"AXI_RD_FBD_SERR_CNT"},
637 	{true,	"GRO_BD_MERR_CNT"},
638 	{true,	"GRO_CONTEXT_MERR_CNT"},
639 
640 	{true,	"RX_STASH_CFG_MERR_CNT"},
641 	{true,	"AXI_RD_FBD_MERR_CNT"},
642 	{false, "Reserved"},
643 	{false, "Reserved"},
644 	{false, "Reserved"},
645 	{false, "Reserved"},
646 };
647 
648 static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
649 	{true, "q_num"},
650 	{true, "RCB_CFG_RX_RING_TAIL"},
651 	{true, "RCB_CFG_RX_RING_HEAD"},
652 	{true, "RCB_CFG_RX_RING_FBDNUM"},
653 	{true, "RCB_CFG_RX_RING_OFFSET"},
654 	{true, "RCB_CFG_RX_RING_FBDOFFSET"},
655 
656 	{true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
657 	{true, "RCB_CFG_TX_RING_TAIL"},
658 	{true, "RCB_CFG_TX_RING_HEAD"},
659 	{true, "RCB_CFG_TX_RING_FBDNUM"},
660 	{true, "RCB_CFG_TX_RING_OFFSET"},
661 	{true, "RCB_CFG_TX_RING_EBDNUM"},
662 };
663 
664 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
665 	{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
666 	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
667 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
668 		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
669 		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
670 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
671 	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
672 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
673 		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
674 		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
675 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
676 	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
677 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
678 		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
679 		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
680 	{ .cmd = HNAE3_DBG_CMD_REG_SSU,
681 	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
682 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
683 		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
684 		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
685 	{ .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
686 	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
687 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
688 		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
689 		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
690 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
691 	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
692 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
693 		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
694 		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
695 	{ .cmd = HNAE3_DBG_CMD_REG_RPU,
696 	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
697 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
698 		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
699 		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
700 	{ .cmd = HNAE3_DBG_CMD_REG_NCSI,
701 	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
702 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
703 		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
704 		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
705 	{ .cmd = HNAE3_DBG_CMD_REG_RTC,
706 	  .dfx_msg = &hclge_dbg_rtc_reg[0],
707 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
708 		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
709 		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
710 	{ .cmd = HNAE3_DBG_CMD_REG_PPP,
711 	  .dfx_msg = &hclge_dbg_ppp_reg[0],
712 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
713 		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
714 		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
715 	{ .cmd = HNAE3_DBG_CMD_REG_RCB,
716 	  .dfx_msg = &hclge_dbg_rcb_reg[0],
717 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
718 		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
719 		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
720 	{ .cmd = HNAE3_DBG_CMD_REG_TQP,
721 	  .dfx_msg = &hclge_dbg_tqp_reg[0],
722 	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
723 		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
724 		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
725 };
726 
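/* Format a function id for debugfs output: id 0 is the PF, a non-zero
 * id N maps to "vf(N - 1)".
 */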
727 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
728 {
729 	if (id)
730 		sprintf(buf, "vf%u", id - 1U);
731 	else
732 		sprintf(buf, "pf");
733 
734 	return buf;
735 }
736 
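/* Look up how many buffer descriptors (BDs) are needed to dump one DFX
 * register block. The BD-number query returns the counts for all blocks
 * packed into the data words of HCLGE_GET_DFX_REG_TYPE_CNT descriptors,
 * and @offset indexes that flattened array.
 */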
737 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
738 				    u32 *bd_num)
739 {
740 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
741 	int entries_per_desc;
742 	int index;
743 	int ret;
744 
745 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
746 	if (ret) {
747 		dev_err(&hdev->pdev->dev,
748 			"failed to get dfx bd_num, offset = %d, ret = %d\n",
749 			offset, ret);
750 		return ret;
751 	}
752 
753 	entries_per_desc = ARRAY_SIZE(desc[0].data);
754 	index = offset % entries_per_desc;
755 
756 	*bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
757 	if (!(*bd_num)) {
758 		dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
759 		return -EINVAL;
760 	}
761 
762 	return 0;
763 }
764 
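/* Send a chained read of @bd_num descriptors for opcode @cmd. The block
 * or queue @index is written into data[0] of the first descriptor, and
 * every descriptor except the last carries HCLGE_COMM_CMD_FLAG_NEXT.
 */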
765 int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
766 		       int index, int bd_num, enum hclge_opcode_type cmd)
767 {
768 	struct hclge_desc *desc = desc_src;
769 	int ret, i;
770 
771 	hclge_cmd_setup_basic_desc(desc, cmd, true);
772 	desc->data[0] = cpu_to_le32(index);
773 
774 	for (i = 1; i < bd_num; i++) {
775 		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
776 		desc++;
777 		hclge_cmd_setup_basic_desc(desc, cmd, true);
778 	}
779 
780 	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
781 	if (ret)
782 		dev_err(&hdev->pdev->dev,
783 			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
784 	return ret;
785 }
786 
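/* Dump the per-queue DFX registers: print the item names as a legend and
 * a header row, then reissue the DFX command for each TQP index and print
 * the returned data words as one row per queue.
 */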
787 static int
788 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
789 		       const struct hclge_dbg_reg_type_info *reg_info,
790 		       struct seq_file *s)
791 {
792 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
793 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
794 	u32 index, entry, i, cnt, min_num;
795 	struct hclge_desc *desc_src;
796 	struct hclge_desc *desc;
797 	int bd_num, ret;
798 
799 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
800 	if (ret)
801 		return ret;
802 
803 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
804 	if (!desc_src)
805 		return -ENOMEM;
806 
807 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
808 
809 	for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
810 		seq_printf(s, "item%u = %s\n", cnt++, dfx_message->message);
811 
812 	for (i = 0; i < cnt; i++)
813 		seq_printf(s, "item%u\t", i);
814 
815 	seq_puts(s, "\n");
816 
817 	for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
818 		dfx_message = reg_info->dfx_msg;
819 		desc = desc_src;
820 		ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
821 					 reg_msg->cmd);
822 		if (ret)
823 			break;
824 
825 		for (i = 0; i < min_num; i++, dfx_message++) {
826 			entry = i % HCLGE_DESC_DATA_LEN;
827 			if (i > 0 && !entry)
828 				desc++;
829 
830 			seq_printf(s, "%#x\t", le32_to_cpu(desc->data[entry]));
831 		}
832 		seq_puts(s, "\n");
833 	}
834 
835 	kfree(desc_src);
836 	return ret;
837 }
838 
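/* Dump a common DFX register block: issue one chained read, then print
 * only the named entries (flag set), stepping to the next descriptor
 * every HCLGE_DESC_DATA_LEN data words.
 */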
839 static int
840 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
841 			  const struct hclge_dbg_reg_type_info *reg_info,
842 			  struct seq_file *s)
843 {
844 	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
845 	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
846 	struct hclge_desc *desc_src;
847 	int bd_num, min_num, ret, i;
848 	struct hclge_desc *desc;
849 	u32 entry;
850 
851 	ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
852 	if (ret)
853 		return ret;
854 
855 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
856 	if (!desc_src)
857 		return -ENOMEM;
858 
859 	desc = desc_src;
860 
861 	ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
862 	if (ret) {
863 		kfree(desc);
864 		return ret;
865 	}
866 
867 	min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
868 
869 	for (i = 0; i < min_num; i++, dfx_message++) {
870 		entry = i % HCLGE_DESC_DATA_LEN;
871 		if (i > 0 && !entry)
872 			desc++;
873 		if (!dfx_message->flag)
874 			continue;
875 
876 		seq_printf(s, "%s: %#x\n", dfx_message->message,
877 			   le32_to_cpu(desc->data[entry]));
878 	}
879 
880 	kfree(desc_src);
881 	return 0;
882 }
883 
884 static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
885 	{HCLGE_MAC_TX_EN_B,  "mac_trans_en"},
886 	{HCLGE_MAC_RX_EN_B,  "mac_rcv_en"},
887 	{HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
888 	{HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
889 	{HCLGE_MAC_1588_TX_B, "1588_trans_en"},
890 	{HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
891 	{HCLGE_MAC_APP_LP_B,  "mac_app_loop_en"},
892 	{HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
893 	{HCLGE_MAC_FCS_TX_B,  "mac_fcs_tx_en"},
894 	{HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
895 	{HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
896 	{HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
897 	{HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
898 	{HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
899 };
900 
901 static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev,
902 					    struct seq_file *s)
903 {
904 	struct hclge_config_mac_mode_cmd *req;
905 	struct hclge_desc desc;
906 	u32 loop_en, i, offset;
907 	int ret;
908 
909 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
910 
911 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
912 	if (ret) {
913 		dev_err(&hdev->pdev->dev,
914 			"failed to dump mac enable status, ret = %d\n", ret);
915 		return ret;
916 	}
917 
918 	req = (struct hclge_config_mac_mode_cmd *)desc.data;
919 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
920 
921 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
922 		offset = hclge_dbg_mac_en_status[i].offset;
923 		seq_printf(s, "%s: %#x\n", hclge_dbg_mac_en_status[i].message,
924 			   hnae3_get_bit(loop_en, offset));
925 	}
926 
927 	return 0;
928 }
929 
930 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev,
931 					 struct seq_file *s)
932 {
933 	struct hclge_config_max_frm_size_cmd *req;
934 	struct hclge_desc desc;
935 	int ret;
936 
937 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
938 
939 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
940 	if (ret) {
941 		dev_err(&hdev->pdev->dev,
942 			"failed to dump mac frame size, ret = %d\n", ret);
943 		return ret;
944 	}
945 
946 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
947 
948 	seq_printf(s, "max_frame_size: %u\n", le16_to_cpu(req->max_frm_size));
949 	seq_printf(s, "min_frame_size: %u\n", req->min_frm_size);
950 
951 	return 0;
952 }
953 
954 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev,
955 					   struct seq_file *s)
956 {
957 #define HCLGE_MAC_SPEED_SHIFT	0
958 #define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
959 #define HCLGE_MAC_DUPLEX_SHIFT	7
960 
961 	struct hclge_config_mac_speed_dup_cmd *req;
962 	struct hclge_desc desc;
963 	int ret;
964 
965 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
966 
967 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
968 	if (ret) {
969 		dev_err(&hdev->pdev->dev,
970 			"failed to dump mac speed duplex, ret = %d\n", ret);
971 		return ret;
972 	}
973 
974 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
975 
976 	seq_printf(s, "speed: %#lx\n",
977 		   hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
978 				   HCLGE_MAC_SPEED_SHIFT));
979 	seq_printf(s, "duplex: %#x\n",
980 		   hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
981 	return 0;
982 }
983 
984 static int hclge_dbg_dump_mac(struct seq_file *s, void *data)
985 {
986 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
987 	int ret;
988 
989 	ret = hclge_dbg_dump_mac_enable_status(hdev, s);
990 	if (ret)
991 		return ret;
992 
993 	ret = hclge_dbg_dump_mac_frame_size(hdev, s);
994 	if (ret)
995 		return ret;
996 
997 	return hclge_dbg_dump_mac_speed_duplex(hdev, s);
998 }
999 
1000 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, struct seq_file *s)
1001 {
1002 	struct hclge_dbg_bitmap_cmd req;
1003 	struct hclge_desc desc;
1004 	u16 qset_id, qset_num;
1005 	int ret;
1006 
1007 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
1008 	if (ret)
1009 		return ret;
1010 
1011 	seq_puts(s, "qset_id  roce_qset_mask  nic_qset_mask  ");
1012 	seq_puts(s, "qset_shaping_pass  qset_bp_status\n");
1013 	for (qset_id = 0; qset_id < qset_num; qset_id++) {
1014 		ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
1015 					 HCLGE_OPC_QSET_DFX_STS);
1016 		if (ret)
1017 			return ret;
1018 
1019 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
1020 
1021 		seq_printf(s, "%04u     %#-16x%#-15x%#-19x%#-x\n",
1022 			   qset_id, req.bit0, req.bit1, req.bit2, req.bit3);
1023 	}
1024 
1025 	return 0;
1026 }
1027 
1028 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, struct seq_file *s)
1029 {
1030 	struct hclge_dbg_bitmap_cmd req;
1031 	struct hclge_desc desc;
1032 	u8 pri_id, pri_num;
1033 	int ret;
1034 
1035 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
1036 	if (ret)
1037 		return ret;
1038 
1039 	seq_puts(s, "pri_id  pri_mask  pri_cshaping_pass  pri_pshaping_pass\n");
1040 	for (pri_id = 0; pri_id < pri_num; pri_id++) {
1041 		ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
1042 					 HCLGE_OPC_PRI_DFX_STS);
1043 		if (ret)
1044 			return ret;
1045 
1046 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
1047 
1048 		seq_printf(s, "%03u     %#-10x%#-19x%#-x\n",
1049 			   pri_id, req.bit0, req.bit1, req.bit2);
1050 	}
1051 
1052 	return 0;
1053 }
1054 
1055 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, struct seq_file *s)
1056 {
1057 	struct hclge_dbg_bitmap_cmd req;
1058 	struct hclge_desc desc;
1059 	u8 pg_id;
1060 	int ret;
1061 
1062 	seq_puts(s, "pg_id  pg_mask  pg_cshaping_pass  pg_pshaping_pass\n");
1063 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
1064 		ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
1065 					 HCLGE_OPC_PG_DFX_STS);
1066 		if (ret)
1067 			return ret;
1068 
1069 		req.bitmap = (u8)le32_to_cpu(desc.data[1]);
1070 
1071 		seq_printf(s, "%03u    %#-9x%#-18x%#-x\n",
1072 			   pg_id, req.bit0, req.bit1, req.bit2);
1073 	}
1074 
1075 	return 0;
1076 }
1077 
1078 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, struct seq_file *s)
1079 {
1080 	struct hclge_desc desc;
1081 	u16 nq_id;
1082 	int ret;
1083 
1084 	seq_puts(s, "nq_id  sch_nic_queue_cnt  sch_roce_queue_cnt\n");
1085 	for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
1086 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
1087 					 HCLGE_OPC_SCH_NQ_CNT);
1088 		if (ret)
1089 			return ret;
1090 
1091 		seq_printf(s, "%04u   %#-19x",
1092 			   nq_id, le32_to_cpu(desc.data[1]));
1093 
1094 		ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
1095 					 HCLGE_OPC_SCH_RQ_CNT);
1096 		if (ret)
1097 			return ret;
1098 
1099 		seq_printf(s, "%#-x\n", le32_to_cpu(desc.data[1]));
1100 	}
1101 
1102 	return 0;
1103 }
1104 
1105 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, struct seq_file *s)
1106 {
1107 	struct hclge_dbg_bitmap_cmd req;
1108 	struct hclge_desc desc;
1109 	u8 port_id = 0;
1110 	int ret;
1111 
1112 	ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
1113 				 HCLGE_OPC_PORT_DFX_STS);
1114 	if (ret)
1115 		return ret;
1116 
1117 	req.bitmap = (u8)le32_to_cpu(desc.data[1]);
1118 
1119 	seq_printf(s, "port_mask: %#x\n", req.bit0);
1120 	seq_printf(s, "port_shaping_pass: %#x\n", req.bit1);
1121 
1122 	return 0;
1123 }
1124 
1125 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, struct seq_file *s)
1126 {
1127 	struct hclge_desc desc[2];
1128 	u8 port_id = 0;
1129 	int ret;
1130 
1131 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
1132 				 HCLGE_OPC_TM_INTERNAL_CNT);
1133 	if (ret)
1134 		return ret;
1135 
1136 	seq_printf(s, "SCH_NIC_NUM: %#x\n", le32_to_cpu(desc[0].data[1]));
1137 	seq_printf(s, "SCH_ROCE_NUM: %#x\n", le32_to_cpu(desc[0].data[2]));
1138 
1139 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
1140 				 HCLGE_OPC_TM_INTERNAL_STS);
1141 	if (ret)
1142 		return ret;
1143 
1144 	seq_printf(s, "pri_bp: %#x\n", le32_to_cpu(desc[0].data[1]));
1145 	seq_printf(s, "fifo_dfx_info: %#x\n", le32_to_cpu(desc[0].data[2]));
1146 	seq_printf(s, "sch_roce_fifo_afull_gap: %#x\n",
1147 		   le32_to_cpu(desc[0].data[3]));
1148 	seq_printf(s, "tx_private_waterline: %#x\n",
1149 		   le32_to_cpu(desc[0].data[4]));
1150 	seq_printf(s, "tm_bypass_en: %#x\n", le32_to_cpu(desc[0].data[5]));
1151 	seq_printf(s, "SSU_TM_BYPASS_EN: %#x\n", le32_to_cpu(desc[1].data[0]));
1152 	seq_printf(s, "SSU_RESERVE_CFG: %#x\n", le32_to_cpu(desc[1].data[1]));
1153 
1154 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
1155 		return 0;
1156 
1157 	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
1158 				 HCLGE_OPC_TM_INTERNAL_STS_1);
1159 	if (ret)
1160 		return ret;
1161 
1162 	seq_printf(s, "TC_MAP_SEL: %#x\n", le32_to_cpu(desc[0].data[1]));
1163 	seq_printf(s, "IGU_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[2]));
1164 	seq_printf(s, "MAC_PFC_PRI_EN: %#x\n", le32_to_cpu(desc[0].data[3]));
1165 	seq_printf(s, "IGU_PRI_MAP_TC_CFG: %#x\n",
1166 		   le32_to_cpu(desc[0].data[4]));
1167 	seq_printf(s, "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
1168 		   le32_to_cpu(desc[0].data[5]));
1169 
1170 	return 0;
1171 }
1172 
1173 static int hclge_dbg_dump_dcb(struct seq_file *s, void *data)
1174 {
1175 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1176 	int ret;
1177 
1178 	ret = hclge_dbg_dump_dcb_qset(hdev, s);
1179 	if (ret)
1180 		return ret;
1181 
1182 	ret = hclge_dbg_dump_dcb_pri(hdev, s);
1183 	if (ret)
1184 		return ret;
1185 
1186 	ret = hclge_dbg_dump_dcb_pg(hdev, s);
1187 	if (ret)
1188 		return ret;
1189 
1190 	ret = hclge_dbg_dump_dcb_queue(hdev, s);
1191 	if (ret)
1192 		return ret;
1193 
1194 	ret = hclge_dbg_dump_dcb_port(hdev, s);
1195 	if (ret)
1196 		return ret;
1197 
1198 	return hclge_dbg_dump_dcb_tm(hdev, s);
1199 }
1200 
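/* Walk hclge_dbg_reg_info[] and dump every entry registered for @cmd;
 * TQP registers take the per-queue path, all other blocks the common one.
 */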
1201 static int hclge_dbg_dump_reg_cmd(enum hnae3_dbg_cmd cmd, struct seq_file *s)
1202 {
1203 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1204 	const struct hclge_dbg_reg_type_info *reg_info;
1205 	int ret = 0;
1206 	u32 i;
1207 
1208 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
1209 		reg_info = &hclge_dbg_reg_info[i];
1210 		if (cmd == reg_info->cmd) {
1211 			if (cmd == HNAE3_DBG_CMD_REG_TQP)
1212 				return hclge_dbg_dump_reg_tqp(hdev,
1213 							      reg_info, s);
1214 
1215 			ret = hclge_dbg_dump_reg_common(hdev, reg_info, s);
1216 			if (ret)
1217 				break;
1218 		}
1219 	}
1220 
1221 	return ret;
1222 }
1223 
1224 static int hclge_dbg_dump_bios_reg_cmd(struct seq_file *s, void *data)
1225 {
1226 	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_BIOS_COMMON, s);
1227 }
1228 
1229 static int hclge_dbg_dump_ssu_reg_cmd(struct seq_file *s, void *data)
1230 {
1231 	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_SSU, s);
1232 }
1233 
1234 static int hclge_dbg_dump_igu_egu_reg_cmd(struct seq_file *s, void *data)
1235 {
1236 	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_IGU_EGU, s);
1237 }
1238 
1239 static int hclge_dbg_dump_rpu_reg_cmd(struct seq_file *s, void *data)
1240 {
1241 	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RPU, s);
1242 }
1243 
1244 static int hclge_dbg_dump_ncsi_reg_cmd(struct seq_file *s, void *data)
1245 {
1246 	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_NCSI, s);
1247 }
1248 
1249 static int hclge_dbg_dump_rtc_reg_cmd(struct seq_file *s, void *data)
1250 {
1251 	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RTC, s);
1252 }
1253 
1254 static int hclge_dbg_dump_ppp_reg_cmd(struct seq_file *s, void *data)
1255 {
1256 	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_PPP, s);
1257 }
1258 
1259 static int hclge_dbg_dump_rcb_reg_cmd(struct seq_file *s, void *data)
1260 {
1261 	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_RCB, s);
1262 }
1263 
1264 static int hclge_dbg_dump_tqp_reg_cmd(struct seq_file *s, void *data)
1265 {
1266 	return hclge_dbg_dump_reg_cmd(HNAE3_DBG_CMD_REG_TQP, s);
1267 }
1268 
1269 static int hclge_dbg_dump_tc(struct seq_file *s, void *data)
1270 {
1271 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1272 	struct hclge_ets_tc_weight_cmd *ets_weight;
1273 	const char *sch_mode_str;
1274 	struct hclge_desc desc;
1275 	int ret;
1276 	u8 i;
1277 
1278 	if (!hnae3_dev_dcb_supported(hdev)) {
1279 		dev_err(&hdev->pdev->dev,
1280 			"Only DCB-supported dev supports tc\n");
1281 		return -EOPNOTSUPP;
1282 	}
1283 
1284 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
1285 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1286 	if (ret) {
1287 		dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
1288 			ret);
1289 		return ret;
1290 	}
1291 
1292 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
1293 
1294 	seq_printf(s, "enabled tc number: %u\n", hdev->tm_info.num_tc);
1295 	seq_printf(s, "weight_offset: %u\n", ets_weight->weight_offset);
1296 
1297 	seq_puts(s, "TC    MODE  WEIGHT\n");
1298 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1299 		sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
1300 		seq_printf(s, "%u     %4s    %3u\n", i, sch_mode_str,
1301 			   ets_weight->tc_weight[i]);
1302 	}
1303 
1304 	return 0;
1305 }
1306 
1307 static void hclge_dbg_fill_shaper_content(struct seq_file *s,
1308 					  struct hclge_tm_shaper_para *para)
1309 {
1310 	seq_printf(s, "%-8u%-8u%-8u%-8u%-8u%-8u%-14u", para->ir_b, para->ir_u,
1311 		   para->ir_s, para->bs_b, para->bs_s, para->flag, para->rate);
1312 }
1313 
1314 static int hclge_dbg_dump_tm_pg(struct seq_file *s, void *data)
1315 {
1316 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
1317 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1318 	u8 pg_id, sch_mode, weight, pri_bit_map;
1319 	const char *sch_mode_str;
1320 	int ret;
1321 
1322 	seq_puts(s, "ID  PRI_MAP  MODE  DWRR  C_IR_B  C_IR_U  C_IR_S  C_BS_B  ");
1323 	seq_puts(s, "C_BS_S  C_FLAG  C_RATE(Mbps)  P_IR_B  P_IR_U  P_IR_S  ");
1324 	seq_puts(s, "P_BS_B  P_BS_S  P_FLAG  P_RATE(Mbps)\n");
1325 
1326 	for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
1327 		ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
1328 		if (ret)
1329 			return ret;
1330 
1331 		ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
1332 		if (ret)
1333 			return ret;
1334 
1335 		ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
1336 		if (ret)
1337 			return ret;
1338 
1339 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
1340 					     HCLGE_OPC_TM_PG_C_SHAPPING,
1341 					     &c_shaper_para);
1342 		if (ret)
1343 			return ret;
1344 
1345 		ret = hclge_tm_get_pg_shaper(hdev, pg_id,
1346 					     HCLGE_OPC_TM_PG_P_SHAPPING,
1347 					     &p_shaper_para);
1348 		if (ret)
1349 			return ret;
1350 
1351 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1352 				       "sp";
1353 
1354 		seq_printf(s, "%02u  0x%-7x%-6s%-6u", pg_id, pri_bit_map,
1355 			   sch_mode_str, weight);
1356 		hclge_dbg_fill_shaper_content(s, &c_shaper_para);
1357 		hclge_dbg_fill_shaper_content(s, &p_shaper_para);
1358 		seq_puts(s, "\n");
1359 	}
1360 
1361 	return 0;
1362 }
1363 
1364 static int hclge_dbg_dump_tm_port(struct seq_file *s, void *data)
1365 {
1366 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1367 	struct hclge_tm_shaper_para shaper_para;
1368 	int ret;
1369 
1370 	ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
1371 	if (ret)
1372 		return ret;
1373 
1374 	seq_puts(s, "IR_B  IR_U  IR_S  BS_B  BS_S  FLAG  RATE(Mbps)\n");
1375 	seq_printf(s, "%3u   %3u   %3u   %3u   %3u     %1u   %6u\n",
1376 		   shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
1377 		   shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
1378 		   shaper_para.rate);
1379 
1380 	return 0;
1381 }
1382 
1383 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
1384 					 struct seq_file *s)
1385 {
1386 	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
1387 	struct hclge_bp_to_qs_map_cmd *map;
1388 	struct hclge_desc desc;
1389 	u8 group_id;
1390 	u8 grp_num;
1391 	u16 i = 0;
1392 	int ret;
1393 
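	/* Each mapping group holds a 32-bit qset bitmap; eight groups
	 * (256 qsets) are printed per output line below.
	 */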
1394 	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
1395 		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
1396 	map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
1397 	for (group_id = 0; group_id < grp_num; group_id++) {
1398 		hclge_cmd_setup_basic_desc(&desc,
1399 					   HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
1400 					   true);
1401 		map->tc_id = tc_id;
1402 		map->qs_group_id = group_id;
1403 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1404 		if (ret) {
1405 			dev_err(&hdev->pdev->dev,
1406 				"failed to get bp to qset map, ret = %d\n",
1407 				ret);
1408 			return ret;
1409 		}
1410 
1411 		qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
1412 	}
1413 
1414 	seq_puts(s, "INDEX | TM BP QSET MAPPING:\n");
1415 	for (group_id = 0; group_id < grp_num / 8; group_id++) {
1416 		seq_printf(s,
1417 			   "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
1418 			   group_id * 256, qset_mapping[i + 7],
1419 			   qset_mapping[i + 6], qset_mapping[i + 5],
1420 			   qset_mapping[i + 4], qset_mapping[i + 3],
1421 			   qset_mapping[i + 2], qset_mapping[i + 1],
1422 			   qset_mapping[i]);
1423 		i += 8;
1424 	}
1425 
1426 	return 0;
1427 }
1428 
1429 static int hclge_dbg_dump_tm_map(struct seq_file *s, void *data)
1430 {
1431 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1432 	u16 queue_id;
1433 	u16 qset_id;
1434 	u8 link_vld;
1435 	u8 pri_id;
1436 	u8 tc_id;
1437 	int ret;
1438 
1439 	for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
1440 		ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
1441 		if (ret)
1442 			return ret;
1443 
1444 		ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
1445 						&link_vld);
1446 		if (ret)
1447 			return ret;
1448 
1449 		ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
1450 		if (ret)
1451 			return ret;
1452 
1453 		seq_puts(s, "QUEUE_ID   QSET_ID   PRI_ID   TC_ID\n");
1454 		seq_printf(s, "%04u        %4u       %3u      %2u\n",
1455 			   queue_id, qset_id, pri_id, tc_id);
1456 
1457 		if (!hnae3_dev_dcb_supported(hdev))
1458 			continue;
1459 
1460 		ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, s);
1461 		if (ret < 0)
1462 			return ret;
1463 
1464 		seq_puts(s, "\n");
1465 	}
1466 
1467 	return 0;
1468 }
1469 
1470 static int hclge_dbg_dump_tm_nodes(struct seq_file *s, void *data)
1471 {
1472 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1473 	struct hclge_tm_nodes_cmd *nodes;
1474 	struct hclge_desc desc;
1475 	int ret;
1476 
1477 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
1478 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1479 	if (ret) {
1480 		dev_err(&hdev->pdev->dev,
1481 			"failed to dump tm nodes, ret = %d\n", ret);
1482 		return ret;
1483 	}
1484 
1485 	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
1486 
1487 	seq_puts(s, "       BASE_ID  MAX_NUM\n");
1488 	seq_printf(s, "PG      %4u      %4u\n", nodes->pg_base_id,
1489 		   nodes->pg_num);
1490 	seq_printf(s, "PRI     %4u      %4u\n", nodes->pri_base_id,
1491 		   nodes->pri_num);
1492 	seq_printf(s, "QSET    %4u      %4u\n",
1493 		   le16_to_cpu(nodes->qset_base_id),
1494 		   le16_to_cpu(nodes->qset_num));
1495 	seq_printf(s, "QUEUE   %4u      %4u\n",
1496 		   le16_to_cpu(nodes->queue_base_id),
1497 		   le16_to_cpu(nodes->queue_num));
1498 
1499 	return 0;
1500 }
1501 
1502 static int hclge_dbg_dump_tm_pri(struct seq_file *s, void *data)
1503 {
1504 	struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
1505 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1506 	u8 pri_num, sch_mode, weight, i;
1507 	const char *sch_mode_str;
1508 	int ret;
1509 
1510 	ret = hclge_tm_get_pri_num(hdev, &pri_num);
1511 	if (ret)
1512 		return ret;
1513 
1514 	seq_puts(s, "ID  MODE  DWRR  C_IR_B  C_IR_U  C_IR_S  C_BS_B  ");
1515 	seq_puts(s, "C_BS_S  C_FLAG  C_RATE(Mbps)  P_IR_B  P_IR_U  P_IR_S  ");
1516 	seq_puts(s, "P_BS_B  P_BS_S  P_FLAG  P_RATE(Mbps)\n");
1517 
1518 	for (i = 0; i < pri_num; i++) {
1519 		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
1520 		if (ret)
1521 			goto out;
1522 
1523 		ret = hclge_tm_get_pri_weight(hdev, i, &weight);
1524 		if (ret)
1525 			goto out;
1526 
1527 		ret = hclge_tm_get_pri_shaper(hdev, i,
1528 					      HCLGE_OPC_TM_PRI_C_SHAPPING,
1529 					      &c_shaper_para);
1530 		if (ret)
1531 			goto out;
1532 
1533 		ret = hclge_tm_get_pri_shaper(hdev, i,
1534 					      HCLGE_OPC_TM_PRI_P_SHAPPING,
1535 					      &p_shaper_para);
1536 		if (ret)
1537 			goto out;
1538 
1539 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1540 			       "sp";
1541 
1542 		seq_printf(s, "%04u  %-6s%-6u", i, sch_mode_str, weight);
1543 		hclge_dbg_fill_shaper_content(s, &c_shaper_para);
1544 		hclge_dbg_fill_shaper_content(s, &p_shaper_para);
1545 		seq_puts(s, "\n");
1546 	}
1547 
1548 out:
1549 	return ret;
1550 }
1551 
1552 static int hclge_dbg_dump_tm_qset(struct seq_file *s, void *data)
1553 {
1554 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1555 	u8 priority, link_vld, sch_mode, weight;
1556 	struct hclge_tm_shaper_para shaper_para;
1557 	const char *sch_mode_str;
1558 	u16 qset_num, i;
1559 	int ret;
1560 
1561 	ret = hclge_tm_get_qset_num(hdev, &qset_num);
1562 	if (ret)
1563 		return ret;
1564 
1565 	seq_puts(s, "ID    MAP_PRI  LINK_VLD  MODE  DWRR  IR_B  IR_U  IR_S  ");
1566 	seq_puts(s, "BS_B  BS_S  FLAG  RATE(Mbps)\n");
1567 
1568 	for (i = 0; i < qset_num; i++) {
1569 		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1570 		if (ret)
1571 			return ret;
1572 
1573 		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1574 		if (ret)
1575 			return ret;
1576 
1577 		ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1578 		if (ret)
1579 			return ret;
1580 
1581 		ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1582 		if (ret)
1583 			return ret;
1584 
1585 		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1586 			       "sp";
1587 
1588 		seq_printf(s, "%04u  %-9u%-10u%-6s%-6u", i, priority, link_vld,
1589 			   sch_mode_str, weight);
1590 		seq_printf(s, "%-6u%-6u%-6u%-6u%-6u%-6u%-14u\n",
1591 			   shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
1592 			   shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
1593 			   shaper_para.rate);
1594 	}
1595 
1596 	return 0;
1597 }
1598 
1599 static int hclge_dbg_dump_qos_pause_cfg(struct seq_file *s, void *data)
1600 {
1601 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1602 	struct hclge_cfg_pause_param_cmd *pause_param;
1603 	struct hclge_desc desc;
1604 	int ret;
1605 
1606 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1607 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1608 	if (ret) {
1609 		dev_err(&hdev->pdev->dev,
1610 			"failed to dump qos pause, ret = %d\n", ret);
1611 		return ret;
1612 	}
1613 
1614 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1615 
1616 	seq_printf(s, "pause_trans_gap: 0x%x\n", pause_param->pause_trans_gap);
1617 	seq_printf(s, "pause_trans_time: 0x%x\n",
1618 		   le16_to_cpu(pause_param->pause_trans_time));
1619 	return 0;
1620 }
1621 
1622 #define HCLGE_DBG_TC_MASK		0x0F
1623 
1624 static int hclge_dbg_dump_qos_pri_map(struct seq_file *s, void *data)
1625 {
1626 #define HCLGE_DBG_TC_BIT_WIDTH		4
1627 
1628 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1629 	struct hclge_qos_pri_map_cmd *pri_map;
1630 	struct hclge_desc desc;
1631 	u8 *pri_tc;
1632 	u8 tc, i;
1633 	int ret;
1634 
1635 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1636 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1637 	if (ret) {
1638 		dev_err(&hdev->pdev->dev,
1639 			"failed to dump qos pri map, ret = %d\n", ret);
1640 		return ret;
1641 	}
1642 
1643 	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1644 
1645 	seq_printf(s, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
1646 	seq_puts(s, "PRI  TC\n");
1647 
1648 	pri_tc = (u8 *)pri_map;
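	/* Each priority-to-TC mapping is 4 bits wide, so every byte holds
	 * two priorities (even priorities in the low nibble).
	 */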
1649 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1650 		tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1651 		tc &= HCLGE_DBG_TC_MASK;
1652 		seq_printf(s, "%u     %u\n", i, tc);
1653 	}
1654 
1655 	return 0;
1656 }
1657 
1658 static int hclge_dbg_dump_qos_dscp_map(struct seq_file *s, void *data)
1659 {
1660 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1661 	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
1662 	struct hnae3_knic_private_info *kinfo;
1663 	u8 *req0 = (u8 *)desc[0].data;
1664 	u8 *req1 = (u8 *)desc[1].data;
1665 	u8 dscp_tc[HNAE3_MAX_DSCP];
1666 	int ret;
1667 	u8 i, j;
1668 
1669 	kinfo = &hdev->vport[0].nic.kinfo;
1670 
1671 	seq_printf(s, "tc map mode: %s\n", tc_map_mode_str[kinfo->tc_map_mode]);
1672 
1673 	if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
1674 		return 0;
1675 
1676 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
1677 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1678 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
1679 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
1680 	if (ret) {
1681 		dev_err(&hdev->pdev->dev,
1682 			"failed to dump qos dscp map, ret = %d\n", ret);
1683 		return ret;
1684 	}
1685 
1686 	seq_puts(s, "\nDSCP  PRIO  TC\n");
1687 
1688 	/* The low 32 DSCP settings use bd0, the high 32 use bd1 */
1689 	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
1690 		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
1691 		/* Each DSCP setting is 4 bits wide, so each byte holds two
1692 		 * DSCP settings
1693 		 */
1694 		dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1695 		dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1696 		dscp_tc[i] &= HCLGE_DBG_TC_MASK;
1697 		dscp_tc[j] &= HCLGE_DBG_TC_MASK;
1698 	}
1699 
1700 	for (i = 0; i < HNAE3_MAX_DSCP; i++) {
1701 		if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
1702 			continue;
1703 
1704 		seq_printf(s, " %2u    %u    %u\n", i, kinfo->dscp_prio[i],
1705 			   dscp_tc[i]);
1706 	}
1707 
1708 	return 0;
1709 }
1710 
1711 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, struct seq_file *s)
1712 {
1713 	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1714 	struct hclge_desc desc;
1715 	int i, ret;
1716 
1717 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1718 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1719 	if (ret) {
1720 		dev_err(&hdev->pdev->dev,
1721 			"failed to dump tx buf, ret = %d\n", ret);
1722 		return ret;
1723 	}
1724 
1725 	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1726 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1727 		seq_printf(s, "tx_packet_buf_tc_%d: 0x%x\n", i,
1728 			   le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1729 
1730 	return 0;
1731 }
1732 
1733 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev,
1734 					  struct seq_file *s)
1735 {
1736 	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1737 	struct hclge_desc desc;
1738 	int i, ret;
1739 
1740 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1741 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1742 	if (ret) {
1743 		dev_err(&hdev->pdev->dev,
1744 			"failed to dump rx priv buf, ret = %d\n", ret);
1745 		return ret;
1746 	}
1747 
1748 	seq_puts(s, "\n");
1749 
1750 	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1751 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1752 		seq_printf(s, "rx_packet_buf_tc_%d: 0x%x\n", i,
1753 			   le16_to_cpu(rx_buf_cmd->buf_num[i]));
1754 
1755 	seq_printf(s, "rx_share_buf: 0x%x\n",
1756 		   le16_to_cpu(rx_buf_cmd->shared_buf));
1757 
1758 	return 0;
1759 }
1760 
1761 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev,
1762 					   struct seq_file *s)
1763 {
1764 	struct hclge_rx_com_wl *rx_com_wl;
1765 	struct hclge_desc desc;
1766 	int ret;
1767 
1768 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1769 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1770 	if (ret) {
1771 		dev_err(&hdev->pdev->dev,
1772 			"failed to dump rx common wl, ret = %d\n", ret);
1773 		return ret;
1774 	}
1775 
1776 	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1777 	seq_puts(s, "\n");
1778 	seq_printf(s, "rx_com_wl: high: 0x%x, low: 0x%x\n",
1779 		   le16_to_cpu(rx_com_wl->com_wl.high),
1780 		   le16_to_cpu(rx_com_wl->com_wl.low));
1781 
1782 	return 0;
1783 }
1784 
1785 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev,
1786 					    struct seq_file *s)
1787 {
1788 	struct hclge_rx_com_wl *rx_packet_cnt;
1789 	struct hclge_desc desc;
1790 	int ret;
1791 
1792 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1793 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1794 	if (ret) {
1795 		dev_err(&hdev->pdev->dev,
1796 			"failed to dump rx global pkt cnt, ret = %d\n", ret);
1797 		return ret;
1798 	}
1799 
1800 	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1801 	seq_printf(s, "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1802 		   le16_to_cpu(rx_packet_cnt->com_wl.high),
1803 		   le16_to_cpu(rx_packet_cnt->com_wl.low));
1804 
1805 	return 0;
1806 }
1807 
1808 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev,
1809 					     struct seq_file *s)
1810 {
1811 	struct hclge_rx_priv_wl_buf *rx_priv_wl;
1812 	struct hclge_desc desc[2];
1813 	int i, ret;
1814 
1815 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1816 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1817 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1818 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1819 	if (ret) {
1820 		dev_err(&hdev->pdev->dev,
1821 			"failed to dump rx priv wl buf, ret = %d\n", ret);
1822 		return ret;
1823 	}
1824 
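	/* desc[0] holds the watermarks of the first HCLGE_TC_NUM_ONE_DESC
	 * TCs, desc[1] those of the remaining TCs.
	 */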
1825 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1826 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1827 		seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1828 			   le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1829 			   le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1830 
1831 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1832 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1833 		seq_printf(s, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1834 			   i + HCLGE_TC_NUM_ONE_DESC,
1835 			   le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1836 			   le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1837 
1838 	return 0;
1839 }
1840 
1841 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1842 						  struct seq_file *s)
1843 {
1844 	struct hclge_rx_com_thrd *rx_com_thrd;
1845 	struct hclge_desc desc[2];
1846 	int i, ret;
1847 
1848 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1849 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1850 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1851 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1852 	if (ret) {
1853 		dev_err(&hdev->pdev->dev,
1854 			"failed to dump rx common threshold, ret = %d\n", ret);
1855 		return ret;
1856 	}
1857 
1858 	seq_puts(s, "\n");
1859 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1860 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1861 		seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1862 			   le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1863 			   le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1864 
1865 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1866 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1867 		seq_printf(s, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1868 			   i + HCLGE_TC_NUM_ONE_DESC,
1869 			   le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1870 			   le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1871 
1872 	return 0;
1873 }
1874 
1875 static int hclge_dbg_dump_qos_buf_cfg(struct seq_file *s, void *data)
1876 {
1877 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1878 	int ret;
1879 
1880 	ret = hclge_dbg_dump_tx_buf_cfg(hdev, s);
1881 	if (ret < 0)
1882 		return ret;
1883 
1884 	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, s);
1885 	if (ret < 0)
1886 		return ret;
1887 
1888 	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, s);
1889 	if (ret < 0)
1890 		return ret;
1891 
1892 	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, s);
1893 	if (ret < 0)
1894 		return ret;
1895 
1896 	seq_puts(s, "\n");
1897 	if (!hnae3_dev_dcb_supported(hdev))
1898 		return 0;
1899 
1900 	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, s);
1901 	if (ret < 0)
1902 		return ret;
1903 
1904 	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, s);
1905 	if (ret < 0)
1906 		return ret;
1907 
1908 	return 0;
1909 }
1910 
1911 static int hclge_dbg_dump_mng_table(struct seq_file *s, void *data)
1912 {
1913 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
1914 	struct hclge_mac_ethertype_idx_rd_cmd *req0;
1915 	struct hclge_desc desc;
1916 	u32 msg_egress_port;
1917 	int ret, i;
1918 
1919 	seq_puts(s, "entry  mac_addr          mask  ether  ");
1920 	seq_puts(s, "mask  vlan  mask  i_map  i_dir  e_type  ");
1921 	seq_puts(s, "pf_id  vf_id  q_id  drop\n");
1922 
1923 	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1924 		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1925 					   true);
1926 		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)desc.data;
1927 		req0->index = cpu_to_le16(i);
1928 
1929 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1930 		if (ret) {
1931 			dev_err(&hdev->pdev->dev,
1932 				"failed to dump manage table, ret = %d\n", ret);
1933 			return ret;
1934 		}
1935 
1936 		if (!req0->resp_code)
1937 			continue;
1938 
1939 		seq_printf(s, "%02u     %pM ",
1940 			   le16_to_cpu(req0->index), req0->mac_addr);
1941 
1942 		seq_printf(s, "%x     %04x   %x     %04x  ",
1943 			   !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1944 			   le16_to_cpu(req0->ethter_type),
1945 			   !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1946 			   le16_to_cpu(req0->vlan_tag) &
1947 			   HCLGE_DBG_MNG_VLAN_TAG);
1948 
1949 		seq_printf(s, "%x     %02x     %02x     ",
1950 			   !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1951 			   req0->i_port_bitmap, req0->i_port_direction);
1952 
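		/* egress_port packs the e_type flag, pf_id, vf_id (from
		 * bit 3) and drop flag into one 16-bit field; it is
		 * decoded below with the HCLGE_DBG_MNG_* masks.
		 */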
1953 		msg_egress_port = le16_to_cpu(req0->egress_port);
1954 		seq_printf(s, "%x       %x      %02x     %04x  %x\n",
1955 			   !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1956 			   msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1957 			   (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1958 			   le16_to_cpu(req0->egress_queue),
1959 			   !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1960 	}
1961 
1962 	return 0;
1963 }
1964 
1965 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1966 				  struct seq_file *s,
1967 				  struct hclge_dbg_tcam_msg tcam_msg)
1968 {
1969 	struct hclge_fd_tcam_config_1_cmd *req1;
1970 	struct hclge_fd_tcam_config_2_cmd *req2;
1971 	struct hclge_fd_tcam_config_3_cmd *req3;
1972 	struct hclge_desc desc[3];
1973 	int ret, i;
1974 	__le32 *req;
1975 
1976 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1977 	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1978 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1979 	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1980 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1981 
1982 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1983 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1984 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1985 
1986 	req1->stage  = tcam_msg.stage;
1987 	req1->xy_sel = sel_x ? 1 : 0;
1988 	req1->index  = cpu_to_le32(tcam_msg.loc);
1989 
1990 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
1991 	if (ret)
1992 		return ret;
1993 
1994 	seq_printf(s, "read result tcam key %s(%u):\n",
1995 		   sel_x ? "x" : "y", tcam_msg.loc);
1996 
1997 	/* tcam_data0 ~ tcam_data1 */
1998 	req = (__le32 *)req1->tcam_data;
1999 	for (i = 0; i < 2; i++)
2000 		seq_printf(s, "%08x\n", le32_to_cpu(*req++));
2001 
2002 	/* tcam_data2 ~ tcam_data7 */
2003 	req = (__le32 *)req2->tcam_data;
2004 	for (i = 0; i < 6; i++)
2005 		seq_printf(s, "%08x\n", le32_to_cpu(*req++));
2006 
2007 	/* tcam_data8 ~ tcam_data12 */
2008 	req = (__le32 *)req3->tcam_data;
2009 	for (i = 0; i < 5; i++)
2010 		seq_printf(s, "%08x\n", le32_to_cpu(*req++));
2011 
2012 	return ret;
2013 }
2014 
2015 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
2016 {
2017 	struct hclge_fd_rule *rule;
2018 	struct hlist_node *node;
2019 	int cnt = 0;
2020 
2021 	spin_lock_bh(&hdev->fd_rule_lock);
2022 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
2023 		rule_locs[cnt] = rule->location;
2024 		cnt++;
2025 	}
2026 	spin_unlock_bh(&hdev->fd_rule_lock);
2027 
2028 	if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
2029 		return -EINVAL;
2030 
2031 	return cnt;
2032 }
2033 
2034 static int hclge_dbg_dump_fd_tcam(struct seq_file *s, void *data)
2035 {
2036 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2037 	struct hclge_dbg_tcam_msg tcam_msg;
2038 	int i, ret, rule_cnt;
2039 	u16 *rule_locs;
2040 	u32 rule_num;
2041 
2042 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
2043 		dev_err(&hdev->pdev->dev,
2044 			"Only FD-supported dev supports dump fd tcam\n");
2045 		return -EOPNOTSUPP;
2046 	}
2047 
2048 	rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
2049 	if (!hdev->hclge_fd_rule_num || !rule_num)
2050 		return 0;
2051 
2052 	rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
2053 	if (!rule_locs)
2054 		return -ENOMEM;
2055 
2056 	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
2057 	if (rule_cnt < 0) {
2058 		ret = rule_cnt;
2059 		dev_err(&hdev->pdev->dev,
2060 			"failed to get rule number, ret = %d\n", ret);
2061 		goto out;
2062 	}
2063 
2064 	ret = 0;
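	/* Each rule location is read back twice, once for the x key and
	 * once for the y key of the TCAM pair.
	 */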
2065 	for (i = 0; i < rule_cnt; i++) {
2066 		tcam_msg.stage = HCLGE_FD_STAGE_1;
2067 		tcam_msg.loc = rule_locs[i];
2068 
2069 		ret = hclge_dbg_fd_tcam_read(hdev, true, s, tcam_msg);
2070 		if (ret) {
2071 			dev_err(&hdev->pdev->dev,
2072 				"failed to get fd tcam key x, ret = %d\n", ret);
2073 			goto out;
2074 		}
2075 
2076 		ret = hclge_dbg_fd_tcam_read(hdev, false, s, tcam_msg);
2077 		if (ret) {
2078 			dev_err(&hdev->pdev->dev,
2079 				"failed to get fd tcam key y, ret = %d\n", ret);
2080 			goto out;
2081 		}
2082 
2083 	}
2084 
2085 out:
2086 	kfree(rule_locs);
2087 	return ret;
2088 }
2089 
2090 static int hclge_dbg_dump_fd_counter(struct seq_file *s, void *data)
2091 {
2092 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2093 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2094 	struct hclge_fd_ad_cnt_read_cmd *req;
2095 	char str_id[HCLGE_DBG_ID_LEN];
2096 	struct hclge_desc desc;
2097 	int ret;
2098 	u64 cnt;
2099 	u8 i;
2100 
2101 	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
2102 		return -EOPNOTSUPP;
2103 
2104 	seq_puts(s, "func_id\thit_times\n");
2105 
2106 	for (i = 0; i < func_num; i++) {
2107 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
2108 		req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
2109 		req->index = cpu_to_le16(i);
2110 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2111 		if (ret) {
2112 			dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
2113 				ret);
2114 			return ret;
2115 		}
2116 		cnt = le64_to_cpu(req->cnt);
2117 		hclge_dbg_get_func_id_str(str_id, i);
2118 		seq_printf(s, "%s\t%llu\n", str_id, cnt);
2119 	}
2120 
2121 	return 0;
2122 }
2123 
2124 static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
2125 	{HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
2126 	{HCLGE_MISC_RESET_STS_REG,   "reset interrupt source"},
2127 	{HCLGE_MISC_VECTOR_INT_STS,  "reset interrupt status"},
2128 	{HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
2129 	{HCLGE_GLOBAL_RESET_REG,  "hardware reset status"},
2130 	{HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
2131 	{HCLGE_FUN_RST_ING, "function reset status"}
2132 };
2133 
2134 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
2135 {
2136 	u32 i, offset;
2137 	int pos = 0;
2138 
2139 	pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
2140 			 hdev->rst_stats.pf_rst_cnt);
2141 	pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
2142 			 hdev->rst_stats.flr_rst_cnt);
2143 	pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
2144 			 hdev->rst_stats.global_rst_cnt);
2145 	pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
2146 			 hdev->rst_stats.imp_rst_cnt);
2147 	pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
2148 			 hdev->rst_stats.reset_done_cnt);
2149 	pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
2150 			 hdev->rst_stats.hw_reset_done_cnt);
2151 	pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
2152 			 hdev->rst_stats.reset_cnt);
2153 	pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
2154 			 hdev->rst_stats.reset_fail_cnt);
2155 
2156 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
2157 		offset = hclge_dbg_rst_info[i].offset;
2158 		pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
2159 				 hclge_dbg_rst_info[i].message,
2160 				 hclge_read_dev(&hdev->hw, offset));
2161 	}
2162 
2163 	pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
2164 			 hdev->state);
2165 
2166 	return 0;
2167 }
2168 
2169 static int hclge_dbg_seq_dump_rst_info(struct seq_file *s, void *data)
2170 {
2171 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2172 	u32 i, offset;
2173 
2174 	seq_printf(s, "PF reset count: %u\n", hdev->rst_stats.pf_rst_cnt);
2175 	seq_printf(s, "FLR reset count: %u\n", hdev->rst_stats.flr_rst_cnt);
2176 	seq_printf(s, "GLOBAL reset count: %u\n",
2177 		   hdev->rst_stats.global_rst_cnt);
2178 	seq_printf(s, "IMP reset count: %u\n", hdev->rst_stats.imp_rst_cnt);
2179 	seq_printf(s, "reset done count: %u\n", hdev->rst_stats.reset_done_cnt);
2180 	seq_printf(s, "HW reset done count: %u\n",
2181 		   hdev->rst_stats.hw_reset_done_cnt);
2182 	seq_printf(s, "reset count: %u\n", hdev->rst_stats.reset_cnt);
2183 	seq_printf(s, "reset fail count: %u\n", hdev->rst_stats.reset_fail_cnt);
2184 
2185 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
2186 		offset = hclge_dbg_rst_info[i].offset;
2187 		seq_printf(s, "%s: 0x%x\n",
2188 			   hclge_dbg_rst_info[i].message,
2189 			   hclge_read_dev(&hdev->hw, offset));
2190 	}
2191 
2192 	seq_printf(s, "hdev state: 0x%lx\n", hdev->state);
2193 
2194 	return 0;
2195 }
2196 
2197 static int hclge_dbg_dump_serv_info(struct seq_file *s, void *data)
2198 {
2199 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2200 	unsigned long rem_nsec;
2201 	u64 lc;
2202 
2203 	lc = local_clock();
2204 	rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
2205 
2206 	seq_printf(s, "local_clock: [%5lu.%06lu]\n",
2207 		   (unsigned long)lc, rem_nsec / 1000);
2208 	seq_printf(s, "delta: %u(ms)\n",
2209 		   jiffies_to_msecs(jiffies - hdev->last_serv_processed));
2210 	seq_printf(s, "last_service_task_processed: %lu(jiffies)\n",
2211 		   hdev->last_serv_processed);
2212 	seq_printf(s, "last_service_task_cnt: %lu\n", hdev->serv_processed_cnt);
2213 
2214 	return 0;
2215 }
2216 
2217 static int hclge_dbg_dump_interrupt(struct seq_file *s, void *data)
2218 {
2219 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2220 
2221 	seq_printf(s, "num_nic_msi: %u\n", hdev->num_nic_msi);
2222 	seq_printf(s, "num_roce_msi: %u\n", hdev->num_roce_msi);
2223 	seq_printf(s, "num_msi_used: %u\n", hdev->num_msi_used);
2224 	seq_printf(s, "num_msi_left: %u\n", hdev->num_msi_left);
2225 
2226 	return 0;
2227 }
2228 
2229 static void hclge_dbg_imp_info_data_print(struct seq_file *s,
2230 					  struct hclge_desc *desc_src,
2231 					  u32 bd_num)
2232 {
2233 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
2234 
2235 	struct hclge_desc *desc_index = desc_src;
2236 	u32 offset = 0;
2237 	u32 i, j;
2238 
2239 	seq_puts(s, "offset | data\n");
2240 
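	/* Each printed row carries two u32 words of descriptor data, so
	 * the reported offset advances by 8 bytes per row.
	 */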
2241 	for (i = 0; i < bd_num; i++) {
2242 		j = 0;
2243 		while (j < HCLGE_DESC_DATA_LEN - 1) {
2244 			seq_printf(s, "0x%04x | ", offset);
2245 			seq_printf(s, "0x%08x  ",
2246 				   le32_to_cpu(desc_index->data[j++]));
2247 			seq_printf(s, "0x%08x\n",
2248 				   le32_to_cpu(desc_index->data[j++]));
2249 			offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
2250 		}
2251 		desc_index++;
2252 	}
2253 }
2254 
2255 static int hclge_dbg_get_imp_stats_info(struct seq_file *s, void *data)
2256 {
2257 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2258 	struct hclge_get_imp_bd_cmd *req;
2259 	struct hclge_desc *desc_src;
2260 	struct hclge_desc desc;
2261 	u32 bd_num;
2262 	int ret;
2263 
2264 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
2265 
2266 	req = (struct hclge_get_imp_bd_cmd *)desc.data;
2267 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2268 	if (ret) {
2269 		dev_err(&hdev->pdev->dev,
2270 			"failed to get imp statistics bd number, ret = %d\n",
2271 			ret);
2272 		return ret;
2273 	}
2274 
2275 	bd_num = le32_to_cpu(req->bd_num);
2276 	if (!bd_num) {
2277 		dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
2278 		return -EINVAL;
2279 	}
2280 
2281 	desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
2282 	if (!desc_src)
2283 		return -ENOMEM;
2284 
2285 	ret  = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
2286 				  HCLGE_OPC_IMP_STATS_INFO);
2287 	if (ret) {
2288 		kfree(desc_src);
2289 		dev_err(&hdev->pdev->dev,
2290 			"failed to get imp statistics, ret = %d\n", ret);
2291 		return ret;
2292 	}
2293 
2294 	hclge_dbg_imp_info_data_print(s, desc_src, bd_num);
2295 
2296 	kfree(desc_src);
2297 
2298 	return 0;
2299 }
2300 
2301 #define HCLGE_CMD_NCL_CONFIG_BD_NUM	5
2302 #define HCLGE_MAX_NCL_CONFIG_LENGTH	16384
2303 
2304 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
2305 					struct seq_file *s)
2306 {
2307 #define HCLGE_CMD_DATA_NUM		6
2308 
2309 	int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
2310 	int i, j;
2311 
2312 	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
2313 		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
2314 			if (i == 0 && j == 0)
2315 				continue;
2316 
2317 			seq_printf(s, "0x%04x | 0x%08x\n", offset,
2318 				   le32_to_cpu(desc[i].data[j]));
2319 
2320 			offset += sizeof(u32);
2321 			*index -= sizeof(u32);
2322 
2323 			if (*index <= 0)
2324 				return;
2325 		}
2326 	}
2327 }
2328 
2329 static int hclge_dbg_dump_ncl_config(struct seq_file *s, void *data)
2330 {
2331 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)
2332 
2333 	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
2334 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2335 	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
2336 	int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
2337 	u32 data0;
2338 	int ret;
2339 
2340 	seq_puts(s, "offset | data\n");
2341 
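	/* data0 carries the read offset in its low 16 bits and the read
	 * length (at most HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD bytes) in
	 * its high 16 bits.
	 */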
2342 	while (index > 0) {
2343 		data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
2344 		if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
2345 			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
2346 		else
2347 			data0 |= (u32)index << 16;
2348 		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
2349 					 HCLGE_OPC_QUERY_NCL_CONFIG);
2350 		if (ret)
2351 			return ret;
2352 
2353 		hclge_ncl_config_data_print(desc, &index, s);
2354 	}
2355 
2356 	return 0;
2357 }
2358 
2359 static int hclge_dbg_dump_loopback(struct seq_file *s, void *data)
2360 {
2361 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2362 	struct phy_device *phydev = hdev->hw.mac.phydev;
2363 	struct hclge_config_mac_mode_cmd *req_app;
2364 	struct hclge_common_lb_cmd *req_common;
2365 	struct hclge_desc desc;
2366 	u8 loopback_en;
2367 	int ret;
2368 
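	/* req_app and req_common both alias desc.data; the descriptor is
	 * reissued for the app and common loopback queries below.
	 */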
2369 	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
2370 	req_common = (struct hclge_common_lb_cmd *)desc.data;
2371 
2372 	seq_printf(s, "mac id: %u\n", hdev->hw.mac.mac_id);
2373 
2374 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
2375 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2376 	if (ret) {
2377 		dev_err(&hdev->pdev->dev,
2378 			"failed to dump app loopback status, ret = %d\n", ret);
2379 		return ret;
2380 	}
2381 
2382 	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
2383 				    HCLGE_MAC_APP_LP_B);
2384 	seq_printf(s, "app loopback: %s\n", str_on_off(loopback_en));
2385 
2386 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
2387 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2388 	if (ret) {
2389 		dev_err(&hdev->pdev->dev,
2390 			"failed to dump common loopback status, ret = %d\n",
2391 			ret);
2392 		return ret;
2393 	}
2394 
2395 	loopback_en = req_common->enable &
2396 		      HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
2397 	seq_printf(s, "serdes serial loopback: %s\n", str_on_off(loopback_en));
2398 
2399 	loopback_en = req_common->enable &
2400 		      HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
2401 	seq_printf(s, "serdes parallel loopback: %s\n",
2402 		   str_on_off(loopback_en));
2403 
2404 	if (phydev) {
2405 		loopback_en = phydev->loopback_enabled;
2406 		seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
2407 	} else if (hnae3_dev_phy_imp_supported(hdev)) {
2408 		loopback_en = req_common->enable &
2409 			      HCLGE_CMD_GE_PHY_INNER_LOOP_B;
2410 		seq_printf(s, "phy loopback: %s\n", str_on_off(loopback_en));
2411 	}
2412 
2413 	return 0;
2414 }
2415 
2416 /* hclge_dbg_dump_mac_tnl_status: print messages about mac tnl interrupts
2417  * @s: seq_file to print the recorded interrupt status into
2418  */
2419 static int hclge_dbg_dump_mac_tnl_status(struct seq_file *s, void *data)
2420 {
2421 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2422 	struct hclge_mac_tnl_stats stats;
2423 	unsigned long rem_nsec;
2424 
2425 	seq_puts(s, "Recently generated mac tnl interruption:\n");
2426 
2427 	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
2428 		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
2429 
2430 		seq_printf(s, "[%07lu.%03lu] status = 0x%x\n",
2431 			   (unsigned long)stats.time, rem_nsec / 1000,
2432 			   stats.status);
2433 	}
2434 
2435 	return 0;
2436 }
2437 
2438 static void hclge_dbg_dump_mac_list(struct seq_file *s, bool is_unicast)
2439 {
2440 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2441 	struct hclge_mac_node *mac_node, *tmp;
2442 	struct hclge_vport *vport;
2443 	struct list_head *list;
2444 	u32 func_id;
2445 
2446 	seq_printf(s, "%s MAC_LIST:\n", is_unicast ? "UC" : "MC");
2447 	seq_puts(s, "FUNC_ID  MAC_ADDR            STATE\n");
2448 
2449 	for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
2450 		vport = &hdev->vport[func_id];
2451 		list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
2452 		spin_lock_bh(&vport->mac_list_lock);
2453 		list_for_each_entry_safe(mac_node, tmp, list, node) {
2454 			if (func_id)
2455 				seq_printf(s, "vf%-7u", func_id - 1U);
2456 			else
2457 				seq_puts(s, "pf       ");
2458 			seq_printf(s, "%pM   ", mac_node->mac_addr);
2459 			seq_printf(s, "%5s\n",
2460 				   hclge_mac_state_str[mac_node->state]);
2461 		}
2462 		spin_unlock_bh(&vport->mac_list_lock);
2463 	}
2464 }
2465 
2466 static int hclge_dbg_dump_umv_info(struct seq_file *s, void *data)
2467 {
2468 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2469 	u8 func_num = pci_num_vf(hdev->pdev) + 1;
2470 	struct hclge_vport *vport;
2471 	u8 i;
2472 
2473 	seq_printf(s, "num_alloc_vport   : %u\n", hdev->num_alloc_vport);
2474 	seq_printf(s, "max_umv_size     : %u\n", hdev->max_umv_size);
2475 	seq_printf(s, "wanted_umv_size  : %u\n", hdev->wanted_umv_size);
2476 	seq_printf(s, "priv_umv_size    : %u\n", hdev->priv_umv_size);
2477 
2478 	mutex_lock(&hdev->vport_lock);
2479 	seq_printf(s, "share_umv_size   : %u\n", hdev->share_umv_size);
2480 	for (i = 0; i < func_num; i++) {
2481 		vport = &hdev->vport[i];
2482 		seq_printf(s, "vport(%u) used_umv_num : %u\n",
2483 			   i, vport->used_umv_num);
2484 	}
2485 	mutex_unlock(&hdev->vport_lock);
2486 
2487 	seq_printf(s, "used_mc_mac_num  : %u\n", hdev->used_mc_mac_num);
2488 
2489 	return 0;
2490 }
2491 
2492 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2493 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2494 {
2495 	struct hclge_vport_vtag_rx_cfg_cmd *req;
2496 	struct hclge_desc desc;
2497 	u16 bmap_index;
2498 	u8 rx_cfg;
2499 	int ret;
2500 
2501 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
2502 
2503 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
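	/* Address the function by command group (vf_offset), byte within
	 * the bitmap (bmap_index) and bit within that byte.
	 */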
2504 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2505 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2506 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2507 
2508 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2509 	if (ret) {
2510 		dev_err(&hdev->pdev->dev,
2511 			"failed to get vport%u rxvlan cfg, ret = %d\n",
2512 			vf_id, ret);
2513 		return ret;
2514 	}
2515 
2516 	rx_cfg = req->vport_vlan_cfg;
2517 	vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2518 	vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2519 	vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2520 	vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2521 	vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2522 	vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2523 
2524 	return 0;
2525 }
2526 
2527 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2528 					 struct hclge_dbg_vlan_cfg *vlan_cfg)
2529 {
2530 	struct hclge_vport_vtag_tx_cfg_cmd *req;
2531 	struct hclge_desc desc;
2532 	u16 bmap_index;
2533 	u8 tx_cfg;
2534 	int ret;
2535 
2536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2537 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2538 	req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2539 	bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2540 	req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2541 
2542 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2543 	if (ret) {
2544 		dev_err(&hdev->pdev->dev,
2545 			"failed to get vport%u txvlan cfg, ret = %d\n",
2546 			vf_id, ret);
2547 		return ret;
2548 	}
2549 
2550 	tx_cfg = req->vport_vlan_cfg;
2551 	vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2552 
2553 	vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2554 	vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2555 	vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2556 	vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2557 	vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2558 	vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2559 	vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2560 
2561 	return 0;
2562 }
2563 
2564 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2565 					    u8 vlan_type, u8 vf_id,
2566 					    struct hclge_desc *desc)
2567 {
2568 	struct hclge_vlan_filter_ctrl_cmd *req;
2569 	int ret;
2570 
2571 	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2572 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2573 	req->vlan_type = vlan_type;
2574 	req->vf_id = vf_id;
2575 
2576 	ret = hclge_cmd_send(&hdev->hw, desc, 1);
2577 	if (ret)
2578 		dev_err(&hdev->pdev->dev,
2579 			"failed to get vport%u vlan filter config, ret = %d.\n",
2580 			vf_id, ret);
2581 
2582 	return ret;
2583 }
2584 
2585 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2586 				       u8 vf_id, u8 *vlan_fe)
2587 {
2588 	struct hclge_vlan_filter_ctrl_cmd *req;
2589 	struct hclge_desc desc;
2590 	int ret;
2591 
2592 	ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2593 	if (ret)
2594 		return ret;
2595 
2596 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2597 	*vlan_fe = req->vlan_fe;
2598 
2599 	return 0;
2600 }
2601 
2602 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2603 						   u8 vf_id, u8 *bypass_en)
2604 {
2605 	struct hclge_port_vlan_filter_bypass_cmd *req;
2606 	struct hclge_desc desc;
2607 	int ret;
2608 
2609 	if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2610 		return 0;
2611 
2612 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2613 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2614 	req->vf_id = vf_id;
2615 
2616 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2617 	if (ret) {
2618 		dev_err(&hdev->pdev->dev,
2619 			"failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2620 			vf_id, ret);
2621 		return ret;
2622 	}
2623 
2624 	*bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2625 
2626 	return 0;
2627 }
2628 
2629 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev,
2630 					     struct seq_file *s)
2631 {
2632 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2633 	u8 i, vlan_fe, bypass, ingress, egress;
2634 	char str_id[HCLGE_DBG_ID_LEN];
2635 	int ret;
2636 
2637 	ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2638 					  &vlan_fe);
2639 	if (ret)
2640 		return ret;
2641 	ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2642 	egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2643 
2644 	seq_printf(s, "I_PORT_VLAN_FILTER: %s\n", str_on_off(ingress));
2645 	seq_printf(s, "E_PORT_VLAN_FILTER: %s\n", str_on_off(egress));
2646 
2647 	seq_puts(s, "FUNC_ID  I_VF_VLAN_FILTER  E_VF_VLAN_FILTER  ");
2648 	seq_puts(s, "PORT_VLAN_FILTER_BYPASS\n");
2649 
2650 	for (i = 0; i < func_num; i++) {
2651 		ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2652 						  &vlan_fe);
2653 		if (ret)
2654 			return ret;
2655 
2656 		ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2657 		egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2658 		ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2659 		if (ret)
2660 			return ret;
2661 
2662 		seq_printf(s, "%-9s%-18s%-18s%s\n",
2663 			   hclge_dbg_get_func_id_str(str_id, i),
2664 			   str_on_off(ingress), str_on_off(egress),
2665 			   test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2666 				    hdev->ae_dev->caps) ?
2667 						str_on_off(bypass) : "NA");
2668 	}
2669 	seq_puts(s, "\n");
2670 
2671 	return 0;
2672 }
2673 
2674 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev,
2675 					      struct seq_file *s)
2676 {
2677 	u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2678 	struct hclge_dbg_vlan_cfg vlan_cfg;
2679 	char str_id[HCLGE_DBG_ID_LEN];
2680 	int ret;
2681 	u8 i;
2682 
2683 	seq_puts(s, "FUNC_ID  PVID  ACCEPT_TAG1  ACCEPT_TAG2 ACCEPT_UNTAG1  ");
2684 	seq_puts(s, "ACCEPT_UNTAG2  INSERT_TAG1  INSERT_TAG2  SHIFT_TAG  ");
2685 	seq_puts(s, "STRIP_TAG1  STRIP_TAG2  DROP_TAG1  DROP_TAG2  ");
2686 	seq_puts(s, "PRI_ONLY_TAG1  PRI_ONLY_TAG2\n");
2687 
2688 	for (i = 0; i < func_num; i++) {
2689 		ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2690 		if (ret)
2691 			return ret;
2692 
2693 		ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2694 		if (ret)
2695 			return ret;
2696 
2697 		seq_printf(s, "%-9s", hclge_dbg_get_func_id_str(str_id, i));
2698 		seq_printf(s, "%-6u", vlan_cfg.pvid);
2699 		seq_printf(s, "%-13s", str_on_off(vlan_cfg.accept_tag1));
2700 		seq_printf(s, "%-12s", str_on_off(vlan_cfg.accept_tag2));
2701 		seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag1));
2702 		seq_printf(s, "%-15s", str_on_off(vlan_cfg.accept_untag2));
2703 		seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag1));
2704 		seq_printf(s, "%-13s", str_on_off(vlan_cfg.insert_tag2));
2705 		seq_printf(s, "%-11s", str_on_off(vlan_cfg.shift_tag));
2706 		seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag1));
2707 		seq_printf(s, "%-12s", str_on_off(vlan_cfg.strip_tag2));
2708 		seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag1));
2709 		seq_printf(s, "%-11s", str_on_off(vlan_cfg.drop_tag2));
2710 		seq_printf(s, "%-15s", str_on_off(vlan_cfg.pri_only1));
2711 		seq_printf(s, "%s\n", str_on_off(vlan_cfg.pri_only2));
2712 	}
2713 
2714 	return 0;
2715 }
2716 
2717 static int hclge_dbg_dump_vlan_config(struct seq_file *s, void *data)
2718 {
2719 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2720 	int ret;
2721 
2722 	ret = hclge_dbg_dump_vlan_filter_config(hdev, s);
2723 	if (ret)
2724 		return ret;
2725 
2726 	return hclge_dbg_dump_vlan_offload_config(hdev, s);
2727 }
2728 
2729 static int hclge_dbg_dump_ptp_info(struct seq_file *s, void *data)
2730 {
2731 	struct hclge_dev *hdev = hclge_seq_file_to_hdev(s);
2732 	struct hclge_ptp *ptp = hdev->ptp;
2733 	u32 sw_cfg = ptp->ptp_cfg;
2734 	unsigned int tx_start;
2735 	unsigned int last_rx;
2736 	u32 hw_cfg;
2737 	int ret;
2738 
2739 	seq_printf(s, "phc %s's debug info:\n", ptp->info.name);
2740 	seq_printf(s, "ptp enable: %s\n",
2741 		   str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
2742 	seq_printf(s, "ptp tx enable: %s\n",
2743 		   str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags)));
2744 	seq_printf(s, "ptp rx enable: %s\n",
2745 		   str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags)));
2746 
2747 	last_rx = jiffies_to_msecs(ptp->last_rx);
2748 	seq_printf(s, "last rx time: %lu.%lu\n",
2749 		   last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2750 	seq_printf(s, "rx count: %lu\n", ptp->rx_cnt);
2751 
2752 	tx_start = jiffies_to_msecs(ptp->tx_start);
2753 	seq_printf(s, "last tx start time: %lu.%lu\n",
2754 		   tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2755 	seq_printf(s, "tx count: %lu\n", ptp->tx_cnt);
2756 	seq_printf(s, "tx skipped count: %lu\n", ptp->tx_skipped);
2757 	seq_printf(s, "tx timeout count: %lu\n", ptp->tx_timeout);
2758 	seq_printf(s, "last tx seqid: %u\n", ptp->last_tx_seqid);
2759 
2760 
2761 	ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2762 	if (ret)
2763 		return ret;
2764 
2765 	seq_printf(s, "sw_cfg: %#x, hw_cfg: %#x\n", sw_cfg, hw_cfg);
2766 
2767 	seq_printf(s, "tx type: %d, rx filter: %d\n",
2768 		   ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2769 
2770 	return 0;
2771 }
2772 
2773 static int hclge_dbg_dump_mac_uc(struct seq_file *s, void *data)
2774 {
2775 	hclge_dbg_dump_mac_list(s, true);
2776 
2777 	return 0;
2778 }
2779 
2780 static int hclge_dbg_dump_mac_mc(struct seq_file *s, void *data)
2781 {
2782 	hclge_dbg_dump_mac_list(s, false);
2783 
2784 	return 0;
2785 }
2786 
2787 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2788 	{
2789 		.cmd = HNAE3_DBG_CMD_TM_NODES,
2790 		.dbg_read_func = hclge_dbg_dump_tm_nodes,
2791 	},
2792 	{
2793 		.cmd = HNAE3_DBG_CMD_TM_PRI,
2794 		.dbg_read_func = hclge_dbg_dump_tm_pri,
2795 	},
2796 	{
2797 		.cmd = HNAE3_DBG_CMD_TM_QSET,
2798 		.dbg_read_func = hclge_dbg_dump_tm_qset,
2799 	},
2800 	{
2801 		.cmd = HNAE3_DBG_CMD_TM_MAP,
2802 		.dbg_read_func = hclge_dbg_dump_tm_map,
2803 	},
2804 	{
2805 		.cmd = HNAE3_DBG_CMD_TM_PG,
2806 		.dbg_read_func = hclge_dbg_dump_tm_pg,
2807 	},
2808 	{
2809 		.cmd = HNAE3_DBG_CMD_TM_PORT,
2810 		.dbg_read_func = hclge_dbg_dump_tm_port,
2811 	},
2812 	{
2813 		.cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2814 		.dbg_read_func = hclge_dbg_dump_tc,
2815 	},
2816 	{
2817 		.cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2818 		.dbg_read_func = hclge_dbg_dump_qos_pause_cfg,
2819 	},
2820 	{
2821 		.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2822 		.dbg_read_func = hclge_dbg_dump_qos_pri_map,
2823 	},
2824 	{
2825 		.cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
2826 		.dbg_read_func = hclge_dbg_dump_qos_dscp_map,
2827 	},
2828 	{
2829 		.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2830 		.dbg_read_func = hclge_dbg_dump_qos_buf_cfg,
2831 	},
2832 	{
2833 		.cmd = HNAE3_DBG_CMD_MAC_UC,
2834 		.dbg_read_func = hclge_dbg_dump_mac_uc,
2835 	},
2836 	{
2837 		.cmd = HNAE3_DBG_CMD_MAC_MC,
2838 		.dbg_read_func = hclge_dbg_dump_mac_mc,
2839 	},
2840 	{
2841 		.cmd = HNAE3_DBG_CMD_MNG_TBL,
2842 		.dbg_read_func = hclge_dbg_dump_mng_table,
2843 	},
2844 	{
2845 		.cmd = HNAE3_DBG_CMD_LOOPBACK,
2846 		.dbg_read_func = hclge_dbg_dump_loopback,
2847 	},
2848 	{
2849 		.cmd = HNAE3_DBG_CMD_PTP_INFO,
2850 		.dbg_read_func = hclge_dbg_dump_ptp_info,
2851 	},
2852 	{
2853 		.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2854 		.dbg_read_func = hclge_dbg_dump_interrupt,
2855 	},
2856 	{
2857 		.cmd = HNAE3_DBG_CMD_RESET_INFO,
2858 		.dbg_read_func = hclge_dbg_seq_dump_rst_info,
2859 	},
2860 	{
2861 		.cmd = HNAE3_DBG_CMD_IMP_INFO,
2862 		.dbg_read_func = hclge_dbg_get_imp_stats_info,
2863 	},
2864 	{
2865 		.cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2866 		.dbg_read_func = hclge_dbg_dump_ncl_config,
2867 	},
2868 	{
2869 		.cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2870 		.dbg_read_func = hclge_dbg_dump_bios_reg_cmd,
2871 	},
2872 	{
2873 		.cmd = HNAE3_DBG_CMD_REG_SSU,
2874 		.dbg_read_func = hclge_dbg_dump_ssu_reg_cmd,
2875 	},
2876 	{
2877 		.cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2878 		.dbg_read_func = hclge_dbg_dump_igu_egu_reg_cmd,
2879 	},
2880 	{
2881 		.cmd = HNAE3_DBG_CMD_REG_RPU,
2882 		.dbg_read_func = hclge_dbg_dump_rpu_reg_cmd,
2883 	},
2884 	{
2885 		.cmd = HNAE3_DBG_CMD_REG_NCSI,
2886 		.dbg_read_func = hclge_dbg_dump_ncsi_reg_cmd,
2887 	},
2888 	{
2889 		.cmd = HNAE3_DBG_CMD_REG_RTC,
2890 		.dbg_read_func = hclge_dbg_dump_rtc_reg_cmd,
2891 	},
2892 	{
2893 		.cmd = HNAE3_DBG_CMD_REG_PPP,
2894 		.dbg_read_func = hclge_dbg_dump_ppp_reg_cmd,
2895 	},
2896 	{
2897 		.cmd = HNAE3_DBG_CMD_REG_RCB,
2898 		.dbg_read_func = hclge_dbg_dump_rcb_reg_cmd,
2899 	},
2900 	{
2901 		.cmd = HNAE3_DBG_CMD_REG_TQP,
2902 		.dbg_read_func = hclge_dbg_dump_tqp_reg_cmd,
2903 	},
2904 	{
2905 		.cmd = HNAE3_DBG_CMD_REG_MAC,
2906 		.dbg_read_func = hclge_dbg_dump_mac,
2907 	},
2908 	{
2909 		.cmd = HNAE3_DBG_CMD_REG_DCB,
2910 		.dbg_read_func = hclge_dbg_dump_dcb,
2911 	},
2912 	{
2913 		.cmd = HNAE3_DBG_CMD_FD_TCAM,
2914 		.dbg_read_func = hclge_dbg_dump_fd_tcam,
2915 	},
2916 	{
2917 		.cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2918 		.dbg_read_func = hclge_dbg_dump_mac_tnl_status,
2919 	},
2920 	{
2921 		.cmd = HNAE3_DBG_CMD_SERV_INFO,
2922 		.dbg_read_func = hclge_dbg_dump_serv_info,
2923 	},
2924 	{
2925 		.cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2926 		.dbg_read_func = hclge_dbg_dump_vlan_config,
2927 	},
2928 	{
2929 		.cmd = HNAE3_DBG_CMD_FD_COUNTER,
2930 		.dbg_read_func = hclge_dbg_dump_fd_counter,
2931 	},
2932 	{
2933 		.cmd = HNAE3_DBG_CMD_UMV_INFO,
2934 		.dbg_read_func = hclge_dbg_dump_umv_info,
2935 	},
2936 };
2937 
2938 int hclge_dbg_get_read_func(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2939 			    read_func *func)
2940 {
2941 	struct hclge_vport *vport = hclge_get_vport(handle);
2942 	const struct hclge_dbg_func *cmd_func;
2943 	struct hclge_dev *hdev = vport->back;
2944 	u32 i;
2945 
2946 	for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2947 		if (cmd == hclge_dbg_cmd_func[i].cmd) {
2948 			cmd_func = &hclge_dbg_cmd_func[i];
2949 			*func = cmd_func->dbg_read_func;
2950 			return 0;
2951 		}
2952 	}
2953 
2954 	dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
2955 	return -EINVAL;
2956 }
2957