xref: /linux/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c (revision b5c6891b2c5b54bf58069966296917da46cda6f2)
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2025 Hisilicon Limited.

#include <linux/iopoll.h>
#include <linux/phy.h>
#include "hbg_common.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
#include "hbg_diagnose.h"

#define HBG_MSG_DATA_MAX_NUM	64

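/* One diagnose push message exchanged with the management firmware (BMC):
 * opcode selects the push type, status reports completion, data_num gives
 * the number of valid 32-bit payload words in data[], and priv points back
 * to the owning device.
 */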
struct hbg_diagnose_message {
	u32 opcode;
	u32 status;
	u32 data_num;
	struct hbg_priv *priv;

	u32 data[HBG_MSG_DATA_MAX_NUM];
};

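/* After a push is started, the message header is polled until the firmware
 * clears the status bit: check every 1 ms, give up after 2 s.
 */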
#define HBG_HW_PUSH_WAIT_TIMEOUT_US	(2 * 1000 * 1000)
#define HBG_HW_PUSH_WAIT_INTERVAL_US	(1 * 1000)

enum hbg_push_cmd {
	HBG_PUSH_CMD_IRQ = 0,
	HBG_PUSH_CMD_STATS,
	HBG_PUSH_CMD_LINK,
};

struct hbg_push_stats_info {
	/* id matches the name of the current stats item and is used
	 * for pretty printing on the BMC
	 */
	u32 id;
	u64 offset;
};

struct hbg_push_irq_info {
	/* id matches the name of the current irq and is used
	 * for pretty printing on the BMC
	 */
	u32 id;
	u32 mask;
};

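/* Map each interrupt mask bit to the fixed id the BMC uses to name it. */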
#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B}
static const struct hbg_push_irq_info hbg_push_irq_list[] = {
	HBG_PUSH_IRQ_I(RX, 0),
	HBG_PUSH_IRQ_I(TX, 1),
	HBG_PUSH_IRQ_I(TX_PKT_CPL, 2),
	HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3),
	HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4),
	HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5),
	HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6),
	HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7),
	HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8),
	HBG_PUSH_IRQ_I(TX_AHB_ERR, 9),
	HBG_PUSH_IRQ_I(RX_BUF_AVL, 10),
	HBG_PUSH_IRQ_I(REL_BUF_ERR, 11),
	HBG_PUSH_IRQ_I(TXCFG_AVL, 12),
	HBG_PUSH_IRQ_I(TX_DROP, 13),
	HBG_PUSH_IRQ_I(RX_DROP, 14),
	HBG_PUSH_IRQ_I(RX_AHB_ERR, 15),
	HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16),
	HBG_PUSH_IRQ_I(RBREQ_ERR, 17),
	HBG_PUSH_IRQ_I(WE_ERR, 18),
};

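/* Map each statistics field offset to the fixed id the BMC uses to name it. */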
#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)}
static const struct hbg_push_stats_info hbg_push_stats_list[] = {
	HBG_PUSH_STATS_I(rx_desc_drop, 0),
	HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1),
	HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2),
	HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3),
	HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4),
	HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5),
	HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6),
	HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7),
	HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8),
	HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9),
	HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10),
	HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11),
	HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12),
	HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13),
	HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14),
	HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15),
	HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16),
	HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17),
	HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18),
	HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19),
	HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20),
	HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21),
	HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22),
	HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23),
	HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24),
	HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25),
	HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26),
	HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27),
	HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28),
	HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29),
	HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30),
	HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31),
	HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32),
	HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33),
	HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34),
	HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35),
	HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36),
	HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37),
	HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38),
	HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39),
	HBG_PUSH_STATS_I(rx_framesize_64, 40),
	HBG_PUSH_STATS_I(rx_framesize_65_127, 41),
	HBG_PUSH_STATS_I(rx_framesize_128_255, 42),
	HBG_PUSH_STATS_I(rx_framesize_256_511, 43),
	HBG_PUSH_STATS_I(rx_framesize_512_1023, 44),
	HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45),
	HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46),
	HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47),
	HBG_PUSH_STATS_I(rx_data_error_cnt, 48),
	HBG_PUSH_STATS_I(rx_align_error_cnt, 49),
	HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50),
	HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51),
	HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52),
	HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53),
	HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54),
	HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55),
	HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56),
	HBG_PUSH_STATS_I(rx_overflow_cnt, 57),
	HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58),
	HBG_PUSH_STATS_I(rx_we_err_cnt, 59),
	HBG_PUSH_STATS_I(rx_overrun_cnt, 60),
	HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61),
	HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62),
	HBG_PUSH_STATS_I(rx_dma_err_cnt, 63),
	HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64),
	HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65),
	HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66),
	HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67),
	HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68),
	HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69),
	HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70),
	HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71),
	HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72),
	HBG_PUSH_STATS_I(tx_framesize_64, 73),
	HBG_PUSH_STATS_I(tx_framesize_65_127, 74),
	HBG_PUSH_STATS_I(tx_framesize_128_255, 75),
	HBG_PUSH_STATS_I(tx_framesize_256_511, 76),
	HBG_PUSH_STATS_I(tx_framesize_512_1023, 77),
	HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78),
	HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79),
	HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80),
	HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81),
	HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82),
	HBG_PUSH_STATS_I(tx_crc_err_cnt, 83),
	HBG_PUSH_STATS_I(tx_drop_cnt, 84),
	HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
	HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
};

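/* Write the payload words and the header to the push mailbox registers,
 * then poll until the firmware clears the status bit in the header.
 * The response-code field is pre-set to ETIMEDOUT so a poll timeout is
 * reported as -ETIMEDOUT; otherwise the response code left in the header
 * by the firmware is returned as a negative errno (0 on success).
 */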
static int hbg_push_msg_send(struct hbg_priv *priv,
			     struct hbg_diagnose_message *msg)
{
	u32 header = 0;
	u32 i;

	if (msg->data_num == 0)
		return 0;

	for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++)
		hbg_reg_write(priv,
			      HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32),
			      msg->data[i]);

	hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode);
	hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num);
	hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT);

	/* start status */
	hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1);

	/* write header msg to start push */
	hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header);

	/* wait done */
	readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header,
			   !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header),
			   HBG_HW_PUSH_WAIT_INTERVAL_US,
			   HBG_HW_PUSH_WAIT_TIMEOUT_US);

	msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header);
	return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header);
}

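/* Push an arbitrarily long u32 array by splitting it into chunks of at
 * most HBG_MSG_DATA_MAX_NUM words, one mailbox message per chunk.
 */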
static int hbg_push_data(struct hbg_priv *priv,
			 u32 opcode, u32 *data, u32 data_num)
{
	struct hbg_diagnose_message msg = {0};
	u32 data_left_num;
	u32 i, j;
	int ret;

	msg.priv = priv;
	msg.opcode = opcode;
	for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) {
		if (i * HBG_MSG_DATA_MAX_NUM >= data_num)
			break;

		data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM;
		for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++)
			msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j];

		msg.data_num = j;
		ret = hbg_push_msg_send(priv, &msg);
		if (ret)
			return ret;
	}

	return 0;
}

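/* Push an array of u64 values through the u32-word mailbox. */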
static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode,
			     u64 *data, u32 data_num)
{
	/* A u64 is twice the size of a u32, so the number of
	 * 32-bit words to push is data_num * 2.
	 */
	return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2);
}

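/* Look up the accumulated count for the interrupt identified by @mask. */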
static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask)
{
	u32 i = 0;

	for (i = 0; i < vectors->info_array_len; i++)
		if (vectors->info_array[i].mask == mask)
			return vectors->stats_array[i];

	return 0;
}

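/* Push all interrupt counters as (id, value) pairs of u64 values. */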
static int hbg_push_irq_cnt(struct hbg_priv *priv)
{
	/* Each counter is pushed as an (id, value) pair,
	 * so data_num is twice the number of list entries.
	 */
	u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2;
	struct hbg_vector *vectors = &priv->vectors;
	const struct hbg_push_irq_info *info;
	u32 i, j = 0;
	u64 *data;
	int ret;

	data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Each pair occupies two slots: data[i] holds the id,
	 * data[i + 1] holds the counter value.
	 */
	for (i = 0; i < data_num; i += 2) {
		info = &hbg_push_irq_list[j++];
		data[i] = info->id;
		data[i + 1] = hbg_get_irq_stats(vectors, info->mask);
	}

	ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num);
	kfree(data);
	return ret;
}

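/* Push the PHY link state and the MAC (auto-negotiation) link state. */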
static int hbg_push_link_status(struct hbg_priv *priv)
{
	u32 link_status[2];

	/* phy link status */
	link_status[0] = priv->mac.phydev->link;
	/* mac link status */
	link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
					    HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);

	return hbg_push_data(priv, HBG_PUSH_CMD_LINK,
			     link_status, ARRAY_SIZE(link_status));
}

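/* Push all driver statistics as (id, value) pairs of u64 values. */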
static int hbg_push_stats(struct hbg_priv *priv)
{
	/* Each statistic is pushed as an (id, value) pair,
	 * so data_num is twice the number of list entries.
	 */
	u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2;
	struct hbg_stats *stats = &priv->stats;
	const struct hbg_push_stats_info *info;
	u32 i, j = 0;
	u64 *data;
	int ret;

	data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Each pair occupies two slots: data[i] holds the id,
	 * data[i + 1] holds the statistic value.
	 */
	for (i = 0; i < data_num; i += 2) {
		info = &hbg_push_stats_list[j++];
		data[i] = info->id;
		data[i + 1] = HBG_STATS_R(stats, info->offset);
	}

	ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num);
	kfree(data);
	return ret;
}

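/* Entry point for the diagnose push: when the firmware has requested a
 * push (HBG_REG_PUSH_REQ_ADDR reads 1) and the device is not resetting,
 * push the interrupt counters, link status and statistics, then clear
 * the request register. Presumably called periodically from the driver's
 * service path; the exact caller is outside this file.
 */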
void hbg_diagnose_message_push(struct hbg_priv *priv)
{
	int ret;

	if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
		return;

	/* a push is requested only when this register reads 1 */
	if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1)
		return;

	ret = hbg_push_irq_cnt(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to push irq cnt, ret = %d\n", ret);
		goto push_done;
	}

	ret = hbg_push_link_status(priv);
	if (ret) {
		dev_err(&priv->pdev->dev,
			"failed to push link status, ret = %d\n", ret);
		goto push_done;
	}

	ret = hbg_push_stats(priv);
	if (ret)
		dev_err(&priv->pdev->dev,
			"failed to push stats, ret = %d\n", ret);

push_done:
	hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0);
}
349