xref: /linux/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h (revision b5c6891b2c5b54bf58069966296917da46cda6f2)
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /* Copyright (c) 2024 Hisilicon Limited. */
3 
4 #ifndef __HBG_COMMON_H
5 #define __HBG_COMMON_H
6 
7 #include <linux/ethtool.h>
8 #include <linux/netdevice.h>
9 #include <linux/pci.h>
10 #include "hbg_reg.h"
11 
12 #define HBG_STATUS_DISABLE		0x0 /* generic disable value for on/off register fields */
13 #define HBG_STATUS_ENABLE		0x1 /* generic enable value for on/off register fields */
14 #define HBG_RX_SKIP1			0x00 /* RX head skip region 1, in cache-line units */
15 #define HBG_RX_SKIP2			0x01 /* RX head skip region 2, in cache-line units */
16 #define HBG_VECTOR_NUM			4 /* number of MSI(-X) vectors used by the driver */
17 #define HBG_PCU_CACHE_LINE_SIZE		32 /* PCU cache line size in bytes */
18 #define HBG_TX_TIMEOUT_BUF_LEN		1024 /* size of the TX timeout diagnostic log buffer */
19 #define HBG_RX_DESCR			0x01 /* RX descriptor region size, in cache-line units */
20 
21 /* Bytes of metadata the hardware places before the packet payload in an RX
22  * buffer: (skip1 + skip2 + descriptor) cache lines.
23  */
21 #define HBG_PACKET_HEAD_SIZE	((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
22 				  HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
23 
/* Transfer direction of a ring/buffer; the values are single bits so the
 * enum can also be used as a bitmask (HBG_DIR_TX_RX covers both).
 */
24 enum hbg_dir {
25 	HBG_DIR_TX = 1 << 0,
26 	HBG_DIR_RX = 1 << 1,
27 	HBG_DIR_TX_RX = HBG_DIR_TX | HBG_DIR_RX,
28 };
29 
/* Lifecycle state of a TX buffer; presumably kept in hbg_buffer::state —
 * verify against the TX path code.
 */
30 enum hbg_tx_state {
31 	HBG_TX_STATE_COMPLETE = 0, /* clear state, must fix to 0 */
32 	HBG_TX_STATE_START,
33 };
34 
/* Driver-level state flags; NOTE(review): these look like bit numbers for
 * the hbg_priv::state bitmap (set_bit/test_bit usage) — confirm in the .c
 * files.
 */
35 enum hbg_nic_state {
36 	HBG_NIC_STATE_EVENT_HANDLING = 0,
37 	HBG_NIC_STATE_RESETTING,
38 	HBG_NIC_STATE_RESET_FAIL,
39 	HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */
40 	HBG_NIC_STATE_NP_LINK_FAIL,
41 };
42 
/* Kind of reset currently in progress (stored in hbg_priv::reset_type). */
43 enum hbg_reset_type {
44 	HBG_RESET_TYPE_NONE = 0,
45 	HBG_RESET_TYPE_FLR, /* PCI function-level reset */
46 	HBG_RESET_TYPE_FUNCTION,
47 };
48 
/* One ring entry: an skb plus the DMA mappings shared with hardware. */
49 struct hbg_buffer {
50 	u32 state; /* buffer state word, DMA-visible via state_dma */
51 	dma_addr_t state_dma; /* DMA address of the state word */
52 
53 	struct sk_buff *skb; /* packet buffer owned by this entry, NULL if unused */
54 	dma_addr_t skb_dma; /* DMA mapping of the skb data */
55 	u32 skb_len; /* mapped length of the skb data */
56 
57 	enum hbg_dir dir; /* TX or RX, matches the owning ring */
58 	struct hbg_ring *ring; /* back-pointer to the owning ring */
59 	struct hbg_priv *priv; /* back-pointer to the device private data */
60 };
61 
/* A TX or RX descriptor ring: a DMA-coherent array of hbg_buffer entries
 * plus head/tail indices. The unions give the indices both naming schemes:
 * head aliases ntc ("next to clean") and tail aliases ntu ("next to use").
 */
62 struct hbg_ring {
63 	struct hbg_buffer *queue; /* ring entries */
64 	dma_addr_t queue_dma; /* DMA address of the queue array */
65 
66 	union {
67 		u32 head;
68 		u32 ntc;
69 	};
70 	union {
71 		u32 tail;
72 		u32 ntu;
73 	};
74 	u32 len; /* number of entries in the ring */
75 
76 	enum hbg_dir dir; /* HBG_DIR_TX or HBG_DIR_RX */
77 	struct hbg_priv *priv; /* back-pointer to the device private data */
78 	struct napi_struct napi; /* NAPI context servicing this ring */
79 	char *tout_log_buf; /* tx timeout log buffer */
80 };
81 
/* Hardware event that triggered (re)initialization of the device. */
82 enum hbg_hw_event_type {
83 	HBG_HW_EVENT_NONE = 0,
84 	HBG_HW_EVENT_INIT, /* driver is loading */
85 	HBG_HW_EVENT_RESET,
86 	HBG_HW_EVENT_CORE_RESET,
87 };
88 
/* Per-device capabilities and parameters; presumably read from firmware
 * or hardware at probe time — verify against the probe path.
 */
89 struct hbg_dev_specs {
90 	u32 mac_id; /* index of this MAC on the device */
91 	struct sockaddr mac_addr; /* permanent MAC address */
92 	u32 phy_addr; /* PHY address on the MDIO bus */
93 	u32 mdio_frequency;
94 	u32 rx_fifo_num;
95 	u32 tx_fifo_num;
96 	u32 vlan_layers;
97 	u32 max_mtu;
98 	u32 min_mtu;
99 	u32 uc_mac_num; /* number of unicast MAC filter entries */
100 
101 	u32 max_frame_len; /* largest frame length, derived from max_mtu */
102 	u32 rx_buf_size; /* RX buffer allocation size */
103 };
104 
/* Static description of one interrupt source and how to react to it. */
105 struct hbg_irq_info {
106 	const char *name; /* human-readable IRQ source name */
107 	u32 mask; /* bit(s) identifying this source in the IRQ status register */
108 	bool re_enable; /* re-arm the source after handling — TODO confirm semantics */
109 	bool need_print; /* log a message when the IRQ fires */
110 	bool need_reset; /* schedule a device reset when the IRQ fires */
111 
112 	/* optional per-source handler; NULL means default processing */
113 	void (*irq_handle)(struct hbg_priv *priv,
114 			   const struct hbg_irq_info *info);
115 };
115 
/* Interrupt vector bookkeeping: request_irq names plus per-source stats. */
116 struct hbg_vector {
117 	char name[HBG_VECTOR_NUM][32]; /* one IRQ name per vector */
118 
119 	u64 *stats_array; /* per-IRQ-source event counters, parallel to info_array */
120 	const struct hbg_irq_info *info_array; /* table of known IRQ sources */
121 	u32 info_array_len; /* number of entries in info_array */
122 };
123 
/* MAC/PHY link management state. */
124 struct hbg_mac {
125 	struct mii_bus *mdio_bus; /* MDIO bus used to reach the PHY */
126 	struct phy_device *phydev; /* attached PHY */
127 	u8 phy_addr; /* PHY address on the MDIO bus */
128 
129 	u32 speed; /* negotiated link speed */
130 	u32 duplex; /* negotiated duplex mode */
131 	u32 autoneg; /* autonegotiation enabled/disabled */
132 	u32 link_status; /* current link up/down state */
133 	u32 pause_autoneg; /* pause frame autonegotiation setting */
134 };
135 
/* One entry in the unicast MAC filter table. */
136 struct hbg_mac_table_entry {
137 	u8 addr[ETH_ALEN]; /* MAC address of this entry */
138 };
139 
/* Software shadow of the hardware MAC address filter. */
140 struct hbg_mac_filter {
141 	struct hbg_mac_table_entry *mac_table; /* filter entries */
142 	u32 table_max_len; /* capacity of mac_table */
143 	bool enabled; /* whether filtering is active */
144 };
145 
/* User-configured settings saved here so they can be restored after a
 * device reset.
 */
147 struct hbg_user_def {
148 	struct ethtool_pauseparam pause_param; /* ethtool pause configuration */
149 };
150 
/* Driver statistics. NOTE(review): the rx_desc_* counters appear to come
 * from RX descriptor parsing and the remainder from MAC/PCU hardware
 * counters — confirm against the stats update code.
 */
151 struct hbg_stats {
152 	u64 rx_desc_drop;
153 	u64 rx_desc_l2_err_cnt;
154 	u64 rx_desc_pkt_len_err_cnt;
155 	u64 rx_desc_l3l4_err_cnt;
156 	u64 rx_desc_l3_wrong_head_cnt;
157 	u64 rx_desc_l3_csum_err_cnt;
158 	u64 rx_desc_l3_len_err_cnt;
159 	u64 rx_desc_l3_zero_ttl_cnt;
160 	u64 rx_desc_l3_other_cnt;
161 	u64 rx_desc_l4_err_cnt;
162 	u64 rx_desc_l4_wrong_head_cnt;
163 	u64 rx_desc_l4_len_err_cnt;
164 	u64 rx_desc_l4_csum_err_cnt;
165 	u64 rx_desc_l4_zero_port_num_cnt;
166 	u64 rx_desc_l4_other_cnt;
167 	u64 rx_desc_frag_cnt;
168 	u64 rx_desc_ip_ver_err_cnt;
169 	u64 rx_desc_ipv4_pkt_cnt;
170 	u64 rx_desc_ipv6_pkt_cnt;
171 	u64 rx_desc_no_ip_pkt_cnt;
172 	u64 rx_desc_ip_pkt_cnt;
173 	u64 rx_desc_tcp_pkt_cnt;
174 	u64 rx_desc_udp_pkt_cnt;
175 	u64 rx_desc_vlan_pkt_cnt;
176 	u64 rx_desc_icmp_pkt_cnt;
177 	u64 rx_desc_arp_pkt_cnt;
178 	u64 rx_desc_rarp_pkt_cnt;
179 	u64 rx_desc_multicast_pkt_cnt;
180 	u64 rx_desc_broadcast_pkt_cnt;
181 	u64 rx_desc_ipsec_pkt_cnt;
182 	u64 rx_desc_ip_opt_pkt_cnt;
183 	u64 rx_desc_key_not_match_cnt;
184 
185 	u64 rx_octets_total_ok_cnt;
186 	u64 rx_uc_pkt_cnt;
187 	u64 rx_mc_pkt_cnt;
188 	u64 rx_bc_pkt_cnt;
189 	u64 rx_vlan_pkt_cnt;
190 	u64 rx_octets_bad_cnt;
191 	u64 rx_octets_total_filt_cnt;
192 	u64 rx_filt_pkt_cnt;
193 	u64 rx_trans_pkt_cnt;
194 	u64 rx_framesize_64;
195 	u64 rx_framesize_65_127;
196 	u64 rx_framesize_128_255;
197 	u64 rx_framesize_256_511;
198 	u64 rx_framesize_512_1023;
199 	u64 rx_framesize_1024_1518;
200 	u64 rx_framesize_bt_1518;
201 	u64 rx_fcs_error_cnt;
202 	u64 rx_data_error_cnt;
203 	u64 rx_align_error_cnt;
204 	u64 rx_pause_macctl_frame_cnt;
205 	u64 rx_unknown_macctl_frame_cnt;
206 	/* crc ok, > max_frm_size, < 2 * max_frm_size */
207 	u64 rx_frame_long_err_cnt;
208 	/* crc fail, > max_frm_size, < 2 * max_frm_size */
209 	u64 rx_jabber_err_cnt;
210 	/* > 2 * max_frm_size */
211 	u64 rx_frame_very_long_err_cnt;
212 	/* < 64byte, >= short_runts_thr */
213 	u64 rx_frame_runt_err_cnt;
214 	/* < short_runts_thr */
215 	u64 rx_frame_short_err_cnt;
216 	/* PCU: dropped when the RX FIFO is full.*/
217 	u64 rx_overflow_cnt;
218 	/* GMAC: the count of overflows of the RX FIFO */
219 	u64 rx_overrun_cnt;
220 	/* PCU: the count of buffer alloc errors in RX */
221 	u64 rx_bufrq_err_cnt;
222 	/* PCU: the count of write descriptor errors in RX */
223 	u64 rx_we_err_cnt;
224 	/* GMAC: the count of pkts that contain PAD but length is not 64 */
225 	u64 rx_lengthfield_err_cnt;
226 	u64 rx_fail_comma_cnt;
227 
228 	u64 rx_dma_err_cnt;
229 	u64 rx_fifo_less_empty_thrsld_cnt;
230 
231 	u64 tx_octets_total_ok_cnt;
232 	u64 tx_uc_pkt_cnt;
233 	u64 tx_mc_pkt_cnt;
234 	u64 tx_bc_pkt_cnt;
235 	u64 tx_vlan_pkt_cnt;
236 	u64 tx_octets_bad_cnt;
237 	u64 tx_trans_pkt_cnt;
238 	u64 tx_pause_frame_cnt;
239 	u64 tx_framesize_64;
240 	u64 tx_framesize_65_127;
241 	u64 tx_framesize_128_255;
242 	u64 tx_framesize_256_511;
243 	u64 tx_framesize_512_1023;
244 	u64 tx_framesize_1024_1518;
245 	u64 tx_framesize_bt_1518;
246 	/* GMAC: the count of times that frames fail to be transmitted
247 	 *       due to internal errors.
248 	 */
249 	u64 tx_underrun_err_cnt;
250 	u64 tx_add_cs_fail_cnt;
251 	/* PCU: the count of buffer free errors in TX */
252 	u64 tx_bufrl_err_cnt;
253 	u64 tx_crc_err_cnt;
254 	u64 tx_drop_cnt;
255 	u64 tx_excessive_length_drop_cnt;
256 
257 	u64 tx_timeout_cnt;
258 	u64 tx_dma_err_cnt;
259 
260 	u64 np_link_fail_cnt;
261 };
262 
/* Per-device private data, attached to the net_device. */
263 struct hbg_priv {
264 	struct net_device *netdev; /* associated network interface */
265 	struct pci_dev *pdev; /* underlying PCI device */
266 	u8 __iomem *io_base; /* MMIO register base */
267 	struct hbg_dev_specs dev_specs; /* device capabilities/parameters */
268 	unsigned long state; /* presumably a HBG_NIC_STATE_* bitmap — confirm */
269 	struct hbg_mac mac; /* MAC/PHY link state */
270 	struct hbg_vector vectors; /* IRQ vector bookkeeping */
271 	struct hbg_ring tx_ring;
272 	struct hbg_ring rx_ring;
273 	struct hbg_mac_filter filter; /* unicast MAC filter shadow */
274 	enum hbg_reset_type reset_type; /* reset currently in progress, if any */
275 	struct hbg_user_def user_def; /* user settings restored after reset */
276 	struct hbg_stats stats;
277 	unsigned long last_update_stats_time; /* presumably jiffies — confirm */
278 	struct delayed_work service_task; /* periodic/deferred maintenance work */
279 };
280 
/* Schedule the service task to perform an error-triggered reset. */
281 void hbg_err_reset_task_schedule(struct hbg_priv *priv);
/* Schedule the service task to handle an NP link failure. */
282 void hbg_np_link_fail_task_schedule(struct hbg_priv *priv);
283 
284 #endif
285