xref: /linux/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2024 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 #include <linux/ethtool.h>
6 #include <linux/if_vlan.h>
7 #include <linux/iopoll.h>
8 #include <linux/minmax.h>
9 #include "hbg_common.h"
10 #include "hbg_hw.h"
11 #include "hbg_reg.h"
12 
13 #define HBG_HW_EVENT_WAIT_TIMEOUT_US	(2 * 1000 * 1000)
14 #define HBG_HW_EVENT_WAIT_INTERVAL_US	(10 * 1000)
15 /* little endian or big endian.
16  * ctrl means packet description, data means skb packet data
17  */
18 #define HBG_ENDIAN_CTRL_LE_DATA_BE	0x0
19 #define HBG_PCU_FRAME_LEN_PLUS 4
20 
hbg_hw_spec_is_valid(struct hbg_priv * priv)21 static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
22 {
23 	return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
24 	       !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR);
25 }
26 
/* Notify hardware of an event and synchronously wait for it to finish.
 *
 * Only one event may be in flight at a time; a concurrent caller gets
 * -EBUSY.  Completion is detected by polling hbg_hw_spec_is_valid(),
 * which returns true once the event request register reads back as 0.
 *
 * Returns 0 on success, -EBUSY if another event is already being
 * handled, or the poll-timeout error if the hardware does not complete
 * within HBG_HW_EVENT_WAIT_TIMEOUT_US.
 */
int hbg_hw_event_notify(struct hbg_priv *priv,
			enum hbg_hw_event_type event_type)
{
	bool is_valid;
	int ret;

	/* serialize event handling via the EVENT_HANDLING state bit */
	if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state))
		return -EBUSY;

	/* notify */
	hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type);

	/* poll every 10ms for up to 2s; also sleep before the first read */
	ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid,
				HBG_HW_EVENT_WAIT_INTERVAL_US,
				HBG_HW_EVENT_WAIT_TIMEOUT_US,
				HBG_HW_EVENT_WAIT_INTERVAL_US, priv);

	clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state);

	if (ret)
		dev_err(&priv->pdev->dev,
			"event %d wait timeout\n", event_type);

	return ret;
}
52 
/* Read the device capabilities exported by firmware into
 * priv->dev_specs.  Must succeed before any other hardware setup.
 *
 * Returns 0 on success, -EINVAL if the firmware has not populated the
 * spec registers yet, or -EADDRNOTAVAIL if the reported MAC address is
 * not a valid unicast address.
 */
static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
{
	struct hbg_dev_specs *specs = &priv->dev_specs;
	u64 mac_addr;

	if (!hbg_hw_spec_is_valid(priv)) {
		dev_err(&priv->pdev->dev, "dev_specs not init\n");
		return -EINVAL;
	}

	/* static capabilities published by firmware */
	specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR);
	specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR);
	specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR);
	specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR);
	specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR);
	specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
	specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
	specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
	specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);

	mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
	u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);

	/* reject all-zero or multicast firmware-provided addresses */
	if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data))
		return -EADDRNOTAVAIL;

	/* derived buffer sizes used by the rx path */
	specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu;
	specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len;
	return 0;
}
83 
hbg_hw_get_irq_status(struct hbg_priv * priv)84 u32 hbg_hw_get_irq_status(struct hbg_priv *priv)
85 {
86 	u32 status;
87 
88 	status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR);
89 
90 	hbg_field_modify(status, HBG_INT_MSK_TX_B,
91 			 hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR));
92 	hbg_field_modify(status, HBG_INT_MSK_RX_B,
93 			 hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR));
94 
95 	return status;
96 }
97 
/* Clear the interrupt selected by @mask.  TX and RX completion
 * interrupts have dedicated clear registers; everything else is
 * cleared through the common clear register.
 */
void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask)
{
	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1);
	else if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1);
	else
		hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask);
}
108 
hbg_hw_irq_is_enabled(struct hbg_priv * priv,u32 mask)109 bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask)
110 {
111 	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
112 		return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR);
113 
114 	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
115 		return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR);
116 
117 	return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask;
118 }
119 
/* Enable or disable the interrupt selected by @mask.  TX/RX completion
 * interrupts are controlled through their dedicated mask registers; all
 * other bits are toggled by read-modify-write of the common mask.
 */
void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
{
	u32 msk;

	if (FIELD_GET(HBG_INT_MSK_TX_B, mask)) {
		hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR, enable);
		return;
	}

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask)) {
		hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR, enable);
		return;
	}

	msk = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR);
	msk = enable ? (msk | mask) : (msk & ~mask);
	hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, msk);
}
140 
/* Program unicast filter entry @index with @mac_addr.  Entries are
 * laid out as consecutive u64 registers, hence the 0x8 stride.
 */
void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
	hbg_reg_write64(priv,
			HBG_REG_STATION_ADDR_LOW_2_ADDR + index * 0x8,
			mac_addr);
}
149 
/* Program the PCU maximum frame length.  The value is clamped up to at
 * least ETH_DATA_LEN and rounded to a multiple of 4, as the hardware
 * requires the two low bits to be zero.
 */
static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_len)
{
	max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN);

	/* lower two bits of value must be set to 0 */
	max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS);

	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_len);
}
161 
/* Program the MAC-side maximum frame size (companion to the PCU limit
 * set by hbg_hw_set_pcu_max_frame_len()).
 */
static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_size)
{
	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_size);
}
168 
/* Apply a new MTU by programming both the PCU and MAC frame-length
 * limits.  The on-wire frame length adds the Ethernet header, FCS and
 * one VLAN tag per supported VLAN layer on top of the MTU.
 */
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
	u32 frame_len = ETH_HLEN + mtu + ETH_FCS_LEN +
			VLAN_HLEN * priv->dev_specs.vlan_layers;

	hbg_hw_set_pcu_max_frame_len(priv, frame_len);
	hbg_hw_set_mac_max_frame_len(priv, frame_len);
}
179 
/* Enable or disable both the TX and RX datapaths of the MAC port.
 * @enable is expected to be HBG_STATUS_ENABLE or HBG_STATUS_DISABLE.
 */
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
{
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_TX_B, enable);
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_RX_B, enable);
}
187 
hbg_hw_get_fifo_used_num(struct hbg_priv * priv,enum hbg_dir dir)188 u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
189 {
190 	if (dir & HBG_DIR_TX)
191 		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
192 					  HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);
193 
194 	if (dir & HBG_DIR_RX)
195 		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
196 					  HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);
197 
198 	return 0;
199 }
200 
/* Push a TX descriptor to hardware by writing its four words to the
 * TX command FIFO registers, word0 first.
 */
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
{
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
}
208 
/* Hand an RX buffer to hardware by writing its DMA address to the RX
 * command FIFO register.
 */
void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
{
	hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
}
213 
/* Reconfigure the MAC for a new link speed and duplex reported by the
 * PHY.  The port is disabled around the mode change, and a core reset
 * event is issued so the new configuration takes effect.
 *
 * NOTE(review): the return value of hbg_hw_event_notify() is ignored
 * here, so a core-reset timeout goes unreported — confirm intended.
 */
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);

	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
			    HBG_REG_PORT_MODE_M, speed);
	hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
			    HBG_REG_DUPLEX_B, duplex);

	hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);

	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);

	/* if autoneg has not reached link-ok, schedule the recovery task */
	if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
				HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
		hbg_np_link_fail_task_schedule(priv);
}
231 
/* only support uc filter */
void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
{
	/* toggle unicast exact-match filtering on the receive path */
	hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
			    HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
}
238 
/* Enable/disable transmission and honoring of Ethernet pause frames
 * independently (@tx_en / @rx_en).
 */
void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
{
	hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
			    HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
	hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
			    HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
}
246 
/* Read back the current pause-frame configuration into @tx_en/@rx_en
 * (counterpart of hbg_hw_set_pause_enable()).
 */
void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
{
	*tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
				    HBG_REG_PAUSE_ENABLE_TX_B);
	*rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
				    HBG_REG_PAUSE_ENABLE_RX_B);
}
254 
/* Program the source MAC address hardware uses when matching/handling
 * received pause frames.
 */
void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
{
	hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
}
259 
hbg_hw_init_transmit_ctrl(struct hbg_priv * priv)260 static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
261 {
262 	u32 ctrl = 0;
263 
264 	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE);
265 	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE);
266 	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE);
267 
268 	hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl);
269 }
270 
hbg_hw_init_rx_ctrl(struct hbg_priv * priv)271 static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv)
272 {
273 	u32 ctrl = 0;
274 
275 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B,
276 			   HBG_STATUS_ENABLE);
277 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE);
278 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1);
279 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M,
280 			   HBG_RX_SKIP2);
281 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN);
282 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id);
283 
284 	hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl);
285 }
286 
/* Full RX-path initialization: RX control word, parse mode, pad
 * stripping, buffer size and CRC-strip policy.
 */
static void hbg_hw_init_rx_control(struct hbg_priv *priv)
{
	hbg_hw_init_rx_ctrl(priv);

	/* parse from L2 layer */
	hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR,
			    HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1);

	/* strip padding of short frames in hardware */
	hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR,
			    HBG_REG_RECV_CTRL_STRIP_PAD_EN_B,
			    HBG_STATUS_ENABLE);
	/* rx_buf_size was derived in hbg_hw_dev_specs_init() */
	hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR,
			    HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size);
	/* keep the FCS in the received frame (no CRC strip) */
	hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR,
			    HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE);
}
303 
/* Top-level hardware init: read device specs, set bus endianness and
 * mode-change enable, then bring up the RX and TX control paths.
 *
 * Returns 0 on success or a negative errno from the specs read.
 */
int hbg_hw_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_hw_dev_specs_init(priv);
	if (ret)
		return ret;

	/* descriptors little-endian, packet data big-endian */
	hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR,
			    HBG_REG_BUS_CTRL_ENDIAN_M,
			    HBG_ENDIAN_CTRL_LE_DATA_BE);
	hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR,
			    HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE);

	hbg_hw_init_rx_control(priv);
	hbg_hw_init_transmit_ctrl(priv);
	return 0;
}
322