// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
#include "hbg_hw.h"
#include "hbg_reg.h"

#define HBG_HW_EVENT_WAIT_TIMEOUT_US	(2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US	(10 * 1000)
/* little endian or big endian:
 * ctrl means the packet descriptor, data means the skb packet data
 */
#define HBG_ENDIAN_CTRL_LE_DATA_BE	0x0
#define HBG_PCU_FRAME_LEN_PLUS		4

#define HBG_FIFO_TX_FULL_THRSLD		0x3F0
#define HBG_FIFO_TX_EMPTY_THRSLD	0x1F0
#define HBG_FIFO_RX_FULL_THRSLD		0x240
#define HBG_FIFO_RX_EMPTY_THRSLD	0x190
#define HBG_CFG_FIFO_FULL_THRSLD	0x10
#define HBG_CFG_FIFO_EMPTY_THRSLD	0x01

static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
{
	return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
	       !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR);
}

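/* Notify the hardware of an event and wait for completion.
 *
 * The HBG_NIC_STATE_EVENT_HANDLING bit serializes callers. The event type
 * is written to the event request register, then we poll (every 10ms, up
 * to a 2s timeout) until hbg_hw_spec_is_valid() reports that the request
 * register has been cleared again, presumably by the firmware consuming
 * the event, and that the specs are still marked valid.
 */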
int hbg_hw_event_notify(struct hbg_priv *priv,
			enum hbg_hw_event_type event_type)
{
	bool is_valid;
	int ret;

	if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state))
		return -EBUSY;

	/* notify */
	hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type);

	ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid,
				HBG_HW_EVENT_WAIT_INTERVAL_US,
				HBG_HW_EVENT_WAIT_TIMEOUT_US,
				HBG_HW_EVENT_WAIT_INTERVAL_US, priv);

	clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state);

	if (ret)
		dev_err(&priv->pdev->dev,
			"event %d wait timeout\n", event_type);

	return ret;
}

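/* Read the device specifications from hardware registers. These are
 * assumed to be populated (presumably by firmware) before the driver
 * probes; if the spec-valid register is not set, fail with -EINVAL.
 */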
static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
{
	struct hbg_dev_specs *specs = &priv->dev_specs;
	u64 mac_addr;

	if (!hbg_hw_spec_is_valid(priv)) {
		dev_err(&priv->pdev->dev, "dev_specs not init\n");
		return -EINVAL;
	}

	specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR);
	specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR);
	specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR);
	specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR);
	specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR);
	specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
	specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
	specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
	specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);

	mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
	u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);

	if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data))
		return -EADDRNOTAVAIL;

	specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu;
	specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len;
	return 0;
}

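/* The TX and RX interrupt states live in dedicated indirect registers
 * rather than in the common status word, so fold them into the
 * corresponding bits of the common value before returning it.
 */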
u32 hbg_hw_get_irq_status(struct hbg_priv *priv)
{
	u32 status;

	status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR);

	hbg_field_modify(status, HBG_INT_MSK_TX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR));
	hbg_field_modify(status, HBG_INT_MSK_RX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR));

	return status;
}

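/* The same split applies to clearing and masking interrupts: TX and RX
 * each have their own indirect clear/mask registers, while all other
 * interrupts share the common clear/mask registers. The three helpers
 * below follow this layout.
 */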
void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask)
{
	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1);

	return hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask);
}

bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask)
{
	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR);

	return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask;
}

void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
{
	u32 value;

	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_write(priv,
				     HBG_REG_CF_IND_TXINT_MSK_ADDR, enable);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_write(priv,
				     HBG_REG_CF_IND_RXINT_MSK_ADDR, enable);

	value = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR);
	if (enable)
		value |= mask;
	else
		value &= ~mask;

	hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}

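/* Program one entry of the unicast station address table. Each entry is
 * a 64-bit register pair, so consecutive indexes are 0x8 apart:
 *   index 0 -> HBG_REG_STATION_ADDR_LOW_2_ADDR + 0x0
 *   index 1 -> HBG_REG_STATION_ADDR_LOW_2_ADDR + 0x8
 */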
void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
	u32 addr;

	/* mac addr is u64, so the addr offset is 0x8 */
	addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
	hbg_reg_write64(priv, addr, mac_addr);
}

static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_len)
{
	max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN);

	/* lower two bits of value must be set to 0 */
	max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS);

	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_len);
}

static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_size)
{
	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_size);
}

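/* Derive the maximum frame length from the MTU and program it into both
 * the PCU and the MAC. For example, with mtu = 1500 and vlan_layers = 2
 * (as read from the dev specs):
 *   frame_len = 1500 + 4 * 2 (VLAN_HLEN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
 *             = 1526
 */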
void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
	/* Setting burst_len BIT(29) to 1 improves TX performance, but
	 * packet drops occur when mtu > 2000, so clear BIT(29) in that case.
	 */
	u32 burst_len_bit = (mtu > 2000) ? 0 : 1;
	u32 frame_len;

	frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
		    ETH_HLEN + ETH_FCS_LEN;

	hbg_hw_set_pcu_max_frame_len(priv, frame_len);
	hbg_hw_set_mac_max_frame_len(priv, frame_len);

	hbg_reg_write_field(priv, HBG_REG_BRUST_LENGTH_ADDR,
			    HBG_REG_BRUST_LENGTH_B, burst_len_bit);
}

void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
{
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_TX_B, enable);
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_RX_B, enable);
}

u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
{
	if (dir & HBG_DIR_TX)
		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
					  HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);

	if (dir & HBG_DIR_RX)
		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
					  HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);

	return 0;
}

void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
{
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
}

void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
{
	hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
}

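/* Apply a new speed/duplex setting. The MAC is disabled while the port
 * mode and duplex type are reprogrammed, then a core reset event is
 * issued, presumably so the new settings take effect, and the MAC is
 * re-enabled. If the negotiation state register does not report the NP
 * link as OK afterwards, the link-fail task is scheduled to recover.
 */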
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);

	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
			    HBG_REG_PORT_MODE_M, speed);
	hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
			    HBG_REG_DUPLEX_B, duplex);

	hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);

	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);

	if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
				HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
		hbg_np_link_fail_task_schedule(priv);
}

/* only the uc filter is supported */
void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
{
	hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
			    HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);

	/* only uc filter is supported, so set all bits of mc mask reg to 1 */
	hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_0, U64_MAX);
	hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_MSK_1, U64_MAX);
}

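/* Enable/disable pause frame handling. When RX pause is enabled, pause
 * frames presumably must also pass the receive filter, so the
 * PAUSE_FRM_PASS bit mirrors rx_en.
 */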
void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
{
	hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
			    HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
	hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
			    HBG_REG_PAUSE_ENABLE_RX_B, rx_en);

	hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
			    HBG_REG_REC_FILT_CTRL_PAUSE_FRM_PASS_B, rx_en);
}

void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
{
	*tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
				    HBG_REG_PAUSE_ENABLE_TX_B);
	*rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
				    HBG_REG_PAUSE_ENABLE_RX_B);
}

void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
{
	hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
}

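/* The FIFO threshold registers carry a full and an empty watermark in a
 * single word; build the word with FIELD_PREP() and write it to the TX
 * and/or RX threshold register selected by @dir.
 */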
static void hbg_hw_set_fifo_thrsld(struct hbg_priv *priv,
				   u32 full, u32 empty, enum hbg_dir dir)
{
	u32 value = 0;

	value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_FULL_M, full);
	value |= FIELD_PREP(HBG_REG_FIFO_THRSLD_EMPTY_M, empty);

	if (dir & HBG_DIR_TX)
		hbg_reg_write(priv, HBG_REG_TX_FIFO_THRSLD_ADDR, value);

	if (dir & HBG_DIR_RX)
		hbg_reg_write(priv, HBG_REG_RX_FIFO_THRSLD_ADDR, value);
}

static void hbg_hw_set_cfg_fifo_thrsld(struct hbg_priv *priv,
				       u32 full, u32 empty, enum hbg_dir dir)
{
	u32 value;

	value = hbg_reg_read(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR);

	if (dir & HBG_DIR_TX) {
		value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_FULL_M, full);
		value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_TX_EMPTY_M, empty);
	}

	if (dir & HBG_DIR_RX) {
		value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_FULL_M, full);
		value |= FIELD_PREP(HBG_REG_CFG_FIFO_THRSLD_RX_EMPTY_M, empty);
	}

	hbg_reg_write(priv, HBG_REG_CFG_FIFO_THRSLD_ADDR, value);
}

static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
{
	u32 ctrl = 0;

	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE);

	hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl);
}

static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv)
{
	u32 ctrl = 0;

	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B,
			   HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M,
			   HBG_RX_SKIP2);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id);

	hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl);
}

static void hbg_hw_init_rx_control(struct hbg_priv *priv)
{
	hbg_hw_init_rx_ctrl(priv);

	/* parse from the L2 layer */
	hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR,
			    HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1);

	hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR,
			    HBG_REG_RECV_CTRL_STRIP_PAD_EN_B,
			    HBG_STATUS_ENABLE);
	hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR,
			    HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size);
	hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR,
			    HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE);
}

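/* One-time hardware init: read the dev specs exported via registers,
 * configure the bus endianness, enable mode change, set up the RX and TX
 * paths, and program the FIFO thresholds defined at the top of this file.
 */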
int hbg_hw_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_hw_dev_specs_init(priv);
	if (ret)
		return ret;

	hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR,
			    HBG_REG_BUS_CTRL_ENDIAN_M,
			    HBG_ENDIAN_CTRL_LE_DATA_BE);
	hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR,
			    HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE);

	hbg_hw_init_rx_control(priv);
	hbg_hw_init_transmit_ctrl(priv);

	hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_TX_FULL_THRSLD,
			       HBG_FIFO_TX_EMPTY_THRSLD, HBG_DIR_TX);
	hbg_hw_set_fifo_thrsld(priv, HBG_FIFO_RX_FULL_THRSLD,
			       HBG_FIFO_RX_EMPTY_THRSLD, HBG_DIR_RX);
	hbg_hw_set_cfg_fifo_thrsld(priv, HBG_CFG_FIFO_FULL_THRSLD,
				   HBG_CFG_FIFO_EMPTY_THRSLD, HBG_DIR_TX_RX);
	return 0;
}