xref: /linux/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c (revision 9208c05f9fdfd927ea160b97dfef3c379049fff2)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2024 Hisilicon Limited.
3 
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
#include "hbg_hw.h"
#include "hbg_reg.h"
11 
12 #define HBG_HW_EVENT_WAIT_TIMEOUT_US	(2 * 1000 * 1000)
13 #define HBG_HW_EVENT_WAIT_INTERVAL_US	(10 * 1000)
14 /* little endian or big endian.
15  * ctrl means packet description, data means skb packet data
16  */
17 #define HBG_ENDIAN_CTRL_LE_DATA_BE	0x0
18 #define HBG_PCU_FRAME_LEN_PLUS 4
19 
20 static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
21 {
22 	return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
23 	       !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR);
24 }
25 
/* Notify firmware of a hardware event and wait for it to be consumed.
 *
 * Only one event may be in flight at a time; the EVENT_HANDLING state bit
 * serializes callers.  The event type is written to the EVENT_REQ register
 * and completion is detected when hbg_hw_spec_is_valid() reports EVENT_REQ
 * cleared again (with SPEC_VALID still set).
 *
 * Return: 0 on success, -EBUSY if another event is already being handled,
 * or -ETIMEDOUT (from read_poll_timeout()) after 2 s without completion.
 */
int hbg_hw_event_notify(struct hbg_priv *priv,
			enum hbg_hw_event_type event_type)
{
	bool is_valid;
	int ret;

	if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state))
		return -EBUSY;

	/* notify */
	hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type);

	ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid,
				HBG_HW_EVENT_WAIT_INTERVAL_US,
				HBG_HW_EVENT_WAIT_TIMEOUT_US,
				HBG_HW_EVENT_WAIT_INTERVAL_US, priv);

	clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state);

	if (ret)
		dev_err(&priv->pdev->dev,
			"event %d wait timeout\n", event_type);

	return ret;
}
51 
/* Read the firmware-exported device specifications into priv->dev_specs.
 *
 * Return: 0 on success, -EINVAL if firmware has not populated the specs
 * yet, or -EADDRNOTAVAIL if the exported MAC address is not a valid
 * unicast address.
 */
static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
{
	struct hbg_dev_specs *specs = &priv->dev_specs;
	u64 mac_addr;

	if (!hbg_hw_spec_is_valid(priv)) {
		dev_err(&priv->pdev->dev, "dev_specs not init\n");
		return -EINVAL;
	}

	specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR);
	specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR);
	specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR);
	specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR);
	specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR);
	specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
	specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
	specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
	/* MAC address is exported as a single 64-bit register value */
	mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
	u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);

	if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data))
		return -EADDRNOTAVAIL;

	/* derived sizes: frame limit adds PCU cache-line headroom to the
	 * max MTU; each RX buffer additionally holds the packet head
	 */
	specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu;
	specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len;
	return 0;
}
80 
/* Build a combined interrupt status word.
 *
 * TX and RX completion have dedicated status registers; their state is
 * folded into the TX/RX bits of the common status word so callers can
 * test every interrupt source through a single value.
 */
u32 hbg_hw_get_irq_status(struct hbg_priv *priv)
{
	u32 status;

	status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR);

	hbg_field_modify(status, HBG_INT_MSK_TX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR));
	hbg_field_modify(status, HBG_INT_MSK_RX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR));

	return status;
}
94 
95 void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask)
96 {
97 	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
98 		return hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1);
99 
100 	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
101 		return hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1);
102 
103 	return hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask);
104 }
105 
106 bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask)
107 {
108 	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
109 		return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR);
110 
111 	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
112 		return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR);
113 
114 	return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask;
115 }
116 
117 void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
118 {
119 	u32 value;
120 
121 	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
122 		return hbg_reg_write(priv,
123 				     HBG_REG_CF_IND_TXINT_MSK_ADDR, enable);
124 
125 	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
126 		return hbg_reg_write(priv,
127 				     HBG_REG_CF_IND_RXINT_MSK_ADDR, enable);
128 
129 	value = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR);
130 	if (enable)
131 		value |= mask;
132 	else
133 		value &= ~mask;
134 
135 	hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
136 }
137 
/* Program the station (unicast) MAC address as one 64-bit register write.
 * NOTE(review): register name suggests the low half lives at
 * STATION_ADDR_LOW_2 with the high half adjacent — confirm in hbg_reg.h.
 */
void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr)
{
	hbg_reg_write64(priv, HBG_REG_STATION_ADDR_LOW_2_ADDR, mac_addr);
}
142 
143 static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
144 					 u16 max_frame_len)
145 {
146 	max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN);
147 
148 	/* lower two bits of value must be set to 0 */
149 	max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS);
150 
151 	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR,
152 			    HBG_REG_MAX_FRAME_LEN_M, max_frame_len);
153 }
154 
/* Program the MAC-layer maximum frame size field. */
static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_size)
{
	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_size);
}
161 
162 void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
163 {
164 	hbg_hw_set_pcu_max_frame_len(priv, mtu);
165 	hbg_hw_set_mac_max_frame_len(priv, mtu);
166 }
167 
/* Enable or disable the MAC port: @enable is applied to both the TX and
 * RX enable bits of the port-enable register.
 */
void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
{
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_TX_B, enable);
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_RX_B, enable);
}
175 
176 u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
177 {
178 	if (dir & HBG_DIR_TX)
179 		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
180 					  HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);
181 
182 	if (dir & HBG_DIR_RX)
183 		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
184 					  HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);
185 
186 	return 0;
187 }
188 
/* Push one TX descriptor to hardware by writing its four words to the
 * TX command FIFO address registers, in order.
 */
void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
{
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
}
196 
/* Hand an RX buffer's DMA address to hardware so it can be filled with a
 * received packet.
 */
void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
{
	hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
}
201 
/* Apply the negotiated link parameters: port speed mode and duplex. */
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
			    HBG_REG_PORT_MODE_M, speed);
	hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
			    HBG_REG_DUPLEX_B, duplex);
}
209 
210 static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
211 {
212 	u32 ctrl = 0;
213 
214 	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE);
215 	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE);
216 	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE);
217 
218 	hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl);
219 }
220 
221 static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv)
222 {
223 	u32 ctrl = 0;
224 
225 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B,
226 			   HBG_STATUS_ENABLE);
227 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE);
228 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1);
229 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M,
230 			   HBG_RX_SKIP2);
231 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN);
232 	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id);
233 
234 	hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl);
235 }
236 
/* Initialize the receive path: RX control register, L2 parse mode,
 * padding strip, RX buffer size from the firmware specs, and CRC strip
 * disabled.
 */
static void hbg_hw_init_rx_control(struct hbg_priv *priv)
{
	hbg_hw_init_rx_ctrl(priv);

	/* parse from L2 layer */
	hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR,
			    HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1);

	hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR,
			    HBG_REG_RECV_CTRL_STRIP_PAD_EN_B,
			    HBG_STATUS_ENABLE);
	hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR,
			    HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size);
	hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR,
			    HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE);
}
253 
/* One-time hardware initialization.
 *
 * Reads the firmware-provided device specs, programs bus endianness
 * (little-endian descriptors, big-endian packet data — see
 * HBG_ENDIAN_CTRL_LE_DATA_BE), enables mode change, then initializes the
 * RX and TX control registers.
 *
 * Return: 0 on success or a negative errno from spec initialization.
 */
int hbg_hw_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_hw_dev_specs_init(priv);
	if (ret)
		return ret;

	hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR,
			    HBG_REG_BUS_CTRL_ENDIAN_M,
			    HBG_ENDIAN_CTRL_LE_DATA_BE);
	hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR,
			    HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE);

	hbg_hw_init_rx_control(priv);
	hbg_hw_init_transmit_ctrl(priv);
	return 0;
}
272