// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2024 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include "hbg_common.h"
#include "hbg_hw.h"
#include "hbg_reg.h"

#define HBG_HW_EVENT_WAIT_TIMEOUT_US	(2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US	(10 * 1000)
/* little endian or big endian.
 * ctrl means packet description, data means skb packet data
 */
#define HBG_ENDIAN_CTRL_LE_DATA_BE	0x0
#define HBG_PCU_FRAME_LEN_PLUS	4

static bool hbg_hw_spec_is_valid(struct hbg_priv *priv)
{
	return hbg_reg_read(priv, HBG_REG_SPEC_VALID_ADDR) &&
	       !hbg_reg_read(priv, HBG_REG_EVENT_REQ_ADDR);
}

int hbg_hw_event_notify(struct hbg_priv *priv,
			enum hbg_hw_event_type event_type)
{
	bool is_valid;
	int ret;

	if (test_and_set_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state))
		return -EBUSY;

	/* notify */
	hbg_reg_write(priv, HBG_REG_EVENT_REQ_ADDR, event_type);

	ret = read_poll_timeout(hbg_hw_spec_is_valid, is_valid, is_valid,
				HBG_HW_EVENT_WAIT_INTERVAL_US,
				HBG_HW_EVENT_WAIT_TIMEOUT_US,
				HBG_HW_EVENT_WAIT_INTERVAL_US, priv);

	clear_bit(HBG_NIC_STATE_EVENT_HANDLING, &priv->state);

	if (ret)
		dev_err(&priv->pdev->dev,
			"event %d wait timeout\n", event_type);

	return ret;
}

static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
{
	struct hbg_dev_specs *specs = &priv->dev_specs;
	u64 mac_addr;

	if (!hbg_hw_spec_is_valid(priv)) {
		dev_err(&priv->pdev->dev, "dev_specs not init\n");
		return -EINVAL;
	}

	specs->mac_id = hbg_reg_read(priv, HBG_REG_MAC_ID_ADDR);
	specs->phy_addr = hbg_reg_read(priv, HBG_REG_PHY_ID_ADDR);
	specs->mdio_frequency = hbg_reg_read(priv, HBG_REG_MDIO_FREQ_ADDR);
	specs->max_mtu = hbg_reg_read(priv, HBG_REG_MAX_MTU_ADDR);
	specs->min_mtu = hbg_reg_read(priv, HBG_REG_MIN_MTU_ADDR);
	specs->vlan_layers = hbg_reg_read(priv, HBG_REG_VLAN_LAYERS_ADDR);
	specs->rx_fifo_num = hbg_reg_read(priv, HBG_REG_RX_FIFO_NUM_ADDR);
	specs->tx_fifo_num = hbg_reg_read(priv, HBG_REG_TX_FIFO_NUM_ADDR);
	specs->uc_mac_num = hbg_reg_read(priv, HBG_REG_UC_MAC_NUM_ADDR);

	mac_addr = hbg_reg_read64(priv, HBG_REG_MAC_ADDR_ADDR);
	u64_to_ether_addr(mac_addr, (u8 *)specs->mac_addr.sa_data);

	if (!is_valid_ether_addr((u8 *)specs->mac_addr.sa_data))
		return -EADDRNOTAVAIL;

	specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + specs->max_mtu;
	specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + specs->max_frame_len;
	return 0;
}

u32 hbg_hw_get_irq_status(struct hbg_priv *priv)
{
	u32 status;

	status = hbg_reg_read(priv, HBG_REG_CF_INTRPT_STAT_ADDR);

	hbg_field_modify(status, HBG_INT_MSK_TX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_STAT_ADDR));
	hbg_field_modify(status, HBG_INT_MSK_RX_B,
			 hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_STAT_ADDR));

	return status;
}

void hbg_hw_irq_clear(struct hbg_priv *priv, u32 mask)
{
	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_write(priv, HBG_REG_CF_IND_TXINT_CLR_ADDR, 0x1);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_write(priv, HBG_REG_CF_IND_RXINT_CLR_ADDR, 0x1);

	return hbg_reg_write(priv, HBG_REG_CF_INTRPT_CLR_ADDR, mask);
}

bool hbg_hw_irq_is_enabled(struct hbg_priv *priv, u32 mask)
{
	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_read(priv, HBG_REG_CF_IND_TXINT_MSK_ADDR);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_read(priv, HBG_REG_CF_IND_RXINT_MSK_ADDR);

	return hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR) & mask;
}
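
/* Note on the interrupt layout, summarized from the register accesses in
 * this file rather than from hardware documentation: the TX and RX
 * interrupts each have dedicated indirect status/clear/mask registers
 * (HBG_REG_CF_IND_*), while every other interrupt source shares the
 * bitmask in HBG_REG_CF_INTRPT_MSK_ADDR. hbg_hw_irq_enable() below
 * therefore special-cases HBG_INT_MSK_TX_B and HBG_INT_MSK_RX_B before
 * falling back to a read-modify-write of the shared mask word.
 */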

void hbg_hw_irq_enable(struct hbg_priv *priv, u32 mask, bool enable)
{
	u32 value;

	if (FIELD_GET(HBG_INT_MSK_TX_B, mask))
		return hbg_reg_write(priv,
				     HBG_REG_CF_IND_TXINT_MSK_ADDR, enable);

	if (FIELD_GET(HBG_INT_MSK_RX_B, mask))
		return hbg_reg_write(priv,
				     HBG_REG_CF_IND_RXINT_MSK_ADDR, enable);

	value = hbg_reg_read(priv, HBG_REG_CF_INTRPT_MSK_ADDR);
	if (enable)
		value |= mask;
	else
		value &= ~mask;

	hbg_reg_write(priv, HBG_REG_CF_INTRPT_MSK_ADDR, value);
}

void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr, u32 index)
{
	u32 addr;

	/* mac addr is u64, so the addr offset is 0x8 */
	addr = HBG_REG_STATION_ADDR_LOW_2_ADDR + (index * 0x8);
	hbg_reg_write64(priv, addr, mac_addr);
}

static void hbg_hw_set_pcu_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_len)
{
	max_frame_len = max_t(u32, max_frame_len, ETH_DATA_LEN);

	/* lower two bits of value must be set to 0 */
	max_frame_len = round_up(max_frame_len, HBG_PCU_FRAME_LEN_PLUS);

	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_LEN_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_len);
}

static void hbg_hw_set_mac_max_frame_len(struct hbg_priv *priv,
					 u16 max_frame_size)
{
	hbg_reg_write_field(priv, HBG_REG_MAX_FRAME_SIZE_ADDR,
			    HBG_REG_MAX_FRAME_LEN_M, max_frame_size);
}

void hbg_hw_set_mtu(struct hbg_priv *priv, u16 mtu)
{
	u32 frame_len;

	frame_len = mtu + VLAN_HLEN * priv->dev_specs.vlan_layers +
		    ETH_HLEN + ETH_FCS_LEN;

	hbg_hw_set_pcu_max_frame_len(priv, frame_len);
	hbg_hw_set_mac_max_frame_len(priv, frame_len);
}

void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable)
{
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_TX_B, enable);
	hbg_reg_write_field(priv, HBG_REG_PORT_ENABLE_ADDR,
			    HBG_REG_PORT_ENABLE_RX_B, enable);
}

u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
{
	if (dir & HBG_DIR_TX)
		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
					  HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);

	if (dir & HBG_DIR_RX)
		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
					  HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);

	return 0;
}

void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
{
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_0_ADDR, tx_desc->word0);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_1_ADDR, tx_desc->word1);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_2_ADDR, tx_desc->word2);
	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
}

void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
{
	hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
}

void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
			    HBG_REG_PORT_MODE_M, speed);
	hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
			    HBG_REG_DUPLEX_B, duplex);
}

/* only support uc filter */
void hbg_hw_set_mac_filter_enable(struct hbg_priv *priv, u32 enable)
{
	hbg_reg_write_field(priv, HBG_REG_REC_FILT_CTRL_ADDR,
			    HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B, enable);
}
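
/* Pause (flow control) handling: both directions are single bits in
 * HBG_REG_PAUSE_ENABLE_ADDR, and hbg_hw_get_pause_enable() reads the same
 * bits back. HBG_REG_FD_FC_ADDR_LOW_ADDR appears to hold the MAC address
 * the hardware uses for received pause frames; that interpretation is
 * inferred from the register and function names, not from a datasheet.
 */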

void hbg_hw_set_pause_enable(struct hbg_priv *priv, u32 tx_en, u32 rx_en)
{
	hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
			    HBG_REG_PAUSE_ENABLE_TX_B, tx_en);
	hbg_reg_write_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
			    HBG_REG_PAUSE_ENABLE_RX_B, rx_en);
}

void hbg_hw_get_pause_enable(struct hbg_priv *priv, u32 *tx_en, u32 *rx_en)
{
	*tx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
				    HBG_REG_PAUSE_ENABLE_TX_B);
	*rx_en = hbg_reg_read_field(priv, HBG_REG_PAUSE_ENABLE_ADDR,
				    HBG_REG_PAUSE_ENABLE_RX_B);
}

void hbg_hw_set_rx_pause_mac_addr(struct hbg_priv *priv, u64 mac_addr)
{
	hbg_reg_write64(priv, HBG_REG_FD_FC_ADDR_LOW_ADDR, mac_addr);
}

static void hbg_hw_init_transmit_ctrl(struct hbg_priv *priv)
{
	u32 ctrl = 0;

	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_AN_EN_B, HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_CRC_ADD_B, HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_TRANSMIT_CTRL_PAD_EN_B, HBG_STATUS_ENABLE);

	hbg_reg_write(priv, HBG_REG_TRANSMIT_CTRL_ADDR, ctrl);
}

static void hbg_hw_init_rx_ctrl(struct hbg_priv *priv)
{
	u32 ctrl = 0;

	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_GET_ADDR_MODE_B,
			   HBG_STATUS_ENABLE);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_TIME_INF_EN_B, HBG_STATUS_DISABLE);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE_M, HBG_RX_SKIP1);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M,
			   HBG_RX_SKIP2);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_RX_ALIGN_NUM_M, NET_IP_ALIGN);
	ctrl |= FIELD_PREP(HBG_REG_RX_CTRL_PORT_NUM, priv->dev_specs.mac_id);

	hbg_reg_write(priv, HBG_REG_RX_CTRL_ADDR, ctrl);
}

static void hbg_hw_init_rx_control(struct hbg_priv *priv)
{
	hbg_hw_init_rx_ctrl(priv);

	/* parse from L2 layer */
	hbg_reg_write_field(priv, HBG_REG_RX_PKT_MODE_ADDR,
			    HBG_REG_RX_PKT_MODE_PARSE_MODE_M, 0x1);

	hbg_reg_write_field(priv, HBG_REG_RECV_CTRL_ADDR,
			    HBG_REG_RECV_CTRL_STRIP_PAD_EN_B,
			    HBG_STATUS_ENABLE);
	hbg_reg_write_field(priv, HBG_REG_RX_BUF_SIZE_ADDR,
			    HBG_REG_RX_BUF_SIZE_M, priv->dev_specs.rx_buf_size);
	hbg_reg_write_field(priv, HBG_REG_CF_CRC_STRIP_ADDR,
			    HBG_REG_CF_CRC_STRIP_B, HBG_STATUS_DISABLE);
}

int hbg_hw_init(struct hbg_priv *priv)
{
	int ret;

	ret = hbg_hw_dev_specs_init(priv);
	if (ret)
		return ret;

	hbg_reg_write_field(priv, HBG_REG_BUS_CTRL_ADDR,
			    HBG_REG_BUS_CTRL_ENDIAN_M,
			    HBG_ENDIAN_CTRL_LE_DATA_BE);
	hbg_reg_write_field(priv, HBG_REG_MODE_CHANGE_EN_ADDR,
			    HBG_REG_MODE_CHANGE_EN_B, HBG_STATUS_ENABLE);

	hbg_hw_init_rx_control(priv);
	hbg_hw_init_transmit_ctrl(priv);
	return 0;
}
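
/* Typical bring-up order, as a rough sketch inferred from this file alone;
 * the real call sites live elsewhere in the driver and may differ, and
 * 'mac' / 'err' below are hypothetical caller-side names:
 *
 *	ret = hbg_hw_init(priv);		// specs, endian, RX/TX ctrl
 *	if (ret)
 *		goto err;
 *	hbg_hw_set_uc_addr(priv, mac, 0);	// program station address 0
 *	hbg_hw_set_mtu(priv, ETH_DATA_LEN);	// PCU + MAC frame length
 *	hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true);
 *	hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);
 *	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);	// open TX/RX port
 */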