// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#ifndef __HCLGE_MAIN_H
#define __HCLGE_MAIN_H
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>

#include "hclge_cmd.h"
#include "hnae3.h"

#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"

#define HCLGE_MAX_PF_NUM		8

#define HCLGE_RD_FIRST_STATS_NUM	2
#define HCLGE_RD_OTHER_STATS_NUM	4

#define HCLGE_INVALID_VPORT		0xffff

#define HCLGE_PF_CFG_BLOCK_SIZE		32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)

#define HCLGE_VECTOR_REG_BASE		0x20000
#define HCLGE_MISC_VECTOR_REG_BASE	0x20400

#define HCLGE_VECTOR_REG_OFFSET		0x4
#define HCLGE_VECTOR_VF_OFFSET		0x100000

#define HCLGE_CMDQ_TX_ADDR_L_REG	0x27000
#define HCLGE_CMDQ_TX_ADDR_H_REG	0x27004
#define HCLGE_CMDQ_TX_DEPTH_REG		0x27008
#define HCLGE_CMDQ_TX_TAIL_REG		0x27010
#define HCLGE_CMDQ_TX_HEAD_REG		0x27014
#define HCLGE_CMDQ_RX_ADDR_L_REG	0x27018
#define HCLGE_CMDQ_RX_ADDR_H_REG	0x2701C
#define HCLGE_CMDQ_RX_DEPTH_REG		0x27020
#define HCLGE_CMDQ_RX_TAIL_REG		0x27024
#define HCLGE_CMDQ_RX_HEAD_REG		0x27028
#define HCLGE_CMDQ_INTR_SRC_REG		0x27100
#define HCLGE_CMDQ_INTR_STS_REG		0x27104
#define HCLGE_CMDQ_INTR_EN_REG		0x27108
#define HCLGE_CMDQ_INTR_GEN_REG		0x2710C

/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG	0x20600
#define HCLGE_RAS_OTHER_STS_REG		0x20B00
#define HCLGE_FUNC_RESET_STS_REG	0x20C00
#define HCLGE_GRO_EN_REG		0x28000

/* bar registers for rcb */
#define HCLGE_RING_RX_ADDR_L_REG	0x80000
#define HCLGE_RING_RX_ADDR_H_REG	0x80004
#define HCLGE_RING_RX_BD_NUM_REG	0x80008
#define HCLGE_RING_RX_BD_LENGTH_REG	0x8000C
#define HCLGE_RING_RX_MERGE_EN_REG	0x80014
#define HCLGE_RING_RX_TAIL_REG		0x80018
#define HCLGE_RING_RX_HEAD_REG		0x8001C
#define HCLGE_RING_RX_FBD_NUM_REG	0x80020
#define HCLGE_RING_RX_OFFSET_REG	0x80024
#define HCLGE_RING_RX_FBD_OFFSET_REG	0x80028
#define HCLGE_RING_RX_STASH_REG		0x80030
#define HCLGE_RING_RX_BD_ERR_REG	0x80034
#define HCLGE_RING_TX_ADDR_L_REG	0x80040
#define HCLGE_RING_TX_ADDR_H_REG	0x80044
#define HCLGE_RING_TX_BD_NUM_REG	0x80048
#define HCLGE_RING_TX_PRIORITY_REG	0x8004C
#define HCLGE_RING_TX_TC_REG		0x80050
#define HCLGE_RING_TX_MERGE_EN_REG	0x80054
#define HCLGE_RING_TX_TAIL_REG		0x80058
#define HCLGE_RING_TX_HEAD_REG		0x8005C
#define HCLGE_RING_TX_FBD_NUM_REG	0x80060
#define HCLGE_RING_TX_OFFSET_REG	0x80064
#define HCLGE_RING_TX_EBD_NUM_REG	0x80068
#define HCLGE_RING_TX_EBD_OFFSET_REG	0x80070
#define HCLGE_RING_TX_BD_ERR_REG	0x80074
#define HCLGE_RING_EN_REG		0x80090

/* bar registers for tqp interrupt */
#define HCLGE_TQP_INTR_CTRL_REG		0x20000
#define HCLGE_TQP_INTR_GL0_REG		0x20100
#define HCLGE_TQP_INTR_GL1_REG		0x20200
#define HCLGE_TQP_INTR_GL2_REG		0x20300
#define HCLGE_TQP_INTR_RL_REG		0x20900

#define HCLGE_RSS_IND_TBL_SIZE		512
#define HCLGE_RSS_SET_BITMAP_MSK	GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE		40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ	0
#define HCLGE_RSS_HASH_ALGO_SIMPLE	1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC	2
#define HCLGE_RSS_HASH_ALGO_MASK	GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
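
/* Illustrative sketch, not part of the original header: the 512-entry RSS
 * indirection table is programmed in chunks of HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor (16 in hclge_cmd.h at the time of writing,
 * which is what makes HCLGE_RSS_CFG_TBL_NUM come out to 32, if that value
 * is unchanged). Hardware then picks a queue roughly as sketched below; the
 * helper name is made up for the example.
 */
static inline u8 hclge_example_rss_queue(const u8 *ind_tbl, u32 rss_hash)
{
	/* the low bits of the RSS hash index one of the 512 table entries */
	return ind_tbl[rss_hash % HCLGE_RSS_IND_TBL_SIZE];
}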
#define HCLGE_RSS_INPUT_TUPLE_OTHER	GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP	GENMASK(4, 0)
#define HCLGE_D_PORT_BIT		BIT(0)
#define HCLGE_S_PORT_BIT		BIT(1)
#define HCLGE_D_IP_BIT			BIT(2)
#define HCLGE_S_IP_BIT			BIT(3)
#define HCLGE_V_TAG_BIT			BIT(4)

#define HCLGE_RSS_TC_SIZE_0		1
#define HCLGE_RSS_TC_SIZE_1		2
#define HCLGE_RSS_TC_SIZE_2		4
#define HCLGE_RSS_TC_SIZE_3		8
#define HCLGE_RSS_TC_SIZE_4		16
#define HCLGE_RSS_TC_SIZE_5		32
#define HCLGE_RSS_TC_SIZE_6		64
#define HCLGE_RSS_TC_SIZE_7		128

#define HCLGE_UMV_TBL_SIZE		3072
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
	(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)

#define HCLGE_TQP_RESET_TRY_TIMES	10

#define HCLGE_PHY_PAGE_MDIX		0
#define HCLGE_PHY_PAGE_COPPER		0

/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG		22

/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG		16

/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG		17

#define HCLGE_PHY_MDIX_CTRL_S		5
#define HCLGE_PHY_MDIX_CTRL_M		GENMASK(6, 5)

#define HCLGE_PHY_MDIX_STATUS_B		6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B	11

/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD		64
#define HCLGE_VF_NUM_PER_BYTE		8

enum HLCGE_PORT_TYPE {
	HOST_PORT,
	NETWORK_PORT
};

#define HCLGE_PF_ID_S			0
#define HCLGE_PF_ID_M			GENMASK(2, 0)
#define HCLGE_VF_ID_S			3
#define HCLGE_VF_ID_M			GENMASK(10, 3)
#define HCLGE_PORT_TYPE_B		11
#define HCLGE_NETWORK_PORT_ID_S		0
#define HCLGE_NETWORK_PORT_ID_M		GENMASK(3, 0)

/* Reset related Registers */
#define HCLGE_PF_OTHER_INT_REG		0x20600
#define HCLGE_MISC_RESET_STS_REG	0x20700
#define HCLGE_MISC_VECTOR_INT_STS	0x20800
#define HCLGE_GLOBAL_RESET_REG		0x20A00
#define HCLGE_GLOBAL_RESET_BIT		0
#define HCLGE_CORE_RESET_BIT		1
#define HCLGE_IMP_RESET_BIT		2
#define HCLGE_FUN_RST_ING		0x20C00
#define HCLGE_FUN_RST_ING_B		0

/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B	5
#define HCLGE_VECTOR0_CORERESET_INT_B	6
#define HCLGE_VECTOR0_IMPRESET_INT_B	7

/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG	0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B	1

#define HCLGE_VECTOR0_IMP_RESET_INT_B	1

#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME		64
#define HCLGE_MAC_MAX_FRAME		9728

#define HCLGE_SUPPORT_1G_BIT		BIT(0)
#define HCLGE_SUPPORT_10G_BIT		BIT(1)
#define HCLGE_SUPPORT_25G_BIT		BIT(2)
#define HCLGE_SUPPORT_50G_BIT		BIT(3)
#define HCLGE_SUPPORT_100G_BIT		BIT(4)
/* to be compatible with existing boards */
#define HCLGE_SUPPORT_40G_BIT		BIT(5)
#define HCLGE_SUPPORT_100M_BIT		BIT(6)
#define HCLGE_SUPPORT_10M_BIT		BIT(7)
#define HCLGE_SUPPORT_GE \
	(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
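
/* Illustrative helper, not part of the original header: decode a single
 * HCLGE_SUPPORT_*_BIT flag from a speed_ability word into Mbps. The function
 * name is made up; the bit-to-speed mapping simply restates the definitions
 * above.
 */
static inline u32 hclge_example_speed_bit_to_mbps(u32 speed_bit)
{
	switch (speed_bit) {
	case HCLGE_SUPPORT_10M_BIT:
		return 10;
	case HCLGE_SUPPORT_100M_BIT:
		return 100;
	case HCLGE_SUPPORT_1G_BIT:
		return 1000;
	case HCLGE_SUPPORT_10G_BIT:
		return 10000;
	case HCLGE_SUPPORT_25G_BIT:
		return 25000;
	case HCLGE_SUPPORT_40G_BIT:
		return 40000;
	case HCLGE_SUPPORT_50G_BIT:
		return 50000;
	case HCLGE_SUPPORT_100G_BIT:
		return 100000;
	default:
		return 0;
	}
}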

enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_NIC_REGISTERED,
	HCLGE_STATE_ROCE_REGISTERED,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_CMD_DISABLE,
	HCLGE_STATE_MAX
};

enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,
	HCLGE_VECTOR0_EVENT_MBX,
	HCLGE_VECTOR0_EVENT_ERR,
	HCLGE_VECTOR0_EVENT_OTHER,
};

#define HCLGE_MPF_ENBALE	1

enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_UNKNOWN = 0,	/* unknown */
	HCLGE_MAC_SPEED_10M = 10,	/* 10 Mbps */
	HCLGE_MAC_SPEED_100M = 100,	/* 100 Mbps */
	HCLGE_MAC_SPEED_1G = 1000,	/* 1000 Mbps = 1 Gbps */
	HCLGE_MAC_SPEED_10G = 10000,	/* 10000 Mbps = 10 Gbps */
	HCLGE_MAC_SPEED_25G = 25000,	/* 25000 Mbps = 25 Gbps */
	HCLGE_MAC_SPEED_40G = 40000,	/* 40000 Mbps = 40 Gbps */
	HCLGE_MAC_SPEED_50G = 50000,	/* 50000 Mbps = 50 Gbps */
	HCLGE_MAC_SPEED_100G = 100000	/* 100000 Mbps = 100 Gbps */
};

enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};

#define QUERY_SFP_SPEED		0
#define QUERY_ACTIVE_SPEED	1

struct hclge_mac {
	u8 phy_addr;
	u8 flag;
	u8 media_type;	/* port media type, e.g. fibre/copper/backplane */
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u8 support_autoneg;
	u8 speed_type;	/* 0: sfp speed, 1: active speed */
	u32 speed;
	u32 speed_ability; /* speed ability supported by current media */
	u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
	u32 fec_mode; /* active fec mode */
	u32 user_fec_mode;
	u32 fec_ability;
	int link;	/* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

struct hclge_hw {
	void __iomem *io_base;
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
};

/* TQP stats */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};

struct hclge_tqp {
	/* copy of device pointer from pci_dev,
	 * used when performing DMA mapping
	 */
	struct device *dev;
	struct hnae3_queue q;
	struct hlcge_tqp_stats tqp_stats;
	u16 index;	/* Global index in a NIC controller */

	bool alloced;
};

enum hclge_fc_mode {
	HCLGE_FC_NONE,
	HCLGE_FC_RX_PAUSE,
	HCLGE_FC_TX_PAUSE,
	HCLGE_FC_FULL,
	HCLGE_FC_PFC,
	HCLGE_FC_DEFAULT
};

enum hclge_link_fail_code {
	HCLGE_LF_NORMAL,
	HCLGE_LF_REF_CLOCK_LOST,
	HCLGE_LF_XSFP_TX_DISABLE,
	HCLGE_LF_XSFP_ABSENT,
};

#define HCLGE_PG_NUM		4
#define HCLGE_SCH_MODE_SP	0
#define HCLGE_SCH_MODE_DWRR	1
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;	/* 0: sp; 1: dwrr */
	u8 tc_bit_map;
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];
};

struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;	/* 0: sp; 1: dwrr */
	u8 pgid;
	u32 bw_limit;
};
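
/* Illustrative sketch, not from the driver: filling one PG for DWRR
 * scheduling with two member TCs sharing bandwidth 3:1. The helper name and
 * the numeric values (including the bw_limit unit) are made up for the
 * example; field meanings follow the comments in struct hclge_pg_info above.
 */
static inline void hclge_example_fill_pg(struct hclge_pg_info *pg)
{
	pg->pg_id = 0;
	pg->pg_sch_mode = HCLGE_SCH_MODE_DWRR;
	pg->tc_bit_map = 0x3;	/* TC0 and TC1 belong to this PG */
	pg->bw_limit = 100000;	/* PG rate limit, in the unit hw expects */
	pg->tc_dwrr[0] = 75;	/* DWRR weights giving a 3:1 split */
	pg->tc_dwrr[1] = 25;
}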

struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;
	u16 umv_space;
};

struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;	/* It must be 1 if vNET-Base schd */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
	u8 pfc_en;	/* PFC enabled or not for user priority */
};

struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;
};

/* mac stats, opcode id: 0x0032 */
struct hclge_mac_stats {
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;

	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;

	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
	u64 mac_tx_pfc_pause_pkt_num;
	u64 mac_rx_pfc_pause_pkt_num;
	u64 mac_tx_ctrl_pkt_num;
	u64 mac_rx_ctrl_pkt_num;
};

#define HCLGE_STATS_TIMER_INTERVAL	(60 * 5)
struct hclge_hw_stats {
	struct hclge_mac_stats	mac_stats;
	u32 stats_timer;
};
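
/* Minimal sketch of how a {desc, offset} pair is typically consumed: the
 * real string table and accessor live in hclge_main.c, so this helper is
 * illustrative only and its name is made up. It reads one u64 counter out of
 * struct hclge_mac_stats at the byte offset recorded in the entry.
 */
static inline u64 hclge_example_read_stat(const struct hclge_mac_stats *stats,
					  const struct hclge_comm_stats_str *s)
{
	return *(const u64 *)((const u8 *)stats + s->offset);
}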

struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};

enum HCLGE_FD_MODE {
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
	HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
};

enum HCLGE_FD_KEY_TYPE {
	HCLGE_FD_KEY_BASE_ON_PTYPE,
	HCLGE_FD_KEY_BASE_ON_TUPLE,
};

enum HCLGE_FD_STAGE {
	HCLGE_FD_STAGE_1,
	HCLGE_FD_STAGE_2,
	MAX_STAGE_NUM,
};

/* OUTER_XXX indicates tuples in tunnel header of tunnel packet
 * INNER_XXX indicates tuples in tunneled header of tunnel packet or
 * tuples of non-tunnel packet
 */
enum HCLGE_FD_TUPLE {
	OUTER_DST_MAC,
	OUTER_SRC_MAC,
	OUTER_VLAN_TAG_FST,
	OUTER_VLAN_TAG_SEC,
	OUTER_ETH_TYPE,
	OUTER_L2_RSV,
	OUTER_IP_TOS,
	OUTER_IP_PROTO,
	OUTER_SRC_IP,
	OUTER_DST_IP,
	OUTER_L3_RSV,
	OUTER_SRC_PORT,
	OUTER_DST_PORT,
	OUTER_L4_RSV,
	OUTER_TUN_VNI,
	OUTER_TUN_FLOW_ID,
	INNER_DST_MAC,
	INNER_SRC_MAC,
	INNER_VLAN_TAG_FST,
	INNER_VLAN_TAG_SEC,
	INNER_ETH_TYPE,
	INNER_L2_RSV,
	INNER_IP_TOS,
	INNER_IP_PROTO,
	INNER_SRC_IP,
	INNER_DST_IP,
	INNER_L3_RSV,
	INNER_SRC_PORT,
	INNER_DST_PORT,
	INNER_L4_RSV,
	MAX_TUPLE,
};

enum HCLGE_FD_META_DATA {
	PACKET_TYPE_ID,
	IP_FRAGEMENT,
	ROCE_TYPE,
	NEXT_KEY,
	VLAN_NUMBER,
	SRC_VPORT,
	DST_VPORT,
	TUNNEL_PACKET,
	MAX_META_DATA,
};

struct key_info {
	u8 key_type;
	u8 key_length; /* use bit as unit */
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

#define MAX_KEY_LENGTH		400
#define MAX_KEY_DWORDS		DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES		(MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH	32

/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM	4096
#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL	5
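
/* Worked numbers for the key sizing above (editorial note, not from the
 * original file): a 400-bit key is 400 / 8 = 50 bytes, DIV_ROUND_UP(50, 4)
 * = 13 dwords, so MAX_KEY_BYTES works out to 13 * 4 = 52 bytes of key
 * buffer per TCAM entry.
 */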

enum HCLGE_FD_ACTIVE_RULE_TYPE {
	HCLGE_FD_RULE_NONE,
	HCLGE_FD_ARFS_ACTIVE,
	HCLGE_FD_EP_ACTIVE,
};

enum HCLGE_FD_PACKET_TYPE {
	NIC_PACKET,
	ROCE_PACKET,
};

enum HCLGE_FD_ACTION {
	HCLGE_FD_ACTION_ACCEPT_PACKET,
	HCLGE_FD_ACTION_DROP_PACKET,
};

struct hclge_fd_key_cfg {
	u8 key_sel;
	u8 inner_sipv6_word_en;
	u8 inner_dipv6_word_en;
	u8 outer_sipv6_word_en;
	u8 outer_dipv6_word_en;
	u32 tuple_active;
	u32 meta_data_active;
};

struct hclge_fd_cfg {
	u8 fd_mode;
	u16 max_key_length; /* use bit as unit */
	u32 proto_support;
	u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
	u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
	struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
};

#define IPV4_INDEX	3
#define IPV6_SIZE	4
struct hclge_fd_rule_tuples {
	u8 src_mac[ETH_ALEN];
	u8 dst_mac[ETH_ALEN];
	/* Be compatible for ip address of both ipv4 and ipv6.
	 * For ipv4 address, we store it in src/dst_ip[3].
	 */
	u32 src_ip[IPV6_SIZE];
	u32 dst_ip[IPV6_SIZE];
	u16 src_port;
	u16 dst_port;
	u16 vlan_tag1;
	u16 ether_proto;
	u8 ip_tos;
	u8 ip_proto;
};

struct hclge_fd_rule {
	struct hlist_node rule_node;
	struct hclge_fd_rule_tuples tuples;
	struct hclge_fd_rule_tuples tuples_mask;
	u32 unused_tuple;
	u32 flow_type;
	u8 action;
	u16 vf_id;
	u16 queue_id;
	u16 location;
	u16 flow_id;	/* only used for arfs */
	enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
};

struct hclge_fd_ad_data {
	u16 ad_id;
	u8 drop_packet;
	u8 forward_to_direct_queue;
	u16 queue_id;
	u8 use_counter;
	u8 counter_id;
	u8 use_next_stage;
	u8 write_rule_id_to_bd;
	u8 next_input_key;
	u16 rule_id;
};

struct hclge_vport_mac_addr_cfg {
	struct list_head node;
	int hd_tbl_status;
	u8 mac_addr[ETH_ALEN];
};

enum HCLGE_MAC_ADDR_TYPE {
	HCLGE_MAC_ADDR_UC,
	HCLGE_MAC_ADDR_MC
};

struct hclge_vport_vlan_cfg {
	struct list_head node;
	int hd_tbl_status;
	u16 vlan_id;
};

struct hclge_rst_stats {
	u32 reset_done_cnt;	/* the number of resets completed */
	u32 hw_reset_done_cnt;	/* the number of HW resets completed */
	u32 pf_rst_cnt;		/* the number of PF resets */
	u32 flr_rst_cnt;	/* the number of FLRs */
	u32 core_rst_cnt;	/* the number of CORE resets */
	u32 global_rst_cnt;	/* the number of GLOBAL resets */
	u32 imp_rst_cnt;	/* the number of IMP resets */
	u32 reset_cnt;		/* the number of resets */
};

/* time and register status when mac tunnel interruption occurs */
struct hclge_mac_tnl_stats {
	u64 time;
	u32 status;
};

#define HCLGE_RESET_INTERVAL	(10 * HZ)
#define HCLGE_WAIT_RESET_DONE	100

#pragma pack(1)
struct hclge_vf_vlan_cfg {
	u8 mbx_cmd;
	u8 subcode;
	u8 is_kill;
	u16 vlan;
	u16 proto;
};

#pragma pack()

/* For each bit of TCAM entry, it uses a pair of 'x' and
 * 'y' to indicate which value to match, like below:
 * ----------------------------------
 * | bit x | bit y |  search value  |
 * ----------------------------------
 * |   0   |   0   |   always hit   |
 * ----------------------------------
 * |   1   |   0   |   match '0'    |
 * ----------------------------------
 * |   0   |   1   |   match '1'    |
 * ----------------------------------
 * |   1   |   1   |    invalid     |
 * ----------------------------------
 * Then for input key(k) and mask(v), we can calculate the value by
 * the formulae:
 *	x = (~k) & v
 *	y = (k ^ ~v) & k
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) \
	do { \
		const typeof(k) _k_ = (k); \
		const typeof(v) _v_ = (v); \
		(y) = (_k_ ^ ~_v_) & (_k_); \
	} while (0)
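
/* Worked example for the x/y encoding above (illustrative only): with key
 * k = 0xA5 and mask v = 0xF0 (upper nibble must match, lower nibble is
 * don't-care):
 *	x = (~k) & v     = 0x5A & 0xF0          = 0x50
 *	y = (k ^ ~v) & k = (0xA5 ^ 0x0F) & 0xA5 = 0xA0
 * Lower-nibble bits end up with x = y = 0 ("always hit"), while each
 * upper-nibble bit encodes match '1' or match '0'. The wrapper below only
 * exists to show the macros in use; its name is made up.
 */
static inline void hclge_example_calc_xy(u8 k, u8 v, u8 *x, u8 *y)
{
	calc_x(*x, k, v);
	calc_y(*y, k, v);
}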

#define HCLGE_MAC_TNL_LOG_SIZE	8
#define HCLGE_VPORT_NUM		256
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;
	unsigned long flr_state;
	unsigned long last_reset_time;

	enum hnae3_reset_type reset_type;
	enum hnae3_reset_type reset_level;
	unsigned long default_reset_request;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client rst is pending to be served */
	struct hclge_rst_stats rst_stats;
	u32 reset_fail_cnt;
	u32 fw_version;
	u16 num_vmdq_vport;	/* Num vmdq vport this PF has set up */
	u16 num_tqps;		/* Num task queue pairs of this PF */
	u16 num_req_vfs;	/* Num VFs requested for this PF */

	u16 base_tqp_pid;	/* Base task tqp physical id of this PF */
	u16 alloc_rss_size;	/* Allocated RSS task queue */
	u16 rss_size_max;	/* HW defined max RSS task queue */

	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
	u16 num_alloc_vport;	/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_tx_desc;	/* desc num per tx queue */
	u16 num_rx_desc;	/* desc num per rx queue */
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;
	u8 support_sfp_query;

#define HCLGE_FLAG_TC_BASE_SCH_MODE	1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE	2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;

	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;

	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 roce_base_msix_offset;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;

	u16 pending_udp_bitmap;

	u16 rx_itr_default;
	u16 tx_itr_default;

	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list reset_timer;
	struct delayed_work service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;

	bool cur_promisc;
	int num_alloc_vfs;	/* Actual number of VFs allocated */

	struct hclge_tqp *htqp;
	struct hclge_vport *vport;

	struct dentry *hclge_dbgfs;

	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;

#define HCLGE_FLAG_MAIN			BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
	u32 flag;

	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
	u32 tx_buf_size; /* Tx buffer size for each TC */
	u32 dv_buf_size; /* Dv buffer size for each TC */

	u32 mps; /* Max packet size */
	/* vport_lock protects resources shared by vports */
	struct mutex vport_lock;

	struct hclge_vlan_type_cfg vlan_type_cfg;

	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
	unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];

	struct hclge_fd_cfg fd_cfg;
	struct hlist_head fd_rule_list;
	spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
	u16 hclge_fd_rule_num;
	u16 fd_arfs_expire_timer;
	unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
	enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
	u8 fd_en;

	u16 wanted_umv_size;
	/* max available unicast mac vlan space */
	u16 max_umv_size;
	/* private unicast mac vlan space, it's the same for PF and its VFs */
	u16 priv_umv_size;
	/* unicast mac vlan space shared by PF and its VFs */
	u16 share_umv_size;
	struct mutex umv_mutex; /* protect share_umv_size */

	struct mutex vport_cfg_mutex;	/* Protect stored vf table */

	DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
		      HCLGE_MAC_TNL_LOG_SIZE);

	/* affinity mask and notify for misc interrupt */
	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;
};

/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether accept tag1 packet from host */
	bool accept_untag1;	/* Whether accept untag1 packet from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether insert inner vlan tag */
	bool insert_tag2_en;	/* Whether insert outer vlan tag */
	u16 default_tag1;	/* The default inner vlan tag to insert */
	u16 default_tag2;	/* The default outer vlan tag to insert */
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	u8 rx_vlan_offload_en;	/* Whether enable rx vlan offload */
	u8 strip_tag1_en;	/* Whether strip inner vlan tag */
	u8 strip_tag2_en;	/* Whether strip outer vlan tag */
	u8 vlan1_vlan_prionly;	/* Inner VLAN Tag up to descriptor Enable */
	u8 vlan2_vlan_prionly;	/* Outer VLAN Tag up to descriptor Enable */
};

struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

enum HCLGE_VPORT_STATE {
	HCLGE_VPORT_STATE_ALIVE,
	HCLGE_VPORT_STATE_MAX
};

struct hclge_vlan_info {
	u16 vlan_proto; /* so far support 802.1Q only */
	u16 qos;
	u16 vlan_tag;
};

struct hclge_port_base_vlan_config {
	u16 state;
	struct hclge_vlan_info vlan_info;
};

struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;		/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;
	u32 bw_limit;		/* VSI BW Limit (0 = disabled) */
	u8 dwrr;

	unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
	struct hclge_port_base_vlan_config port_base_vlan_cfg;
	struct hclge_tx_vtag_cfg txvlan_cfg;
	struct hclge_rx_vtag_cfg rxvlan_cfg;

	u16 used_umv_num;

	u16 vport_id;
	struct hclge_dev *back;	/* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	unsigned long state;
	unsigned long last_active_jiffies;
	u32 mps; /* Max packet size */

	struct list_head uc_mac_list;	/* Store VF unicast table */
	struct list_head mc_mac_list;	/* Store VF multicast table */
	struct list_head vlan_list;	/* Store VF vlan table */
};
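
/* Editorial note (illustrative, the real helpers live in hclge_main.c): an
 * hnae3_handle handed in by the hnae3 framework is embedded in hclge_vport
 * as either 'nic' or 'roce', so the owning vport and device are typically
 * recovered along these lines:
 *
 *	struct hclge_vport *vport =
 *		container_of(handle, struct hclge_vport, nic);
 *	struct hclge_dev *hdev = vport->back;
 *
 * which is essentially what the hclge_get_vport() prototype below provides
 * for the NIC handle.
 */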

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);

static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);

	return tqp->index;
}

static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
{
	return !!hdev->reset_pending;
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);

int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type);
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state, u16 vlan_tag, u16 qos,
				      u16 vlan_proto);
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
				struct hclge_desc *desc);
#endif