1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/acpi.h> 5 #include <linux/device.h> 6 #include <linux/etherdevice.h> 7 #include <linux/init.h> 8 #include <linux/interrupt.h> 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <linux/pci.h> 13 #include <linux/platform_device.h> 14 #include <linux/if_vlan.h> 15 #include <linux/crash_dump.h> 16 #include <net/rtnetlink.h> 17 #include "hclge_cmd.h" 18 #include "hclge_dcb.h" 19 #include "hclge_main.h" 20 #include "hclge_mbx.h" 21 #include "hclge_mdio.h" 22 #include "hclge_tm.h" 23 #include "hclge_err.h" 24 #include "hnae3.h" 25 26 #define HCLGE_NAME "hclge" 27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) 28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) 29 30 #define HCLGE_BUF_SIZE_UNIT 256U 31 #define HCLGE_BUF_MUL_BY 2 32 #define HCLGE_BUF_DIV_BY 2 33 #define NEED_RESERVE_TC_NUM 2 34 #define BUF_MAX_PERCENT 100 35 #define BUF_RESERVE_PERCENT 90 36 37 #define HCLGE_RESET_MAX_FAIL_CNT 5 38 #define HCLGE_RESET_SYNC_TIME 100 39 #define HCLGE_PF_RESET_SYNC_TIME 20 40 #define HCLGE_PF_RESET_SYNC_CNT 1500 41 42 /* Get DFX BD number offset */ 43 #define HCLGE_DFX_BIOS_BD_OFFSET 1 44 #define HCLGE_DFX_SSU_0_BD_OFFSET 2 45 #define HCLGE_DFX_SSU_1_BD_OFFSET 3 46 #define HCLGE_DFX_IGU_BD_OFFSET 4 47 #define HCLGE_DFX_RPU_0_BD_OFFSET 5 48 #define HCLGE_DFX_RPU_1_BD_OFFSET 6 49 #define HCLGE_DFX_NCSI_BD_OFFSET 7 50 #define HCLGE_DFX_RTC_BD_OFFSET 8 51 #define HCLGE_DFX_PPP_BD_OFFSET 9 52 #define HCLGE_DFX_RCB_BD_OFFSET 10 53 #define HCLGE_DFX_TQP_BD_OFFSET 11 54 #define HCLGE_DFX_SSU_2_BD_OFFSET 12 55 56 #define HCLGE_LINK_STATUS_MS 10 57 58 #define HCLGE_VF_VPORT_START_NUM 1 59 60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); 61 static int hclge_init_vlan_config(struct hclge_dev *hdev); 62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev); 63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); 64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); 65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, 66 u16 *allocated_size, bool is_alloc); 67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev); 68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle); 69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, 70 unsigned long *addr); 71 static int hclge_set_default_loopback(struct hclge_dev *hdev); 72 73 static struct hnae3_ae_algo ae_algo; 74 75 static const struct pci_device_id ae_algo_pci_tbl[] = { 76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 77 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, 79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, 80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 83 /* required last entry */ 84 {0, } 85 }; 86 87 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); 88 89 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG, 90 HCLGE_CMDQ_TX_ADDR_H_REG, 91 HCLGE_CMDQ_TX_DEPTH_REG, 92 HCLGE_CMDQ_TX_TAIL_REG, 93 HCLGE_CMDQ_TX_HEAD_REG, 94 HCLGE_CMDQ_RX_ADDR_L_REG, 95 HCLGE_CMDQ_RX_ADDR_H_REG, 96 HCLGE_CMDQ_RX_DEPTH_REG, 97 HCLGE_CMDQ_RX_TAIL_REG, 98 HCLGE_CMDQ_RX_HEAD_REG, 99 HCLGE_VECTOR0_CMDQ_SRC_REG, 100 HCLGE_CMDQ_INTR_STS_REG, 101 HCLGE_CMDQ_INTR_EN_REG, 102 
HCLGE_CMDQ_INTR_GEN_REG}; 103 104 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, 105 HCLGE_VECTOR0_OTER_EN_REG, 106 HCLGE_MISC_RESET_STS_REG, 107 HCLGE_MISC_VECTOR_INT_STS, 108 HCLGE_GLOBAL_RESET_REG, 109 HCLGE_FUN_RST_ING, 110 HCLGE_GRO_EN_REG}; 111 112 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, 113 HCLGE_RING_RX_ADDR_H_REG, 114 HCLGE_RING_RX_BD_NUM_REG, 115 HCLGE_RING_RX_BD_LENGTH_REG, 116 HCLGE_RING_RX_MERGE_EN_REG, 117 HCLGE_RING_RX_TAIL_REG, 118 HCLGE_RING_RX_HEAD_REG, 119 HCLGE_RING_RX_FBD_NUM_REG, 120 HCLGE_RING_RX_OFFSET_REG, 121 HCLGE_RING_RX_FBD_OFFSET_REG, 122 HCLGE_RING_RX_STASH_REG, 123 HCLGE_RING_RX_BD_ERR_REG, 124 HCLGE_RING_TX_ADDR_L_REG, 125 HCLGE_RING_TX_ADDR_H_REG, 126 HCLGE_RING_TX_BD_NUM_REG, 127 HCLGE_RING_TX_PRIORITY_REG, 128 HCLGE_RING_TX_TC_REG, 129 HCLGE_RING_TX_MERGE_EN_REG, 130 HCLGE_RING_TX_TAIL_REG, 131 HCLGE_RING_TX_HEAD_REG, 132 HCLGE_RING_TX_FBD_NUM_REG, 133 HCLGE_RING_TX_OFFSET_REG, 134 HCLGE_RING_TX_EBD_NUM_REG, 135 HCLGE_RING_TX_EBD_OFFSET_REG, 136 HCLGE_RING_TX_BD_ERR_REG, 137 HCLGE_RING_EN_REG}; 138 139 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, 140 HCLGE_TQP_INTR_GL0_REG, 141 HCLGE_TQP_INTR_GL1_REG, 142 HCLGE_TQP_INTR_GL2_REG, 143 HCLGE_TQP_INTR_RL_REG}; 144 145 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { 146 "App Loopback test", 147 "Serdes serial Loopback test", 148 "Serdes parallel Loopback test", 149 "Phy Loopback test" 150 }; 151 152 static const struct hclge_comm_stats_str g_mac_stats_string[] = { 153 {"mac_tx_mac_pause_num", 154 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, 155 {"mac_rx_mac_pause_num", 156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, 157 {"mac_tx_control_pkt_num", 158 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, 159 {"mac_rx_control_pkt_num", 160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, 161 {"mac_tx_pfc_pkt_num", 162 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, 163 {"mac_tx_pfc_pri0_pkt_num", 164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 165 {"mac_tx_pfc_pri1_pkt_num", 166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 167 {"mac_tx_pfc_pri2_pkt_num", 168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 169 {"mac_tx_pfc_pri3_pkt_num", 170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 171 {"mac_tx_pfc_pri4_pkt_num", 172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 173 {"mac_tx_pfc_pri5_pkt_num", 174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 175 {"mac_tx_pfc_pri6_pkt_num", 176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 177 {"mac_tx_pfc_pri7_pkt_num", 178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 179 {"mac_rx_pfc_pkt_num", 180 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, 181 {"mac_rx_pfc_pri0_pkt_num", 182 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 183 {"mac_rx_pfc_pri1_pkt_num", 184 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 185 {"mac_rx_pfc_pri2_pkt_num", 186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 187 {"mac_rx_pfc_pri3_pkt_num", 188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 189 {"mac_rx_pfc_pri4_pkt_num", 190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 191 {"mac_rx_pfc_pri5_pkt_num", 192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 193 {"mac_rx_pfc_pri6_pkt_num", 194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 195 {"mac_rx_pfc_pri7_pkt_num", 196 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 197 {"mac_tx_total_pkt_num", 198 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 199 {"mac_tx_total_oct_num", 200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 201 {"mac_tx_good_pkt_num", 202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 203 {"mac_tx_bad_pkt_num", 204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 205 {"mac_tx_good_oct_num", 206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 207 {"mac_tx_bad_oct_num", 208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 209 {"mac_tx_uni_pkt_num", 210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 211 {"mac_tx_multi_pkt_num", 212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 213 {"mac_tx_broad_pkt_num", 214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 215 {"mac_tx_undersize_pkt_num", 216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 217 {"mac_tx_oversize_pkt_num", 218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, 219 {"mac_tx_64_oct_pkt_num", 220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 221 {"mac_tx_65_127_oct_pkt_num", 222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 223 {"mac_tx_128_255_oct_pkt_num", 224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 225 {"mac_tx_256_511_oct_pkt_num", 226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 227 {"mac_tx_512_1023_oct_pkt_num", 228 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 229 {"mac_tx_1024_1518_oct_pkt_num", 230 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 231 {"mac_tx_1519_2047_oct_pkt_num", 232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, 233 {"mac_tx_2048_4095_oct_pkt_num", 234 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 235 {"mac_tx_4096_8191_oct_pkt_num", 236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 237 {"mac_tx_8192_9216_oct_pkt_num", 238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 239 {"mac_tx_9217_12287_oct_pkt_num", 240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, 241 {"mac_tx_12288_16383_oct_pkt_num", 242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, 243 {"mac_tx_1519_max_good_pkt_num", 244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, 245 {"mac_tx_1519_max_bad_pkt_num", 246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, 247 {"mac_rx_total_pkt_num", 248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 249 {"mac_rx_total_oct_num", 250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 251 {"mac_rx_good_pkt_num", 252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 253 {"mac_rx_bad_pkt_num", 254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 255 {"mac_rx_good_oct_num", 256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 257 {"mac_rx_bad_oct_num", 258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 259 {"mac_rx_uni_pkt_num", 260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 261 {"mac_rx_multi_pkt_num", 262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 263 {"mac_rx_broad_pkt_num", 264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 265 {"mac_rx_undersize_pkt_num", 266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 267 {"mac_rx_oversize_pkt_num", 268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, 269 {"mac_rx_64_oct_pkt_num", 270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 271 {"mac_rx_65_127_oct_pkt_num", 272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 273 {"mac_rx_128_255_oct_pkt_num", 274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 275 {"mac_rx_256_511_oct_pkt_num", 276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 277 
{"mac_rx_512_1023_oct_pkt_num", 278 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 279 {"mac_rx_1024_1518_oct_pkt_num", 280 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 281 {"mac_rx_1519_2047_oct_pkt_num", 282 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 283 {"mac_rx_2048_4095_oct_pkt_num", 284 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 285 {"mac_rx_4096_8191_oct_pkt_num", 286 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 287 {"mac_rx_8192_9216_oct_pkt_num", 288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 289 {"mac_rx_9217_12287_oct_pkt_num", 290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 291 {"mac_rx_12288_16383_oct_pkt_num", 292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 293 {"mac_rx_1519_max_good_pkt_num", 294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 295 {"mac_rx_1519_max_bad_pkt_num", 296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 297 298 {"mac_tx_fragment_pkt_num", 299 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 300 {"mac_tx_undermin_pkt_num", 301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 302 {"mac_tx_jabber_pkt_num", 303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 304 {"mac_tx_err_all_pkt_num", 305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 306 {"mac_tx_from_app_good_pkt_num", 307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 308 {"mac_tx_from_app_bad_pkt_num", 309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 310 {"mac_rx_fragment_pkt_num", 311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 312 {"mac_rx_undermin_pkt_num", 313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 314 {"mac_rx_jabber_pkt_num", 315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 316 {"mac_rx_fcs_err_pkt_num", 317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 318 {"mac_rx_send_app_good_pkt_num", 319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 320 {"mac_rx_send_app_bad_pkt_num", 321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 322 }; 323 324 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 325 { 326 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 327 .ethter_type = cpu_to_le16(ETH_P_LLDP), 328 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e}, 329 .i_port_bitmap = 0x1, 330 }, 331 }; 332 333 static const u8 hclge_hash_key[] = { 334 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 335 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 336 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, 337 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 338 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA 339 }; 340 341 static const u32 hclge_dfx_bd_offset_list[] = { 342 HCLGE_DFX_BIOS_BD_OFFSET, 343 HCLGE_DFX_SSU_0_BD_OFFSET, 344 HCLGE_DFX_SSU_1_BD_OFFSET, 345 HCLGE_DFX_IGU_BD_OFFSET, 346 HCLGE_DFX_RPU_0_BD_OFFSET, 347 HCLGE_DFX_RPU_1_BD_OFFSET, 348 HCLGE_DFX_NCSI_BD_OFFSET, 349 HCLGE_DFX_RTC_BD_OFFSET, 350 HCLGE_DFX_PPP_BD_OFFSET, 351 HCLGE_DFX_RCB_BD_OFFSET, 352 HCLGE_DFX_TQP_BD_OFFSET, 353 HCLGE_DFX_SSU_2_BD_OFFSET 354 }; 355 356 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = { 357 HCLGE_OPC_DFX_BIOS_COMMON_REG, 358 HCLGE_OPC_DFX_SSU_REG_0, 359 HCLGE_OPC_DFX_SSU_REG_1, 360 HCLGE_OPC_DFX_IGU_EGU_REG, 361 HCLGE_OPC_DFX_RPU_REG_0, 362 HCLGE_OPC_DFX_RPU_REG_1, 363 HCLGE_OPC_DFX_NCSI_REG, 364 HCLGE_OPC_DFX_RTC_REG, 365 HCLGE_OPC_DFX_PPP_REG, 366 HCLGE_OPC_DFX_RCB_REG, 367 HCLGE_OPC_DFX_TQP_REG, 368 HCLGE_OPC_DFX_SSU_REG_2 369 }; 370 371 static const struct 
key_info meta_data_key_info[] = { 372 { PACKET_TYPE_ID, 6}, 373 { IP_FRAGEMENT, 1}, 374 { ROCE_TYPE, 1}, 375 { NEXT_KEY, 5}, 376 { VLAN_NUMBER, 2}, 377 { SRC_VPORT, 12}, 378 { DST_VPORT, 12}, 379 { TUNNEL_PACKET, 1}, 380 }; 381 382 static const struct key_info tuple_key_info[] = { 383 { OUTER_DST_MAC, 48}, 384 { OUTER_SRC_MAC, 48}, 385 { OUTER_VLAN_TAG_FST, 16}, 386 { OUTER_VLAN_TAG_SEC, 16}, 387 { OUTER_ETH_TYPE, 16}, 388 { OUTER_L2_RSV, 16}, 389 { OUTER_IP_TOS, 8}, 390 { OUTER_IP_PROTO, 8}, 391 { OUTER_SRC_IP, 32}, 392 { OUTER_DST_IP, 32}, 393 { OUTER_L3_RSV, 16}, 394 { OUTER_SRC_PORT, 16}, 395 { OUTER_DST_PORT, 16}, 396 { OUTER_L4_RSV, 32}, 397 { OUTER_TUN_VNI, 24}, 398 { OUTER_TUN_FLOW_ID, 8}, 399 { INNER_DST_MAC, 48}, 400 { INNER_SRC_MAC, 48}, 401 { INNER_VLAN_TAG_FST, 16}, 402 { INNER_VLAN_TAG_SEC, 16}, 403 { INNER_ETH_TYPE, 16}, 404 { INNER_L2_RSV, 16}, 405 { INNER_IP_TOS, 8}, 406 { INNER_IP_PROTO, 8}, 407 { INNER_SRC_IP, 32}, 408 { INNER_DST_IP, 32}, 409 { INNER_L3_RSV, 16}, 410 { INNER_SRC_PORT, 16}, 411 { INNER_DST_PORT, 16}, 412 { INNER_L4_RSV, 32}, 413 }; 414 415 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) 416 { 417 #define HCLGE_MAC_CMD_NUM 21 418 419 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); 420 struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; 421 __le64 *desc_data; 422 int i, k, n; 423 int ret; 424 425 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); 426 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); 427 if (ret) { 428 dev_err(&hdev->pdev->dev, 429 "Get MAC pkt stats fail, status = %d.\n", ret); 430 431 return ret; 432 } 433 434 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { 435 /* for special opcode 0032, only the first desc has the head */ 436 if (unlikely(i == 0)) { 437 desc_data = (__le64 *)(&desc[i].data[0]); 438 n = HCLGE_RD_FIRST_STATS_NUM; 439 } else { 440 desc_data = (__le64 *)(&desc[i]); 441 n = HCLGE_RD_OTHER_STATS_NUM; 442 } 443 444 for (k = 0; k < n; k++) { 445 *data += le64_to_cpu(*desc_data); 446 data++; 447 desc_data++; 448 } 449 } 450 451 return 0; 452 } 453 454 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) 455 { 456 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); 457 struct hclge_desc *desc; 458 __le64 *desc_data; 459 u16 i, k, n; 460 int ret; 461 462 /* This may be called inside atomic sections, 463 * so GFP_ATOMIC is more suitalbe here 464 */ 465 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC); 466 if (!desc) 467 return -ENOMEM; 468 469 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true); 470 ret = hclge_cmd_send(&hdev->hw, desc, desc_num); 471 if (ret) { 472 kfree(desc); 473 return ret; 474 } 475 476 for (i = 0; i < desc_num; i++) { 477 /* for special opcode 0034, only the first desc has the head */ 478 if (i == 0) { 479 desc_data = (__le64 *)(&desc[i].data[0]); 480 n = HCLGE_RD_FIRST_STATS_NUM; 481 } else { 482 desc_data = (__le64 *)(&desc[i]); 483 n = HCLGE_RD_OTHER_STATS_NUM; 484 } 485 486 for (k = 0; k < n; k++) { 487 *data += le64_to_cpu(*desc_data); 488 data++; 489 desc_data++; 490 } 491 } 492 493 kfree(desc); 494 495 return 0; 496 } 497 498 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num) 499 { 500 struct hclge_desc desc; 501 __le32 *desc_data; 502 u32 reg_num; 503 int ret; 504 505 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); 506 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 507 if (ret) 508 return ret; 509 510 desc_data = (__le32 *)(&desc.data[0]); 511 reg_num = le32_to_cpu(*desc_data); 
512 513 *desc_num = 1 + ((reg_num - 3) >> 2) + 514 (u32)(((reg_num - 3) & 0x3) ? 1 : 0); 515 516 return 0; 517 } 518 519 static int hclge_mac_update_stats(struct hclge_dev *hdev) 520 { 521 u32 desc_num; 522 int ret; 523 524 ret = hclge_mac_query_reg_num(hdev, &desc_num); 525 526 /* The firmware supports the new statistics acquisition method */ 527 if (!ret) 528 ret = hclge_mac_update_stats_complete(hdev, desc_num); 529 else if (ret == -EOPNOTSUPP) 530 ret = hclge_mac_update_stats_defective(hdev); 531 else 532 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n"); 533 534 return ret; 535 } 536 537 static int hclge_tqps_update_stats(struct hnae3_handle *handle) 538 { 539 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 540 struct hclge_vport *vport = hclge_get_vport(handle); 541 struct hclge_dev *hdev = vport->back; 542 struct hnae3_queue *queue; 543 struct hclge_desc desc[1]; 544 struct hclge_tqp *tqp; 545 int ret, i; 546 547 for (i = 0; i < kinfo->num_tqps; i++) { 548 queue = handle->kinfo.tqp[i]; 549 tqp = container_of(queue, struct hclge_tqp, q); 550 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 551 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS, 552 true); 553 554 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 555 ret = hclge_cmd_send(&hdev->hw, desc, 1); 556 if (ret) { 557 dev_err(&hdev->pdev->dev, 558 "Query tqp stat fail, status = %d,queue = %d\n", 559 ret, i); 560 return ret; 561 } 562 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 563 le32_to_cpu(desc[0].data[1]); 564 } 565 566 for (i = 0; i < kinfo->num_tqps; i++) { 567 queue = handle->kinfo.tqp[i]; 568 tqp = container_of(queue, struct hclge_tqp, q); 569 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 570 hclge_cmd_setup_basic_desc(&desc[0], 571 HCLGE_OPC_QUERY_TX_STATUS, 572 true); 573 574 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 575 ret = hclge_cmd_send(&hdev->hw, desc, 1); 576 if (ret) { 577 dev_err(&hdev->pdev->dev, 578 "Query tqp stat fail, status = %d,queue = %d\n", 579 ret, i); 580 return ret; 581 } 582 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 583 le32_to_cpu(desc[0].data[1]); 584 } 585 586 return 0; 587 } 588 589 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 590 { 591 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 592 struct hclge_tqp *tqp; 593 u64 *buff = data; 594 int i; 595 596 for (i = 0; i < kinfo->num_tqps; i++) { 597 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 598 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 599 } 600 601 for (i = 0; i < kinfo->num_tqps; i++) { 602 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 603 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 604 } 605 606 return buff; 607 } 608 609 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) 610 { 611 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 612 613 /* each tqp has TX & RX two queues */ 614 return kinfo->num_tqps * (2); 615 } 616 617 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 618 { 619 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 620 u8 *buff = data; 621 int i = 0; 622 623 for (i = 0; i < kinfo->num_tqps; i++) { 624 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], 625 struct hclge_tqp, q); 626 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 627 tqp->index); 628 buff = buff + ETH_GSTRING_LEN; 629 } 630 631 for (i = 0; i < kinfo->num_tqps; i++) { 632 struct hclge_tqp *tqp = container_of(kinfo->tqp[i], 633 struct hclge_tqp, q); 634 snprintf(buff, ETH_GSTRING_LEN, 
"rxq%d_pktnum_rcd", 635 tqp->index); 636 buff = buff + ETH_GSTRING_LEN; 637 } 638 639 return buff; 640 } 641 642 static u64 *hclge_comm_get_stats(const void *comm_stats, 643 const struct hclge_comm_stats_str strs[], 644 int size, u64 *data) 645 { 646 u64 *buf = data; 647 u32 i; 648 649 for (i = 0; i < size; i++) 650 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); 651 652 return buf + size; 653 } 654 655 static u8 *hclge_comm_get_strings(u32 stringset, 656 const struct hclge_comm_stats_str strs[], 657 int size, u8 *data) 658 { 659 char *buff = (char *)data; 660 u32 i; 661 662 if (stringset != ETH_SS_STATS) 663 return buff; 664 665 for (i = 0; i < size; i++) { 666 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); 667 buff = buff + ETH_GSTRING_LEN; 668 } 669 670 return (u8 *)buff; 671 } 672 673 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 674 { 675 struct hnae3_handle *handle; 676 int status; 677 678 handle = &hdev->vport[0].nic; 679 if (handle->client) { 680 status = hclge_tqps_update_stats(handle); 681 if (status) { 682 dev_err(&hdev->pdev->dev, 683 "Update TQPS stats fail, status = %d.\n", 684 status); 685 } 686 } 687 688 status = hclge_mac_update_stats(hdev); 689 if (status) 690 dev_err(&hdev->pdev->dev, 691 "Update MAC stats fail, status = %d.\n", status); 692 } 693 694 static void hclge_update_stats(struct hnae3_handle *handle, 695 struct net_device_stats *net_stats) 696 { 697 struct hclge_vport *vport = hclge_get_vport(handle); 698 struct hclge_dev *hdev = vport->back; 699 int status; 700 701 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 702 return; 703 704 status = hclge_mac_update_stats(hdev); 705 if (status) 706 dev_err(&hdev->pdev->dev, 707 "Update MAC stats fail, status = %d.\n", 708 status); 709 710 status = hclge_tqps_update_stats(handle); 711 if (status) 712 dev_err(&hdev->pdev->dev, 713 "Update TQPS stats fail, status = %d.\n", 714 status); 715 716 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); 717 } 718 719 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 720 { 721 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ 722 HNAE3_SUPPORT_PHY_LOOPBACK |\ 723 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ 724 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) 725 726 struct hclge_vport *vport = hclge_get_vport(handle); 727 struct hclge_dev *hdev = vport->back; 728 int count = 0; 729 730 /* Loopback test support rules: 731 * mac: only GE mode support 732 * serdes: all mac mode will support include GE/XGE/LGE/CGE 733 * phy: only support when phy device exist on board 734 */ 735 if (stringset == ETH_SS_TEST) { 736 /* clear loopback bit flags at first */ 737 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 738 if (hdev->pdev->revision >= 0x21 || 739 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 740 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 741 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 742 count += 1; 743 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; 744 } 745 746 count += 2; 747 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; 748 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; 749 750 if (hdev->hw.mac.phydev) { 751 count += 1; 752 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; 753 } 754 755 } else if (stringset == ETH_SS_STATS) { 756 count = ARRAY_SIZE(g_mac_stats_string) + 757 hclge_tqps_get_sset_count(handle, stringset); 758 } 759 760 return count; 761 } 762 763 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, 764 u8 *data) 765 { 766 u8 *p = 
(char *)data; 767 int size; 768 769 if (stringset == ETH_SS_STATS) { 770 size = ARRAY_SIZE(g_mac_stats_string); 771 p = hclge_comm_get_strings(stringset, g_mac_stats_string, 772 size, p); 773 p = hclge_tqps_get_strings(handle, p); 774 } else if (stringset == ETH_SS_TEST) { 775 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { 776 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], 777 ETH_GSTRING_LEN); 778 p += ETH_GSTRING_LEN; 779 } 780 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { 781 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], 782 ETH_GSTRING_LEN); 783 p += ETH_GSTRING_LEN; 784 } 785 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { 786 memcpy(p, 787 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], 788 ETH_GSTRING_LEN); 789 p += ETH_GSTRING_LEN; 790 } 791 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 792 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY], 793 ETH_GSTRING_LEN); 794 p += ETH_GSTRING_LEN; 795 } 796 } 797 } 798 799 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) 800 { 801 struct hclge_vport *vport = hclge_get_vport(handle); 802 struct hclge_dev *hdev = vport->back; 803 u64 *p; 804 805 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string, 806 ARRAY_SIZE(g_mac_stats_string), data); 807 p = hclge_tqps_get_stats(handle, p); 808 } 809 810 static void hclge_get_mac_stat(struct hnae3_handle *handle, 811 struct hns3_mac_stats *mac_stats) 812 { 813 struct hclge_vport *vport = hclge_get_vport(handle); 814 struct hclge_dev *hdev = vport->back; 815 816 hclge_update_stats(handle, NULL); 817 818 mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num; 819 mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num; 820 } 821 822 static int hclge_parse_func_status(struct hclge_dev *hdev, 823 struct hclge_func_status_cmd *status) 824 { 825 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 826 return -EINVAL; 827 828 /* Set the pf to main pf */ 829 if (status->pf_state & HCLGE_PF_STATE_MAIN) 830 hdev->flag |= HCLGE_FLAG_MAIN; 831 else 832 hdev->flag &= ~HCLGE_FLAG_MAIN; 833 834 return 0; 835 } 836 837 static int hclge_query_function_status(struct hclge_dev *hdev) 838 { 839 #define HCLGE_QUERY_MAX_CNT 5 840 841 struct hclge_func_status_cmd *req; 842 struct hclge_desc desc; 843 int timeout = 0; 844 int ret; 845 846 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 847 req = (struct hclge_func_status_cmd *)desc.data; 848 849 do { 850 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 851 if (ret) { 852 dev_err(&hdev->pdev->dev, 853 "query function status failed %d.\n", ret); 854 return ret; 855 } 856 857 /* Check pf reset is done */ 858 if (req->pf_state) 859 break; 860 usleep_range(1000, 2000); 861 } while (timeout++ < HCLGE_QUERY_MAX_CNT); 862 863 ret = hclge_parse_func_status(hdev, req); 864 865 return ret; 866 } 867 868 static int hclge_query_pf_resource(struct hclge_dev *hdev) 869 { 870 struct hclge_pf_res_cmd *req; 871 struct hclge_desc desc; 872 int ret; 873 874 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 875 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 876 if (ret) { 877 dev_err(&hdev->pdev->dev, 878 "query pf resource failed %d.\n", ret); 879 return ret; 880 } 881 882 req = (struct hclge_pf_res_cmd *)desc.data; 883 hdev->num_tqps = __le16_to_cpu(req->tqp_num); 884 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 885 886 if (req->tx_buf_size) 887 hdev->tx_buf_size = 888 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; 889 else 890 
hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; 891 892 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); 893 894 if (req->dv_buf_size) 895 hdev->dv_buf_size = 896 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; 897 else 898 hdev->dv_buf_size = HCLGE_DEFAULT_DV; 899 900 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); 901 902 if (hnae3_dev_roce_supported(hdev)) { 903 hdev->roce_base_msix_offset = 904 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 905 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S); 906 hdev->num_roce_msi = 907 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), 908 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 909 910 /* nic's msix numbers is always equals to the roce's. */ 911 hdev->num_nic_msi = hdev->num_roce_msi; 912 913 /* PF should have NIC vectors and Roce vectors, 914 * NIC vectors are queued before Roce vectors. 915 */ 916 hdev->num_msi = hdev->num_roce_msi + 917 hdev->roce_base_msix_offset; 918 } else { 919 hdev->num_msi = 920 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), 921 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 922 923 hdev->num_nic_msi = hdev->num_msi; 924 } 925 926 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { 927 dev_err(&hdev->pdev->dev, 928 "Just %u msi resources, not enough for pf(min:2).\n", 929 hdev->num_nic_msi); 930 return -EINVAL; 931 } 932 933 return 0; 934 } 935 936 static int hclge_parse_speed(int speed_cmd, int *speed) 937 { 938 switch (speed_cmd) { 939 case 6: 940 *speed = HCLGE_MAC_SPEED_10M; 941 break; 942 case 7: 943 *speed = HCLGE_MAC_SPEED_100M; 944 break; 945 case 0: 946 *speed = HCLGE_MAC_SPEED_1G; 947 break; 948 case 1: 949 *speed = HCLGE_MAC_SPEED_10G; 950 break; 951 case 2: 952 *speed = HCLGE_MAC_SPEED_25G; 953 break; 954 case 3: 955 *speed = HCLGE_MAC_SPEED_40G; 956 break; 957 case 4: 958 *speed = HCLGE_MAC_SPEED_50G; 959 break; 960 case 5: 961 *speed = HCLGE_MAC_SPEED_100G; 962 break; 963 default: 964 return -EINVAL; 965 } 966 967 return 0; 968 } 969 970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) 971 { 972 struct hclge_vport *vport = hclge_get_vport(handle); 973 struct hclge_dev *hdev = vport->back; 974 u32 speed_ability = hdev->hw.mac.speed_ability; 975 u32 speed_bit = 0; 976 977 switch (speed) { 978 case HCLGE_MAC_SPEED_10M: 979 speed_bit = HCLGE_SUPPORT_10M_BIT; 980 break; 981 case HCLGE_MAC_SPEED_100M: 982 speed_bit = HCLGE_SUPPORT_100M_BIT; 983 break; 984 case HCLGE_MAC_SPEED_1G: 985 speed_bit = HCLGE_SUPPORT_1G_BIT; 986 break; 987 case HCLGE_MAC_SPEED_10G: 988 speed_bit = HCLGE_SUPPORT_10G_BIT; 989 break; 990 case HCLGE_MAC_SPEED_25G: 991 speed_bit = HCLGE_SUPPORT_25G_BIT; 992 break; 993 case HCLGE_MAC_SPEED_40G: 994 speed_bit = HCLGE_SUPPORT_40G_BIT; 995 break; 996 case HCLGE_MAC_SPEED_50G: 997 speed_bit = HCLGE_SUPPORT_50G_BIT; 998 break; 999 case HCLGE_MAC_SPEED_100G: 1000 speed_bit = HCLGE_SUPPORT_100G_BIT; 1001 break; 1002 default: 1003 return -EINVAL; 1004 } 1005 1006 if (speed_bit & speed_ability) 1007 return 0; 1008 1009 return -EINVAL; 1010 } 1011 1012 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability) 1013 { 1014 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1015 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 1016 mac->supported); 1017 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1018 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 1019 mac->supported); 1020 if (speed_ability & HCLGE_SUPPORT_40G_BIT) 1021 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 1022 
mac->supported); 1023 if (speed_ability & HCLGE_SUPPORT_50G_BIT) 1024 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 1025 mac->supported); 1026 if (speed_ability & HCLGE_SUPPORT_100G_BIT) 1027 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 1028 mac->supported); 1029 } 1030 1031 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability) 1032 { 1033 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1034 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, 1035 mac->supported); 1036 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1037 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 1038 mac->supported); 1039 if (speed_ability & HCLGE_SUPPORT_50G_BIT) 1040 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, 1041 mac->supported); 1042 if (speed_ability & HCLGE_SUPPORT_40G_BIT) 1043 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 1044 mac->supported); 1045 if (speed_ability & HCLGE_SUPPORT_100G_BIT) 1046 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, 1047 mac->supported); 1048 } 1049 1050 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability) 1051 { 1052 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1053 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, 1054 mac->supported); 1055 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1056 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 1057 mac->supported); 1058 if (speed_ability & HCLGE_SUPPORT_40G_BIT) 1059 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 1060 mac->supported); 1061 if (speed_ability & HCLGE_SUPPORT_50G_BIT) 1062 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, 1063 mac->supported); 1064 if (speed_ability & HCLGE_SUPPORT_100G_BIT) 1065 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, 1066 mac->supported); 1067 } 1068 1069 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability) 1070 { 1071 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1072 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 1073 mac->supported); 1074 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1075 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 1076 mac->supported); 1077 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1078 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, 1079 mac->supported); 1080 if (speed_ability & HCLGE_SUPPORT_40G_BIT) 1081 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 1082 mac->supported); 1083 if (speed_ability & HCLGE_SUPPORT_50G_BIT) 1084 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, 1085 mac->supported); 1086 if (speed_ability & HCLGE_SUPPORT_100G_BIT) 1087 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, 1088 mac->supported); 1089 } 1090 1091 static void hclge_convert_setting_fec(struct hclge_mac *mac) 1092 { 1093 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); 1094 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); 1095 1096 switch (mac->speed) { 1097 case HCLGE_MAC_SPEED_10G: 1098 case HCLGE_MAC_SPEED_40G: 1099 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 1100 mac->supported); 1101 mac->fec_ability = 1102 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO); 1103 break; 1104 case HCLGE_MAC_SPEED_25G: 1105 case HCLGE_MAC_SPEED_50G: 1106 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 1107 mac->supported); 1108 mac->fec_ability = 1109 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | 1110 BIT(HNAE3_FEC_AUTO); 1111 break; 1112 case HCLGE_MAC_SPEED_100G: 1113 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 
mac->supported); 1114 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO); 1115 break; 1116 default: 1117 mac->fec_ability = 0; 1118 break; 1119 } 1120 } 1121 1122 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, 1123 u8 speed_ability) 1124 { 1125 struct hclge_mac *mac = &hdev->hw.mac; 1126 1127 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1128 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 1129 mac->supported); 1130 1131 hclge_convert_setting_sr(mac, speed_ability); 1132 hclge_convert_setting_lr(mac, speed_ability); 1133 hclge_convert_setting_cr(mac, speed_ability); 1134 if (hdev->pdev->revision >= 0x21) 1135 hclge_convert_setting_fec(mac); 1136 1137 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); 1138 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1139 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1140 } 1141 1142 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, 1143 u8 speed_ability) 1144 { 1145 struct hclge_mac *mac = &hdev->hw.mac; 1146 1147 hclge_convert_setting_kr(mac, speed_ability); 1148 if (hdev->pdev->revision >= 0x21) 1149 hclge_convert_setting_fec(mac); 1150 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); 1151 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1152 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1153 } 1154 1155 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, 1156 u8 speed_ability) 1157 { 1158 unsigned long *supported = hdev->hw.mac.supported; 1159 1160 /* default to support all speed for GE port */ 1161 if (!speed_ability) 1162 speed_ability = HCLGE_SUPPORT_GE; 1163 1164 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1165 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1166 supported); 1167 1168 if (speed_ability & HCLGE_SUPPORT_100M_BIT) { 1169 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 1170 supported); 1171 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 1172 supported); 1173 } 1174 1175 if (speed_ability & HCLGE_SUPPORT_10M_BIT) { 1176 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); 1177 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); 1178 } 1179 1180 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); 1181 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); 1182 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); 1183 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); 1184 } 1185 1186 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) 1187 { 1188 u8 media_type = hdev->hw.mac.media_type; 1189 1190 if (media_type == HNAE3_MEDIA_TYPE_FIBER) 1191 hclge_parse_fiber_link_mode(hdev, speed_ability); 1192 else if (media_type == HNAE3_MEDIA_TYPE_COPPER) 1193 hclge_parse_copper_link_mode(hdev, speed_ability); 1194 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) 1195 hclge_parse_backplane_link_mode(hdev, speed_ability); 1196 } 1197 1198 static u32 hclge_get_max_speed(u8 speed_ability) 1199 { 1200 if (speed_ability & HCLGE_SUPPORT_100G_BIT) 1201 return HCLGE_MAC_SPEED_100G; 1202 1203 if (speed_ability & HCLGE_SUPPORT_50G_BIT) 1204 return HCLGE_MAC_SPEED_50G; 1205 1206 if (speed_ability & HCLGE_SUPPORT_40G_BIT) 1207 return HCLGE_MAC_SPEED_40G; 1208 1209 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1210 return HCLGE_MAC_SPEED_25G; 1211 1212 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1213 return HCLGE_MAC_SPEED_10G; 1214 1215 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1216 return HCLGE_MAC_SPEED_1G; 1217 
1218 if (speed_ability & HCLGE_SUPPORT_100M_BIT) 1219 return HCLGE_MAC_SPEED_100M; 1220 1221 if (speed_ability & HCLGE_SUPPORT_10M_BIT) 1222 return HCLGE_MAC_SPEED_10M; 1223 1224 return HCLGE_MAC_SPEED_1G; 1225 } 1226 1227 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) 1228 { 1229 struct hclge_cfg_param_cmd *req; 1230 u64 mac_addr_tmp_high; 1231 u64 mac_addr_tmp; 1232 unsigned int i; 1233 1234 req = (struct hclge_cfg_param_cmd *)desc[0].data; 1235 1236 /* get the configuration */ 1237 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1238 HCLGE_CFG_VMDQ_M, 1239 HCLGE_CFG_VMDQ_S); 1240 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1241 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); 1242 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1243 HCLGE_CFG_TQP_DESC_N_M, 1244 HCLGE_CFG_TQP_DESC_N_S); 1245 1246 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), 1247 HCLGE_CFG_PHY_ADDR_M, 1248 HCLGE_CFG_PHY_ADDR_S); 1249 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), 1250 HCLGE_CFG_MEDIA_TP_M, 1251 HCLGE_CFG_MEDIA_TP_S); 1252 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), 1253 HCLGE_CFG_RX_BUF_LEN_M, 1254 HCLGE_CFG_RX_BUF_LEN_S); 1255 /* get mac_address */ 1256 mac_addr_tmp = __le32_to_cpu(req->param[2]); 1257 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), 1258 HCLGE_CFG_MAC_ADDR_H_M, 1259 HCLGE_CFG_MAC_ADDR_H_S); 1260 1261 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 1262 1263 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), 1264 HCLGE_CFG_DEFAULT_SPEED_M, 1265 HCLGE_CFG_DEFAULT_SPEED_S); 1266 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), 1267 HCLGE_CFG_RSS_SIZE_M, 1268 HCLGE_CFG_RSS_SIZE_S); 1269 1270 for (i = 0; i < ETH_ALEN; i++) 1271 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 1272 1273 req = (struct hclge_cfg_param_cmd *)desc[1].data; 1274 cfg->numa_node_map = __le32_to_cpu(req->param[0]); 1275 1276 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), 1277 HCLGE_CFG_SPEED_ABILITY_M, 1278 HCLGE_CFG_SPEED_ABILITY_S); 1279 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), 1280 HCLGE_CFG_UMV_TBL_SPACE_M, 1281 HCLGE_CFG_UMV_TBL_SPACE_S); 1282 if (!cfg->umv_space) 1283 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; 1284 } 1285 1286 /* hclge_get_cfg: query the static parameter from flash 1287 * @hdev: pointer to struct hclge_dev 1288 * @hcfg: the config structure to be getted 1289 */ 1290 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) 1291 { 1292 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; 1293 struct hclge_cfg_param_cmd *req; 1294 unsigned int i; 1295 int ret; 1296 1297 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { 1298 u32 offset = 0; 1299 1300 req = (struct hclge_cfg_param_cmd *)desc[i].data; 1301 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, 1302 true); 1303 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, 1304 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); 1305 /* Len should be united by 4 bytes when send to hardware */ 1306 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, 1307 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); 1308 req->offset = cpu_to_le32(offset); 1309 } 1310 1311 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); 1312 if (ret) { 1313 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); 1314 return ret; 1315 } 1316 1317 hclge_parse_cfg(hcfg, desc); 1318 1319 return 0; 1320 } 1321 1322 static int 
hclge_get_cap(struct hclge_dev *hdev) 1323 { 1324 int ret; 1325 1326 ret = hclge_query_function_status(hdev); 1327 if (ret) { 1328 dev_err(&hdev->pdev->dev, 1329 "query function status error %d.\n", ret); 1330 return ret; 1331 } 1332 1333 /* get pf resource */ 1334 ret = hclge_query_pf_resource(hdev); 1335 if (ret) 1336 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret); 1337 1338 return ret; 1339 } 1340 1341 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) 1342 { 1343 #define HCLGE_MIN_TX_DESC 64 1344 #define HCLGE_MIN_RX_DESC 64 1345 1346 if (!is_kdump_kernel()) 1347 return; 1348 1349 dev_info(&hdev->pdev->dev, 1350 "Running kdump kernel. Using minimal resources\n"); 1351 1352 /* minimal queue pairs equals to the number of vports */ 1353 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1354 hdev->num_tx_desc = HCLGE_MIN_TX_DESC; 1355 hdev->num_rx_desc = HCLGE_MIN_RX_DESC; 1356 } 1357 1358 static int hclge_configure(struct hclge_dev *hdev) 1359 { 1360 struct hclge_cfg cfg; 1361 unsigned int i; 1362 int ret; 1363 1364 ret = hclge_get_cfg(hdev, &cfg); 1365 if (ret) { 1366 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); 1367 return ret; 1368 } 1369 1370 hdev->num_vmdq_vport = cfg.vmdq_vport_num; 1371 hdev->base_tqp_pid = 0; 1372 hdev->rss_size_max = cfg.rss_size_max; 1373 hdev->rx_buf_len = cfg.rx_buf_len; 1374 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); 1375 hdev->hw.mac.media_type = cfg.media_type; 1376 hdev->hw.mac.phy_addr = cfg.phy_addr; 1377 hdev->num_tx_desc = cfg.tqp_desc_num; 1378 hdev->num_rx_desc = cfg.tqp_desc_num; 1379 hdev->tm_info.num_pg = 1; 1380 hdev->tc_max = cfg.tc_num; 1381 hdev->tm_info.hw_pfc_map = 0; 1382 hdev->wanted_umv_size = cfg.umv_space; 1383 1384 if (hnae3_dev_fd_supported(hdev)) { 1385 hdev->fd_en = true; 1386 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 1387 } 1388 1389 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); 1390 if (ret) { 1391 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); 1392 return ret; 1393 } 1394 1395 hclge_parse_link_mode(hdev, cfg.speed_ability); 1396 1397 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); 1398 1399 if ((hdev->tc_max > HNAE3_MAX_TC) || 1400 (hdev->tc_max < 1)) { 1401 dev_warn(&hdev->pdev->dev, "TC num = %u.\n", 1402 hdev->tc_max); 1403 hdev->tc_max = 1; 1404 } 1405 1406 /* Dev does not support DCB */ 1407 if (!hnae3_dev_dcb_supported(hdev)) { 1408 hdev->tc_max = 1; 1409 hdev->pfc_max = 0; 1410 } else { 1411 hdev->pfc_max = hdev->tc_max; 1412 } 1413 1414 hdev->tm_info.num_tc = 1; 1415 1416 /* Currently not support uncontiuous tc */ 1417 for (i = 0; i < hdev->tm_info.num_tc; i++) 1418 hnae3_set_bit(hdev->hw_tc_map, i, 1); 1419 1420 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; 1421 1422 hclge_init_kdump_kernel_config(hdev); 1423 1424 /* Set the init affinity based on pci func number */ 1425 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev))); 1426 i = i ? 
PCI_FUNC(hdev->pdev->devfn) % i : 0; 1427 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)), 1428 &hdev->affinity_mask); 1429 1430 return ret; 1431 } 1432 1433 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min, 1434 unsigned int tso_mss_max) 1435 { 1436 struct hclge_cfg_tso_status_cmd *req; 1437 struct hclge_desc desc; 1438 u16 tso_mss; 1439 1440 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); 1441 1442 req = (struct hclge_cfg_tso_status_cmd *)desc.data; 1443 1444 tso_mss = 0; 1445 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 1446 HCLGE_TSO_MSS_MIN_S, tso_mss_min); 1447 req->tso_mss_min = cpu_to_le16(tso_mss); 1448 1449 tso_mss = 0; 1450 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 1451 HCLGE_TSO_MSS_MIN_S, tso_mss_max); 1452 req->tso_mss_max = cpu_to_le16(tso_mss); 1453 1454 return hclge_cmd_send(&hdev->hw, &desc, 1); 1455 } 1456 1457 static int hclge_config_gro(struct hclge_dev *hdev, bool en) 1458 { 1459 struct hclge_cfg_gro_status_cmd *req; 1460 struct hclge_desc desc; 1461 int ret; 1462 1463 if (!hnae3_dev_gro_supported(hdev)) 1464 return 0; 1465 1466 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); 1467 req = (struct hclge_cfg_gro_status_cmd *)desc.data; 1468 1469 req->gro_en = cpu_to_le16(en ? 1 : 0); 1470 1471 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1472 if (ret) 1473 dev_err(&hdev->pdev->dev, 1474 "GRO hardware config cmd failed, ret = %d\n", ret); 1475 1476 return ret; 1477 } 1478 1479 static int hclge_alloc_tqps(struct hclge_dev *hdev) 1480 { 1481 struct hclge_tqp *tqp; 1482 int i; 1483 1484 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 1485 sizeof(struct hclge_tqp), GFP_KERNEL); 1486 if (!hdev->htqp) 1487 return -ENOMEM; 1488 1489 tqp = hdev->htqp; 1490 1491 for (i = 0; i < hdev->num_tqps; i++) { 1492 tqp->dev = &hdev->pdev->dev; 1493 tqp->index = i; 1494 1495 tqp->q.ae_algo = &ae_algo; 1496 tqp->q.buf_size = hdev->rx_buf_len; 1497 tqp->q.tx_desc_num = hdev->num_tx_desc; 1498 tqp->q.rx_desc_num = hdev->num_rx_desc; 1499 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + 1500 i * HCLGE_TQP_REG_SIZE; 1501 1502 tqp++; 1503 } 1504 1505 return 0; 1506 } 1507 1508 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, 1509 u16 tqp_pid, u16 tqp_vid, bool is_pf) 1510 { 1511 struct hclge_tqp_map_cmd *req; 1512 struct hclge_desc desc; 1513 int ret; 1514 1515 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); 1516 1517 req = (struct hclge_tqp_map_cmd *)desc.data; 1518 req->tqp_id = cpu_to_le16(tqp_pid); 1519 req->tqp_vf = func_id; 1520 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; 1521 if (!is_pf) 1522 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; 1523 req->tqp_vid = cpu_to_le16(tqp_vid); 1524 1525 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1526 if (ret) 1527 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); 1528 1529 return ret; 1530 } 1531 1532 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) 1533 { 1534 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 1535 struct hclge_dev *hdev = vport->back; 1536 int i, alloced; 1537 1538 for (i = 0, alloced = 0; i < hdev->num_tqps && 1539 alloced < num_tqps; i++) { 1540 if (!hdev->htqp[i].alloced) { 1541 hdev->htqp[i].q.handle = &vport->nic; 1542 hdev->htqp[i].q.tqp_index = alloced; 1543 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; 1544 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; 1545 kinfo->tqp[alloced] = &hdev->htqp[i].q; 1546 hdev->htqp[i].alloced = true; 
1547 alloced++; 1548 } 1549 } 1550 vport->alloc_tqps = alloced; 1551 kinfo->rss_size = min_t(u16, hdev->rss_size_max, 1552 vport->alloc_tqps / hdev->tm_info.num_tc); 1553 1554 /* ensure one to one mapping between irq and queue at default */ 1555 kinfo->rss_size = min_t(u16, kinfo->rss_size, 1556 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); 1557 1558 return 0; 1559 } 1560 1561 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, 1562 u16 num_tx_desc, u16 num_rx_desc) 1563 1564 { 1565 struct hnae3_handle *nic = &vport->nic; 1566 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1567 struct hclge_dev *hdev = vport->back; 1568 int ret; 1569 1570 kinfo->num_tx_desc = num_tx_desc; 1571 kinfo->num_rx_desc = num_rx_desc; 1572 1573 kinfo->rx_buf_len = hdev->rx_buf_len; 1574 1575 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, 1576 sizeof(struct hnae3_queue *), GFP_KERNEL); 1577 if (!kinfo->tqp) 1578 return -ENOMEM; 1579 1580 ret = hclge_assign_tqp(vport, num_tqps); 1581 if (ret) 1582 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1583 1584 return ret; 1585 } 1586 1587 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, 1588 struct hclge_vport *vport) 1589 { 1590 struct hnae3_handle *nic = &vport->nic; 1591 struct hnae3_knic_private_info *kinfo; 1592 u16 i; 1593 1594 kinfo = &nic->kinfo; 1595 for (i = 0; i < vport->alloc_tqps; i++) { 1596 struct hclge_tqp *q = 1597 container_of(kinfo->tqp[i], struct hclge_tqp, q); 1598 bool is_pf; 1599 int ret; 1600 1601 is_pf = !(vport->vport_id); 1602 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, 1603 i, is_pf); 1604 if (ret) 1605 return ret; 1606 } 1607 1608 return 0; 1609 } 1610 1611 static int hclge_map_tqp(struct hclge_dev *hdev) 1612 { 1613 struct hclge_vport *vport = hdev->vport; 1614 u16 i, num_vport; 1615 1616 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1617 for (i = 0; i < num_vport; i++) { 1618 int ret; 1619 1620 ret = hclge_map_tqp_to_vport(hdev, vport); 1621 if (ret) 1622 return ret; 1623 1624 vport++; 1625 } 1626 1627 return 0; 1628 } 1629 1630 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1631 { 1632 struct hnae3_handle *nic = &vport->nic; 1633 struct hclge_dev *hdev = vport->back; 1634 int ret; 1635 1636 nic->pdev = hdev->pdev; 1637 nic->ae_algo = &ae_algo; 1638 nic->numa_node_mask = hdev->numa_node_mask; 1639 1640 ret = hclge_knic_setup(vport, num_tqps, 1641 hdev->num_tx_desc, hdev->num_rx_desc); 1642 if (ret) 1643 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); 1644 1645 return ret; 1646 } 1647 1648 static int hclge_alloc_vport(struct hclge_dev *hdev) 1649 { 1650 struct pci_dev *pdev = hdev->pdev; 1651 struct hclge_vport *vport; 1652 u32 tqp_main_vport; 1653 u32 tqp_per_vport; 1654 int num_vport, i; 1655 int ret; 1656 1657 /* We need to alloc a vport for main NIC of PF */ 1658 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1659 1660 if (hdev->num_tqps < num_vport) { 1661 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", 1662 hdev->num_tqps, num_vport); 1663 return -EINVAL; 1664 } 1665 1666 /* Alloc the same number of TQPs for every vport */ 1667 tqp_per_vport = hdev->num_tqps / num_vport; 1668 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1669 1670 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1671 GFP_KERNEL); 1672 if (!vport) 1673 return -ENOMEM; 1674 1675 hdev->vport = vport; 1676 hdev->num_alloc_vport = num_vport; 1677 1678 if (IS_ENABLED(CONFIG_PCI_IOV)) 1679 
hdev->num_alloc_vfs = hdev->num_req_vfs; 1680 1681 for (i = 0; i < num_vport; i++) { 1682 vport->back = hdev; 1683 vport->vport_id = i; 1684 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; 1685 vport->mps = HCLGE_MAC_DEFAULT_FRAME; 1686 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; 1687 vport->rxvlan_cfg.rx_vlan_offload_en = true; 1688 INIT_LIST_HEAD(&vport->vlan_list); 1689 INIT_LIST_HEAD(&vport->uc_mac_list); 1690 INIT_LIST_HEAD(&vport->mc_mac_list); 1691 1692 if (i == 0) 1693 ret = hclge_vport_setup(vport, tqp_main_vport); 1694 else 1695 ret = hclge_vport_setup(vport, tqp_per_vport); 1696 if (ret) { 1697 dev_err(&pdev->dev, 1698 "vport setup failed for vport %d, %d\n", 1699 i, ret); 1700 return ret; 1701 } 1702 1703 vport++; 1704 } 1705 1706 return 0; 1707 } 1708 1709 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1710 struct hclge_pkt_buf_alloc *buf_alloc) 1711 { 1712 /* TX buffer size is unit by 128 byte */ 1713 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1714 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1715 struct hclge_tx_buff_alloc_cmd *req; 1716 struct hclge_desc desc; 1717 int ret; 1718 u8 i; 1719 1720 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1721 1722 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1723 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1724 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1725 1726 req->tx_pkt_buff[i] = 1727 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1728 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1729 } 1730 1731 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1732 if (ret) 1733 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1734 ret); 1735 1736 return ret; 1737 } 1738 1739 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1740 struct hclge_pkt_buf_alloc *buf_alloc) 1741 { 1742 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1743 1744 if (ret) 1745 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); 1746 1747 return ret; 1748 } 1749 1750 static u32 hclge_get_tc_num(struct hclge_dev *hdev) 1751 { 1752 unsigned int i; 1753 u32 cnt = 0; 1754 1755 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1756 if (hdev->hw_tc_map & BIT(i)) 1757 cnt++; 1758 return cnt; 1759 } 1760 1761 /* Get the number of pfc enabled TCs, which have private buffer */ 1762 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1763 struct hclge_pkt_buf_alloc *buf_alloc) 1764 { 1765 struct hclge_priv_buf *priv; 1766 unsigned int i; 1767 int cnt = 0; 1768 1769 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1770 priv = &buf_alloc->priv_buf[i]; 1771 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1772 priv->enable) 1773 cnt++; 1774 } 1775 1776 return cnt; 1777 } 1778 1779 /* Get the number of pfc disabled TCs, which have private buffer */ 1780 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1781 struct hclge_pkt_buf_alloc *buf_alloc) 1782 { 1783 struct hclge_priv_buf *priv; 1784 unsigned int i; 1785 int cnt = 0; 1786 1787 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1788 priv = &buf_alloc->priv_buf[i]; 1789 if (hdev->hw_tc_map & BIT(i) && 1790 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1791 priv->enable) 1792 cnt++; 1793 } 1794 1795 return cnt; 1796 } 1797 1798 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1799 { 1800 struct hclge_priv_buf *priv; 1801 u32 rx_priv = 0; 1802 int i; 1803 1804 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1805 priv = &buf_alloc->priv_buf[i]; 1806 if (priv->enable) 1807 rx_priv += priv->buf_size; 1808 } 1809 return rx_priv; 1810 } 1811 1812 static u32 
hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1813 { 1814 u32 i, total_tx_size = 0; 1815 1816 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1817 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 1818 1819 return total_tx_size; 1820 } 1821 1822 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, 1823 struct hclge_pkt_buf_alloc *buf_alloc, 1824 u32 rx_all) 1825 { 1826 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 1827 u32 tc_num = hclge_get_tc_num(hdev); 1828 u32 shared_buf, aligned_mps; 1829 u32 rx_priv; 1830 int i; 1831 1832 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); 1833 1834 if (hnae3_dev_dcb_supported(hdev)) 1835 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + 1836 hdev->dv_buf_size; 1837 else 1838 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF 1839 + hdev->dv_buf_size; 1840 1841 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 1842 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), 1843 HCLGE_BUF_SIZE_UNIT); 1844 1845 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); 1846 if (rx_all < rx_priv + shared_std) 1847 return false; 1848 1849 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); 1850 buf_alloc->s_buf.buf_size = shared_buf; 1851 if (hnae3_dev_dcb_supported(hdev)) { 1852 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; 1853 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 1854 - roundup(aligned_mps / HCLGE_BUF_DIV_BY, 1855 HCLGE_BUF_SIZE_UNIT); 1856 } else { 1857 buf_alloc->s_buf.self.high = aligned_mps + 1858 HCLGE_NON_DCB_ADDITIONAL_BUF; 1859 buf_alloc->s_buf.self.low = aligned_mps; 1860 } 1861 1862 if (hnae3_dev_dcb_supported(hdev)) { 1863 hi_thrd = shared_buf - hdev->dv_buf_size; 1864 1865 if (tc_num <= NEED_RESERVE_TC_NUM) 1866 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT 1867 / BUF_MAX_PERCENT; 1868 1869 if (tc_num) 1870 hi_thrd = hi_thrd / tc_num; 1871 1872 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps); 1873 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); 1874 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; 1875 } else { 1876 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; 1877 lo_thrd = aligned_mps; 1878 } 1879 1880 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1881 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 1882 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 1883 } 1884 1885 return true; 1886 } 1887 1888 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, 1889 struct hclge_pkt_buf_alloc *buf_alloc) 1890 { 1891 u32 i, total_size; 1892 1893 total_size = hdev->pkt_buf_size; 1894 1895 /* alloc tx buffer for all enabled tc */ 1896 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1897 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1898 1899 if (hdev->hw_tc_map & BIT(i)) { 1900 if (total_size < hdev->tx_buf_size) 1901 return -ENOMEM; 1902 1903 priv->tx_buf_size = hdev->tx_buf_size; 1904 } else { 1905 priv->tx_buf_size = 0; 1906 } 1907 1908 total_size -= priv->tx_buf_size; 1909 } 1910 1911 return 0; 1912 } 1913 1914 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, 1915 struct hclge_pkt_buf_alloc *buf_alloc) 1916 { 1917 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 1918 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); 1919 unsigned int i; 1920 1921 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1922 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1923 1924 priv->enable = 0; 1925 priv->wl.low = 0; 1926 priv->wl.high = 0; 1927 priv->buf_size = 0; 1928 1929 if (!(hdev->hw_tc_map & BIT(i))) 1930 
continue; 1931 1932 priv->enable = 1; 1933 1934 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1935 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; 1936 priv->wl.high = roundup(priv->wl.low + aligned_mps, 1937 HCLGE_BUF_SIZE_UNIT); 1938 } else { 1939 priv->wl.low = 0; 1940 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) : 1941 aligned_mps; 1942 } 1943 1944 priv->buf_size = priv->wl.high + hdev->dv_buf_size; 1945 } 1946 1947 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 1948 } 1949 1950 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, 1951 struct hclge_pkt_buf_alloc *buf_alloc) 1952 { 1953 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 1954 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 1955 int i; 1956 1957 /* let the last to be cleared first */ 1958 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1959 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1960 unsigned int mask = BIT((unsigned int)i); 1961 1962 if (hdev->hw_tc_map & mask && 1963 !(hdev->tm_info.hw_pfc_map & mask)) { 1964 /* Clear the no pfc TC private buffer */ 1965 priv->wl.low = 0; 1966 priv->wl.high = 0; 1967 priv->buf_size = 0; 1968 priv->enable = 0; 1969 no_pfc_priv_num--; 1970 } 1971 1972 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1973 no_pfc_priv_num == 0) 1974 break; 1975 } 1976 1977 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 1978 } 1979 1980 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, 1981 struct hclge_pkt_buf_alloc *buf_alloc) 1982 { 1983 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 1984 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 1985 int i; 1986 1987 /* let the last to be cleared first */ 1988 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1989 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1990 unsigned int mask = BIT((unsigned int)i); 1991 1992 if (hdev->hw_tc_map & mask && 1993 hdev->tm_info.hw_pfc_map & mask) { 1994 /* Reduce the number of pfc TC with private buffer */ 1995 priv->wl.low = 0; 1996 priv->enable = 0; 1997 priv->wl.high = 0; 1998 priv->buf_size = 0; 1999 pfc_priv_num--; 2000 } 2001 2002 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 2003 pfc_priv_num == 0) 2004 break; 2005 } 2006 2007 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2008 } 2009 2010 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, 2011 struct hclge_pkt_buf_alloc *buf_alloc) 2012 { 2013 #define COMPENSATE_BUFFER 0x3C00 2014 #define COMPENSATE_HALF_MPS_NUM 5 2015 #define PRIV_WL_GAP 0x1800 2016 2017 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2018 u32 tc_num = hclge_get_tc_num(hdev); 2019 u32 half_mps = hdev->mps >> 1; 2020 u32 min_rx_priv; 2021 unsigned int i; 2022 2023 if (tc_num) 2024 rx_priv = rx_priv / tc_num; 2025 2026 if (tc_num <= NEED_RESERVE_TC_NUM) 2027 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 2028 2029 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + 2030 COMPENSATE_HALF_MPS_NUM * half_mps; 2031 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); 2032 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); 2033 2034 if (rx_priv < min_rx_priv) 2035 return false; 2036 2037 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2038 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2039 2040 priv->enable = 0; 2041 priv->wl.low = 0; 2042 priv->wl.high = 0; 2043 priv->buf_size = 0; 2044 2045 if (!(hdev->hw_tc_map & BIT(i))) 2046 continue; 2047 2048 priv->enable = 1; 2049 priv->buf_size = rx_priv; 
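		/* give this TC the whole per-TC share as private buffer: the
		 * high waterline keeps dv_buf_size of headroom below the
		 * buffer size and the low waterline sits PRIV_WL_GAP bytes
		 * below the high one.
		 */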
2050 priv->wl.high = rx_priv - hdev->dv_buf_size; 2051 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 2052 } 2053 2054 buf_alloc->s_buf.buf_size = 0; 2055 2056 return true; 2057 } 2058 2059 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs 2060 * @hdev: pointer to struct hclge_dev 2061 * @buf_alloc: pointer to buffer calculation data 2062 * @return: 0: calculate sucessful, negative: fail 2063 */ 2064 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, 2065 struct hclge_pkt_buf_alloc *buf_alloc) 2066 { 2067 /* When DCB is not supported, rx private buffer is not allocated. */ 2068 if (!hnae3_dev_dcb_supported(hdev)) { 2069 u32 rx_all = hdev->pkt_buf_size; 2070 2071 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); 2072 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 2073 return -ENOMEM; 2074 2075 return 0; 2076 } 2077 2078 if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) 2079 return 0; 2080 2081 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) 2082 return 0; 2083 2084 /* try to decrease the buffer size */ 2085 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) 2086 return 0; 2087 2088 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) 2089 return 0; 2090 2091 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) 2092 return 0; 2093 2094 return -ENOMEM; 2095 } 2096 2097 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 2098 struct hclge_pkt_buf_alloc *buf_alloc) 2099 { 2100 struct hclge_rx_priv_buff_cmd *req; 2101 struct hclge_desc desc; 2102 int ret; 2103 int i; 2104 2105 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 2106 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 2107 2108 /* Alloc private buffer TCs */ 2109 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2110 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2111 2112 req->buf_num[i] = 2113 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 2114 req->buf_num[i] |= 2115 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 2116 } 2117 2118 req->shared_buf = 2119 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 2120 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 2121 2122 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2123 if (ret) 2124 dev_err(&hdev->pdev->dev, 2125 "rx private buffer alloc cmd failed %d\n", ret); 2126 2127 return ret; 2128 } 2129 2130 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 2131 struct hclge_pkt_buf_alloc *buf_alloc) 2132 { 2133 struct hclge_rx_priv_wl_buf *req; 2134 struct hclge_priv_buf *priv; 2135 struct hclge_desc desc[2]; 2136 int i, j; 2137 int ret; 2138 2139 for (i = 0; i < 2; i++) { 2140 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 2141 false); 2142 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 2143 2144 /* The first descriptor set the NEXT bit to 1 */ 2145 if (i == 0) 2146 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 2147 else 2148 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 2149 2150 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2151 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 2152 2153 priv = &buf_alloc->priv_buf[idx]; 2154 req->tc_wl[j].high = 2155 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 2156 req->tc_wl[j].high |= 2157 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2158 req->tc_wl[j].low = 2159 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 2160 req->tc_wl[j].low |= 2161 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2162 } 2163 } 2164 2165 /* Send 2 descriptor at one time */ 2166 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2167 if (ret) 2168 dev_err(&hdev->pdev->dev, 2169 "rx private waterline config cmd failed %d\n", 2170 
ret); 2171 return ret; 2172 } 2173 2174 static int hclge_common_thrd_config(struct hclge_dev *hdev, 2175 struct hclge_pkt_buf_alloc *buf_alloc) 2176 { 2177 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 2178 struct hclge_rx_com_thrd *req; 2179 struct hclge_desc desc[2]; 2180 struct hclge_tc_thrd *tc; 2181 int i, j; 2182 int ret; 2183 2184 for (i = 0; i < 2; i++) { 2185 hclge_cmd_setup_basic_desc(&desc[i], 2186 HCLGE_OPC_RX_COM_THRD_ALLOC, false); 2187 req = (struct hclge_rx_com_thrd *)&desc[i].data; 2188 2189 /* The first descriptor set the NEXT bit to 1 */ 2190 if (i == 0) 2191 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 2192 else 2193 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 2194 2195 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2196 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 2197 2198 req->com_thrd[j].high = 2199 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 2200 req->com_thrd[j].high |= 2201 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2202 req->com_thrd[j].low = 2203 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 2204 req->com_thrd[j].low |= 2205 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2206 } 2207 } 2208 2209 /* Send 2 descriptors at one time */ 2210 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2211 if (ret) 2212 dev_err(&hdev->pdev->dev, 2213 "common threshold config cmd failed %d\n", ret); 2214 return ret; 2215 } 2216 2217 static int hclge_common_wl_config(struct hclge_dev *hdev, 2218 struct hclge_pkt_buf_alloc *buf_alloc) 2219 { 2220 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 2221 struct hclge_rx_com_wl *req; 2222 struct hclge_desc desc; 2223 int ret; 2224 2225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 2226 2227 req = (struct hclge_rx_com_wl *)desc.data; 2228 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 2229 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2230 2231 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 2232 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2233 2234 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2235 if (ret) 2236 dev_err(&hdev->pdev->dev, 2237 "common waterline config cmd failed %d\n", ret); 2238 2239 return ret; 2240 } 2241 2242 int hclge_buffer_alloc(struct hclge_dev *hdev) 2243 { 2244 struct hclge_pkt_buf_alloc *pkt_buf; 2245 int ret; 2246 2247 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 2248 if (!pkt_buf) 2249 return -ENOMEM; 2250 2251 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 2252 if (ret) { 2253 dev_err(&hdev->pdev->dev, 2254 "could not calc tx buffer size for all TCs %d\n", ret); 2255 goto out; 2256 } 2257 2258 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 2259 if (ret) { 2260 dev_err(&hdev->pdev->dev, 2261 "could not alloc tx buffers %d\n", ret); 2262 goto out; 2263 } 2264 2265 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 2266 if (ret) { 2267 dev_err(&hdev->pdev->dev, 2268 "could not calc rx priv buffer size for all TCs %d\n", 2269 ret); 2270 goto out; 2271 } 2272 2273 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 2274 if (ret) { 2275 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 2276 ret); 2277 goto out; 2278 } 2279 2280 if (hnae3_dev_dcb_supported(hdev)) { 2281 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2282 if (ret) { 2283 dev_err(&hdev->pdev->dev, 2284 "could not configure rx private waterline %d\n", 2285 ret); 2286 goto out; 2287 } 2288 2289 ret = hclge_common_thrd_config(hdev, pkt_buf); 2290 if (ret) { 2291 dev_err(&hdev->pdev->dev, 2292 "could not configure common threshold %d\n", 2293 ret); 2294 goto out; 2295 } 2296 
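		/* the per-TC waterline and threshold setup above is only
		 * needed when DCB is supported; the shared-buffer waterline
		 * configured below is programmed in either case.
		 */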
} 2297 2298 ret = hclge_common_wl_config(hdev, pkt_buf); 2299 if (ret) 2300 dev_err(&hdev->pdev->dev, 2301 "could not configure common waterline %d\n", ret); 2302 2303 out: 2304 kfree(pkt_buf); 2305 return ret; 2306 } 2307 2308 static int hclge_init_roce_base_info(struct hclge_vport *vport) 2309 { 2310 struct hnae3_handle *roce = &vport->roce; 2311 struct hnae3_handle *nic = &vport->nic; 2312 2313 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2314 2315 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || 2316 vport->back->num_msi_left == 0) 2317 return -EINVAL; 2318 2319 roce->rinfo.base_vector = vport->back->roce_base_vector; 2320 2321 roce->rinfo.netdev = nic->kinfo.netdev; 2322 roce->rinfo.roce_io_base = vport->back->hw.io_base; 2323 2324 roce->pdev = nic->pdev; 2325 roce->ae_algo = nic->ae_algo; 2326 roce->numa_node_mask = nic->numa_node_mask; 2327 2328 return 0; 2329 } 2330 2331 static int hclge_init_msi(struct hclge_dev *hdev) 2332 { 2333 struct pci_dev *pdev = hdev->pdev; 2334 int vectors; 2335 int i; 2336 2337 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2338 hdev->num_msi, 2339 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2340 if (vectors < 0) { 2341 dev_err(&pdev->dev, 2342 "failed(%d) to allocate MSI/MSI-X vectors\n", 2343 vectors); 2344 return vectors; 2345 } 2346 if (vectors < hdev->num_msi) 2347 dev_warn(&hdev->pdev->dev, 2348 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2349 hdev->num_msi, vectors); 2350 2351 hdev->num_msi = vectors; 2352 hdev->num_msi_left = vectors; 2353 2354 hdev->base_msi_vector = pdev->irq; 2355 hdev->roce_base_vector = hdev->base_msi_vector + 2356 hdev->roce_base_msix_offset; 2357 2358 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2359 sizeof(u16), GFP_KERNEL); 2360 if (!hdev->vector_status) { 2361 pci_free_irq_vectors(pdev); 2362 return -ENOMEM; 2363 } 2364 2365 for (i = 0; i < hdev->num_msi; i++) 2366 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2367 2368 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2369 sizeof(int), GFP_KERNEL); 2370 if (!hdev->vector_irq) { 2371 pci_free_irq_vectors(pdev); 2372 return -ENOMEM; 2373 } 2374 2375 return 0; 2376 } 2377 2378 static u8 hclge_check_speed_dup(u8 duplex, int speed) 2379 { 2380 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) 2381 duplex = HCLGE_MAC_FULL; 2382 2383 return duplex; 2384 } 2385 2386 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, 2387 u8 duplex) 2388 { 2389 struct hclge_config_mac_speed_dup_cmd *req; 2390 struct hclge_desc desc; 2391 int ret; 2392 2393 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2394 2395 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2396 2397 if (duplex) 2398 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); 2399 2400 switch (speed) { 2401 case HCLGE_MAC_SPEED_10M: 2402 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2403 HCLGE_CFG_SPEED_S, 6); 2404 break; 2405 case HCLGE_MAC_SPEED_100M: 2406 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2407 HCLGE_CFG_SPEED_S, 7); 2408 break; 2409 case HCLGE_MAC_SPEED_1G: 2410 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2411 HCLGE_CFG_SPEED_S, 0); 2412 break; 2413 case HCLGE_MAC_SPEED_10G: 2414 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2415 HCLGE_CFG_SPEED_S, 1); 2416 break; 2417 case HCLGE_MAC_SPEED_25G: 2418 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2419 HCLGE_CFG_SPEED_S, 2); 2420 break; 2421 case HCLGE_MAC_SPEED_40G: 2422 hnae3_set_field(req->speed_dup, 
HCLGE_CFG_SPEED_M, 2423 HCLGE_CFG_SPEED_S, 3); 2424 break; 2425 case HCLGE_MAC_SPEED_50G: 2426 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2427 HCLGE_CFG_SPEED_S, 4); 2428 break; 2429 case HCLGE_MAC_SPEED_100G: 2430 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2431 HCLGE_CFG_SPEED_S, 5); 2432 break; 2433 default: 2434 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2435 return -EINVAL; 2436 } 2437 2438 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2439 1); 2440 2441 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2442 if (ret) { 2443 dev_err(&hdev->pdev->dev, 2444 "mac speed/duplex config cmd failed %d.\n", ret); 2445 return ret; 2446 } 2447 2448 return 0; 2449 } 2450 2451 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) 2452 { 2453 int ret; 2454 2455 duplex = hclge_check_speed_dup(duplex, speed); 2456 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) 2457 return 0; 2458 2459 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); 2460 if (ret) 2461 return ret; 2462 2463 hdev->hw.mac.speed = speed; 2464 hdev->hw.mac.duplex = duplex; 2465 2466 return 0; 2467 } 2468 2469 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2470 u8 duplex) 2471 { 2472 struct hclge_vport *vport = hclge_get_vport(handle); 2473 struct hclge_dev *hdev = vport->back; 2474 2475 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2476 } 2477 2478 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2479 { 2480 struct hclge_config_auto_neg_cmd *req; 2481 struct hclge_desc desc; 2482 u32 flag = 0; 2483 int ret; 2484 2485 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2486 2487 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2488 if (enable) 2489 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U); 2490 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2491 2492 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2493 if (ret) 2494 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2495 ret); 2496 2497 return ret; 2498 } 2499 2500 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2501 { 2502 struct hclge_vport *vport = hclge_get_vport(handle); 2503 struct hclge_dev *hdev = vport->back; 2504 2505 if (!hdev->hw.mac.support_autoneg) { 2506 if (enable) { 2507 dev_err(&hdev->pdev->dev, 2508 "autoneg is not supported by current port\n"); 2509 return -EOPNOTSUPP; 2510 } else { 2511 return 0; 2512 } 2513 } 2514 2515 return hclge_set_autoneg_en(hdev, enable); 2516 } 2517 2518 static int hclge_get_autoneg(struct hnae3_handle *handle) 2519 { 2520 struct hclge_vport *vport = hclge_get_vport(handle); 2521 struct hclge_dev *hdev = vport->back; 2522 struct phy_device *phydev = hdev->hw.mac.phydev; 2523 2524 if (phydev) 2525 return phydev->autoneg; 2526 2527 return hdev->hw.mac.autoneg; 2528 } 2529 2530 static int hclge_restart_autoneg(struct hnae3_handle *handle) 2531 { 2532 struct hclge_vport *vport = hclge_get_vport(handle); 2533 struct hclge_dev *hdev = vport->back; 2534 int ret; 2535 2536 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); 2537 2538 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2539 if (ret) 2540 return ret; 2541 return hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2542 } 2543 2544 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt) 2545 { 2546 struct hclge_vport *vport = hclge_get_vport(handle); 2547 struct hclge_dev *hdev = vport->back; 2548 2549 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) 2550 return 
hclge_set_autoneg_en(hdev, !halt); 2551 2552 return 0; 2553 } 2554 2555 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) 2556 { 2557 struct hclge_config_fec_cmd *req; 2558 struct hclge_desc desc; 2559 int ret; 2560 2561 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); 2562 2563 req = (struct hclge_config_fec_cmd *)desc.data; 2564 if (fec_mode & BIT(HNAE3_FEC_AUTO)) 2565 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); 2566 if (fec_mode & BIT(HNAE3_FEC_RS)) 2567 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2568 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); 2569 if (fec_mode & BIT(HNAE3_FEC_BASER)) 2570 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2571 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); 2572 2573 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2574 if (ret) 2575 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); 2576 2577 return ret; 2578 } 2579 2580 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) 2581 { 2582 struct hclge_vport *vport = hclge_get_vport(handle); 2583 struct hclge_dev *hdev = vport->back; 2584 struct hclge_mac *mac = &hdev->hw.mac; 2585 int ret; 2586 2587 if (fec_mode && !(mac->fec_ability & fec_mode)) { 2588 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); 2589 return -EINVAL; 2590 } 2591 2592 ret = hclge_set_fec_hw(hdev, fec_mode); 2593 if (ret) 2594 return ret; 2595 2596 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); 2597 return 0; 2598 } 2599 2600 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, 2601 u8 *fec_mode) 2602 { 2603 struct hclge_vport *vport = hclge_get_vport(handle); 2604 struct hclge_dev *hdev = vport->back; 2605 struct hclge_mac *mac = &hdev->hw.mac; 2606 2607 if (fec_ability) 2608 *fec_ability = mac->fec_ability; 2609 if (fec_mode) 2610 *fec_mode = mac->fec_mode; 2611 } 2612 2613 static int hclge_mac_init(struct hclge_dev *hdev) 2614 { 2615 struct hclge_mac *mac = &hdev->hw.mac; 2616 int ret; 2617 2618 hdev->support_sfp_query = true; 2619 hdev->hw.mac.duplex = HCLGE_MAC_FULL; 2620 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, 2621 hdev->hw.mac.duplex); 2622 if (ret) { 2623 dev_err(&hdev->pdev->dev, 2624 "Config mac speed dup fail ret=%d\n", ret); 2625 return ret; 2626 } 2627 2628 if (hdev->hw.mac.support_autoneg) { 2629 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); 2630 if (ret) { 2631 dev_err(&hdev->pdev->dev, 2632 "Config mac autoneg fail ret=%d\n", ret); 2633 return ret; 2634 } 2635 } 2636 2637 mac->link = 0; 2638 2639 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { 2640 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); 2641 if (ret) { 2642 dev_err(&hdev->pdev->dev, 2643 "Fec mode init fail, ret = %d\n", ret); 2644 return ret; 2645 } 2646 } 2647 2648 ret = hclge_set_mac_mtu(hdev, hdev->mps); 2649 if (ret) { 2650 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); 2651 return ret; 2652 } 2653 2654 ret = hclge_set_default_loopback(hdev); 2655 if (ret) 2656 return ret; 2657 2658 ret = hclge_buffer_alloc(hdev); 2659 if (ret) 2660 dev_err(&hdev->pdev->dev, 2661 "allocate buffer fail, ret=%d\n", ret); 2662 2663 return ret; 2664 } 2665 2666 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2667 { 2668 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) && 2669 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2670 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq, 2671 &hdev->mbx_service_task); 2672 } 2673 2674 static void 
hclge_reset_task_schedule(struct hclge_dev *hdev) 2675 { 2676 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2677 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 2678 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq, 2679 &hdev->rst_service_task); 2680 } 2681 2682 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) 2683 { 2684 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && 2685 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2686 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) { 2687 hdev->hw_stats.stats_timer++; 2688 hdev->fd_arfs_expire_timer++; 2689 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), 2690 system_wq, &hdev->service_task, 2691 delay_time); 2692 } 2693 } 2694 2695 static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2696 { 2697 struct hclge_link_status_cmd *req; 2698 struct hclge_desc desc; 2699 int link_status; 2700 int ret; 2701 2702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2703 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2704 if (ret) { 2705 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2706 ret); 2707 return ret; 2708 } 2709 2710 req = (struct hclge_link_status_cmd *)desc.data; 2711 link_status = req->status & HCLGE_LINK_STATUS_UP_M; 2712 2713 return !!link_status; 2714 } 2715 2716 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2717 { 2718 unsigned int mac_state; 2719 int link_stat; 2720 2721 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) 2722 return 0; 2723 2724 mac_state = hclge_get_mac_link_status(hdev); 2725 2726 if (hdev->hw.mac.phydev) { 2727 if (hdev->hw.mac.phydev->state == PHY_RUNNING) 2728 link_stat = mac_state & 2729 hdev->hw.mac.phydev->link; 2730 else 2731 link_stat = 0; 2732 2733 } else { 2734 link_stat = mac_state; 2735 } 2736 2737 return !!link_stat; 2738 } 2739 2740 static void hclge_update_link_status(struct hclge_dev *hdev) 2741 { 2742 struct hnae3_client *rclient = hdev->roce_client; 2743 struct hnae3_client *client = hdev->nic_client; 2744 struct hnae3_handle *rhandle; 2745 struct hnae3_handle *handle; 2746 int state; 2747 int i; 2748 2749 if (!client) 2750 return; 2751 state = hclge_get_mac_phy_link(hdev); 2752 if (state != hdev->hw.mac.link) { 2753 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2754 handle = &hdev->vport[i].nic; 2755 client->ops->link_status_change(handle, state); 2756 hclge_config_mac_tnl_int(hdev, state); 2757 rhandle = &hdev->vport[i].roce; 2758 if (rclient && rclient->ops->link_status_change) 2759 rclient->ops->link_status_change(rhandle, 2760 state); 2761 } 2762 hdev->hw.mac.link = state; 2763 } 2764 } 2765 2766 static void hclge_update_port_capability(struct hclge_mac *mac) 2767 { 2768 /* update fec ability by speed */ 2769 hclge_convert_setting_fec(mac); 2770 2771 /* firmware can not identify back plane type, the media type 2772 * read from configuration can help deal it 2773 */ 2774 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && 2775 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) 2776 mac->module_type = HNAE3_MODULE_TYPE_KR; 2777 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 2778 mac->module_type = HNAE3_MODULE_TYPE_TP; 2779 2780 if (mac->support_autoneg) { 2781 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); 2782 linkmode_copy(mac->advertising, mac->supported); 2783 } else { 2784 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 2785 mac->supported); 2786 linkmode_zero(mac->advertising); 2787 } 2788 } 2789 2790 static int hclge_get_sfp_speed(struct 
hclge_dev *hdev, u32 *speed) 2791 { 2792 struct hclge_sfp_info_cmd *resp; 2793 struct hclge_desc desc; 2794 int ret; 2795 2796 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 2797 resp = (struct hclge_sfp_info_cmd *)desc.data; 2798 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2799 if (ret == -EOPNOTSUPP) { 2800 dev_warn(&hdev->pdev->dev, 2801 "IMP do not support get SFP speed %d\n", ret); 2802 return ret; 2803 } else if (ret) { 2804 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); 2805 return ret; 2806 } 2807 2808 *speed = le32_to_cpu(resp->speed); 2809 2810 return 0; 2811 } 2812 2813 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) 2814 { 2815 struct hclge_sfp_info_cmd *resp; 2816 struct hclge_desc desc; 2817 int ret; 2818 2819 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 2820 resp = (struct hclge_sfp_info_cmd *)desc.data; 2821 2822 resp->query_type = QUERY_ACTIVE_SPEED; 2823 2824 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2825 if (ret == -EOPNOTSUPP) { 2826 dev_warn(&hdev->pdev->dev, 2827 "IMP does not support get SFP info %d\n", ret); 2828 return ret; 2829 } else if (ret) { 2830 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); 2831 return ret; 2832 } 2833 2834 mac->speed = le32_to_cpu(resp->speed); 2835 /* if resp->speed_ability is 0, it means it's an old version 2836 * firmware, do not update these params 2837 */ 2838 if (resp->speed_ability) { 2839 mac->module_type = le32_to_cpu(resp->module_type); 2840 mac->speed_ability = le32_to_cpu(resp->speed_ability); 2841 mac->autoneg = resp->autoneg; 2842 mac->support_autoneg = resp->autoneg_ability; 2843 mac->speed_type = QUERY_ACTIVE_SPEED; 2844 if (!resp->active_fec) 2845 mac->fec_mode = 0; 2846 else 2847 mac->fec_mode = BIT(resp->active_fec); 2848 } else { 2849 mac->speed_type = QUERY_SFP_SPEED; 2850 } 2851 2852 return 0; 2853 } 2854 2855 static int hclge_update_port_info(struct hclge_dev *hdev) 2856 { 2857 struct hclge_mac *mac = &hdev->hw.mac; 2858 int speed = HCLGE_MAC_SPEED_UNKNOWN; 2859 int ret; 2860 2861 /* get the port info from SFP cmd if not copper port */ 2862 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 2863 return 0; 2864 2865 /* if IMP does not support get SFP/qSFP info, return directly */ 2866 if (!hdev->support_sfp_query) 2867 return 0; 2868 2869 if (hdev->pdev->revision >= 0x21) 2870 ret = hclge_get_sfp_info(hdev, mac); 2871 else 2872 ret = hclge_get_sfp_speed(hdev, &speed); 2873 2874 if (ret == -EOPNOTSUPP) { 2875 hdev->support_sfp_query = false; 2876 return ret; 2877 } else if (ret) { 2878 return ret; 2879 } 2880 2881 if (hdev->pdev->revision >= 0x21) { 2882 if (mac->speed_type == QUERY_ACTIVE_SPEED) { 2883 hclge_update_port_capability(mac); 2884 return 0; 2885 } 2886 return hclge_cfg_mac_speed_dup(hdev, mac->speed, 2887 HCLGE_MAC_FULL); 2888 } else { 2889 if (speed == HCLGE_MAC_SPEED_UNKNOWN) 2890 return 0; /* do nothing if no SFP */ 2891 2892 /* must config full duplex for SFP */ 2893 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); 2894 } 2895 } 2896 2897 static int hclge_get_status(struct hnae3_handle *handle) 2898 { 2899 struct hclge_vport *vport = hclge_get_vport(handle); 2900 struct hclge_dev *hdev = vport->back; 2901 2902 hclge_update_link_status(hdev); 2903 2904 return hdev->hw.mac.link; 2905 } 2906 2907 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) 2908 { 2909 if (pci_num_vf(hdev->pdev) == 0) { 2910 dev_err(&hdev->pdev->dev, 2911 "SRIOV is disabled, can not get vport(%d) info.\n", 
				vf);
		return NULL;
	}

	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
		dev_err(&hdev->pdev->dev,
			"vf id(%d) is out of range(0 <= vfid < %d)\n",
			vf, pci_num_vf(hdev->pdev));
		return NULL;
	}

	/* VF vport numbering starts from 1, after the PF vport */
	vf += HCLGE_VF_VPORT_START_NUM;
	return &hdev->vport[vf];
}

static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
			       struct ifla_vf_info *ivf)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	ivf->vf = vf;
	ivf->linkstate = vport->vf_info.link_state;
	ivf->spoofchk = vport->vf_info.spoofchk;
	ivf->trusted = vport->vf_info.trusted;
	ivf->min_tx_rate = 0;
	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
	ether_addr_copy(ivf->mac, vport->vf_info.mac);

	return 0;
}

static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
				   int link_state)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	vport->vf_info.link_state = link_state;

	return 0;
}

static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;

	/* fetch the events from their corresponding regs */
	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
	msix_src_reg = hclge_read_dev(&hdev->hw,
				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);

	/* Assumption: if by any chance reset and mailbox events are reported
	 * together, then we will only process the reset event in this pass
	 * and will defer the processing of the mailbox events. Since we do
	 * not clear the RX CMDQ event this time, the hardware will raise
	 * another interrupt just for the mailbox.
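	 * Events are checked in priority order below: IMP reset first, then
	 * global reset, then MSI-X (hardware error) events and finally the
	 * mailbox (CMDQ RX) event.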
	 *
	 * check for vector0 reset event sources
	 */
	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		hdev->rst_stats.imp_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		hdev->rst_stats.global_rst_cnt++;
		return HCLGE_VECTOR0_EVENT_RST;
	}

	/* check for vector0 msix event source */
	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
			 msix_src_reg);
		*clearval = msix_src_reg;
		return HCLGE_VECTOR0_EVENT_ERR;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGE_VECTOR0_EVENT_MBX;
	}

	/* print other vector0 event source */
	dev_info(&hdev->pdev->dev,
		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
		 cmdq_src_reg, msix_src_reg);
	*clearval = msix_src_reg;

	return HCLGE_VECTOR0_EVENT_OTHER;
}

static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
				    u32 regclr)
{
	switch (event_type) {
	case HCLGE_VECTOR0_EVENT_RST:
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
		break;
	case HCLGE_VECTOR0_EVENT_MBX:
		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
		break;
	default:
		break;
	}
}

static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
{
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
}

static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
{
	writel(enable ? 1 : 0, vector->addr);
}

static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
	struct hclge_dev *hdev = data;
	u32 clearval = 0;
	u32 event_cause;

	hclge_enable_vector(&hdev->misc_vector, false);
	event_cause = hclge_check_event_cause(hdev, &clearval);

	/* vector 0 interrupt is shared with reset and mailbox source events. */
	switch (event_cause) {
	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This
		 * could only be decided after we fetch the type of errors
		 * which caused this event. Therefore, we will do the
		 * following for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have deferred the choice of reset type to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET it
		 *    will fetch the correct type of reset. This is done by
		 *    first decoding the types of errors.
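		 *    The decoding is done in hclge_get_reset_level(), which
		 *    calls hclge_handle_hw_msix_error() to resolve
		 *    HNAE3_UNKNOWN_RESET into a concrete reset level.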
3073 */ 3074 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); 3075 /* fall through */ 3076 case HCLGE_VECTOR0_EVENT_RST: 3077 hclge_reset_task_schedule(hdev); 3078 break; 3079 case HCLGE_VECTOR0_EVENT_MBX: 3080 /* If we are here then, 3081 * 1. Either we are not handling any mbx task and we are not 3082 * scheduled as well 3083 * OR 3084 * 2. We could be handling a mbx task but nothing more is 3085 * scheduled. 3086 * In both cases, we should schedule mbx task as there are more 3087 * mbx messages reported by this interrupt. 3088 */ 3089 hclge_mbx_task_schedule(hdev); 3090 break; 3091 default: 3092 dev_warn(&hdev->pdev->dev, 3093 "received unknown or unhandled event of vector0\n"); 3094 break; 3095 } 3096 3097 hclge_clear_event_cause(hdev, event_cause, clearval); 3098 3099 /* Enable interrupt if it is not cause by reset. And when 3100 * clearval equal to 0, it means interrupt status may be 3101 * cleared by hardware before driver reads status register. 3102 * For this case, vector0 interrupt also should be enabled. 3103 */ 3104 if (!clearval || 3105 event_cause == HCLGE_VECTOR0_EVENT_MBX) { 3106 hclge_enable_vector(&hdev->misc_vector, true); 3107 } 3108 3109 return IRQ_HANDLED; 3110 } 3111 3112 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 3113 { 3114 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { 3115 dev_warn(&hdev->pdev->dev, 3116 "vector(vector_id %d) has been freed.\n", vector_id); 3117 return; 3118 } 3119 3120 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 3121 hdev->num_msi_left += 1; 3122 hdev->num_msi_used -= 1; 3123 } 3124 3125 static void hclge_get_misc_vector(struct hclge_dev *hdev) 3126 { 3127 struct hclge_misc_vector *vector = &hdev->misc_vector; 3128 3129 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 3130 3131 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 3132 hdev->vector_status[0] = 0; 3133 3134 hdev->num_msi_left -= 1; 3135 hdev->num_msi_used += 1; 3136 } 3137 3138 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify, 3139 const cpumask_t *mask) 3140 { 3141 struct hclge_dev *hdev = container_of(notify, struct hclge_dev, 3142 affinity_notify); 3143 3144 cpumask_copy(&hdev->affinity_mask, mask); 3145 } 3146 3147 static void hclge_irq_affinity_release(struct kref *ref) 3148 { 3149 } 3150 3151 static void hclge_misc_affinity_setup(struct hclge_dev *hdev) 3152 { 3153 irq_set_affinity_hint(hdev->misc_vector.vector_irq, 3154 &hdev->affinity_mask); 3155 3156 hdev->affinity_notify.notify = hclge_irq_affinity_notify; 3157 hdev->affinity_notify.release = hclge_irq_affinity_release; 3158 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, 3159 &hdev->affinity_notify); 3160 } 3161 3162 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) 3163 { 3164 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL); 3165 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); 3166 } 3167 3168 static int hclge_misc_irq_init(struct hclge_dev *hdev) 3169 { 3170 int ret; 3171 3172 hclge_get_misc_vector(hdev); 3173 3174 /* this would be explicitly freed in the end */ 3175 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 3176 0, "hclge_misc", hdev); 3177 if (ret) { 3178 hclge_free_vector(hdev, 0); 3179 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 3180 hdev->misc_vector.vector_irq); 3181 } 3182 3183 return ret; 3184 } 3185 3186 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 3187 { 3188 free_irq(hdev->misc_vector.vector_irq, hdev); 3189 
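	/* give misc vector 0 back to the vector pool now that its IRQ has
	 * been freed
	 */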
hclge_free_vector(hdev, 0); 3190 } 3191 3192 int hclge_notify_client(struct hclge_dev *hdev, 3193 enum hnae3_reset_notify_type type) 3194 { 3195 struct hnae3_client *client = hdev->nic_client; 3196 u16 i; 3197 3198 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) 3199 return 0; 3200 3201 if (!client->ops->reset_notify) 3202 return -EOPNOTSUPP; 3203 3204 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 3205 struct hnae3_handle *handle = &hdev->vport[i].nic; 3206 int ret; 3207 3208 ret = client->ops->reset_notify(handle, type); 3209 if (ret) { 3210 dev_err(&hdev->pdev->dev, 3211 "notify nic client failed %d(%d)\n", type, ret); 3212 return ret; 3213 } 3214 } 3215 3216 return 0; 3217 } 3218 3219 static int hclge_notify_roce_client(struct hclge_dev *hdev, 3220 enum hnae3_reset_notify_type type) 3221 { 3222 struct hnae3_client *client = hdev->roce_client; 3223 int ret = 0; 3224 u16 i; 3225 3226 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) 3227 return 0; 3228 3229 if (!client->ops->reset_notify) 3230 return -EOPNOTSUPP; 3231 3232 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 3233 struct hnae3_handle *handle = &hdev->vport[i].roce; 3234 3235 ret = client->ops->reset_notify(handle, type); 3236 if (ret) { 3237 dev_err(&hdev->pdev->dev, 3238 "notify roce client failed %d(%d)", 3239 type, ret); 3240 return ret; 3241 } 3242 } 3243 3244 return ret; 3245 } 3246 3247 static int hclge_reset_wait(struct hclge_dev *hdev) 3248 { 3249 #define HCLGE_RESET_WATI_MS 100 3250 #define HCLGE_RESET_WAIT_CNT 200 3251 u32 val, reg, reg_bit; 3252 u32 cnt = 0; 3253 3254 switch (hdev->reset_type) { 3255 case HNAE3_IMP_RESET: 3256 reg = HCLGE_GLOBAL_RESET_REG; 3257 reg_bit = HCLGE_IMP_RESET_BIT; 3258 break; 3259 case HNAE3_GLOBAL_RESET: 3260 reg = HCLGE_GLOBAL_RESET_REG; 3261 reg_bit = HCLGE_GLOBAL_RESET_BIT; 3262 break; 3263 case HNAE3_FUNC_RESET: 3264 reg = HCLGE_FUN_RST_ING; 3265 reg_bit = HCLGE_FUN_RST_ING_B; 3266 break; 3267 case HNAE3_FLR_RESET: 3268 break; 3269 default: 3270 dev_err(&hdev->pdev->dev, 3271 "Wait for unsupported reset type: %d\n", 3272 hdev->reset_type); 3273 return -EINVAL; 3274 } 3275 3276 if (hdev->reset_type == HNAE3_FLR_RESET) { 3277 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && 3278 cnt++ < HCLGE_RESET_WAIT_CNT) 3279 msleep(HCLGE_RESET_WATI_MS); 3280 3281 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { 3282 dev_err(&hdev->pdev->dev, 3283 "flr wait timeout: %u\n", cnt); 3284 return -EBUSY; 3285 } 3286 3287 return 0; 3288 } 3289 3290 val = hclge_read_dev(&hdev->hw, reg); 3291 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 3292 msleep(HCLGE_RESET_WATI_MS); 3293 val = hclge_read_dev(&hdev->hw, reg); 3294 cnt++; 3295 } 3296 3297 if (cnt >= HCLGE_RESET_WAIT_CNT) { 3298 dev_warn(&hdev->pdev->dev, 3299 "Wait for reset timeout: %d\n", hdev->reset_type); 3300 return -EBUSY; 3301 } 3302 3303 return 0; 3304 } 3305 3306 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) 3307 { 3308 struct hclge_vf_rst_cmd *req; 3309 struct hclge_desc desc; 3310 3311 req = (struct hclge_vf_rst_cmd *)desc.data; 3312 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); 3313 req->dest_vfid = func_id; 3314 3315 if (reset) 3316 req->vf_rst = 0x1; 3317 3318 return hclge_cmd_send(&hdev->hw, &desc, 1); 3319 } 3320 3321 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) 3322 { 3323 int i; 3324 3325 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { 3326 struct hclge_vport *vport = 
&hdev->vport[i]; 3327 int ret; 3328 3329 /* Send cmd to set/clear VF's FUNC_RST_ING */ 3330 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); 3331 if (ret) { 3332 dev_err(&hdev->pdev->dev, 3333 "set vf(%u) rst failed %d!\n", 3334 vport->vport_id, ret); 3335 return ret; 3336 } 3337 3338 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 3339 continue; 3340 3341 /* Inform VF to process the reset. 3342 * hclge_inform_reset_assert_to_vf may fail if VF 3343 * driver is not loaded. 3344 */ 3345 ret = hclge_inform_reset_assert_to_vf(vport); 3346 if (ret) 3347 dev_warn(&hdev->pdev->dev, 3348 "inform reset to vf(%u) failed %d!\n", 3349 vport->vport_id, ret); 3350 } 3351 3352 return 0; 3353 } 3354 3355 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev) 3356 { 3357 struct hclge_pf_rst_sync_cmd *req; 3358 struct hclge_desc desc; 3359 int cnt = 0; 3360 int ret; 3361 3362 req = (struct hclge_pf_rst_sync_cmd *)desc.data; 3363 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); 3364 3365 do { 3366 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3367 /* for compatible with old firmware, wait 3368 * 100 ms for VF to stop IO 3369 */ 3370 if (ret == -EOPNOTSUPP) { 3371 msleep(HCLGE_RESET_SYNC_TIME); 3372 return 0; 3373 } else if (ret) { 3374 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n", 3375 ret); 3376 return ret; 3377 } else if (req->all_vf_ready) { 3378 return 0; 3379 } 3380 msleep(HCLGE_PF_RESET_SYNC_TIME); 3381 hclge_cmd_reuse_desc(&desc, true); 3382 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); 3383 3384 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n"); 3385 return -ETIME; 3386 } 3387 3388 void hclge_report_hw_error(struct hclge_dev *hdev, 3389 enum hnae3_hw_error_type type) 3390 { 3391 struct hnae3_client *client = hdev->nic_client; 3392 u16 i; 3393 3394 if (!client || !client->ops->process_hw_error || 3395 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) 3396 return; 3397 3398 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) 3399 client->ops->process_hw_error(&hdev->vport[i].nic, type); 3400 } 3401 3402 static void hclge_handle_imp_error(struct hclge_dev *hdev) 3403 { 3404 u32 reg_val; 3405 3406 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 3407 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { 3408 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); 3409 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); 3410 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 3411 } 3412 3413 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { 3414 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); 3415 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); 3416 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 3417 } 3418 } 3419 3420 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 3421 { 3422 struct hclge_desc desc; 3423 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 3424 int ret; 3425 3426 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 3427 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 3428 req->fun_reset_vfid = func_id; 3429 3430 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3431 if (ret) 3432 dev_err(&hdev->pdev->dev, 3433 "send function reset cmd fail, status =%d\n", ret); 3434 3435 return ret; 3436 } 3437 3438 static void hclge_do_reset(struct hclge_dev *hdev) 3439 { 3440 struct hnae3_handle *handle = &hdev->vport[0].nic; 3441 struct pci_dev *pdev = hdev->pdev; 3442 u32 val; 3443 3444 if (hclge_get_hw_reset_stat(handle)) { 3445 dev_info(&pdev->dev, "Hardware reset 
not finish\n");
		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
		return;
	}

	switch (hdev->reset_type) {
	case HNAE3_GLOBAL_RESET:
		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
		dev_info(&pdev->dev, "Global Reset requested\n");
		break;
	case HNAE3_FUNC_RESET:
		dev_info(&pdev->dev, "PF Reset requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	case HNAE3_FLR_RESET:
		dev_info(&pdev->dev, "FLR requested\n");
		/* schedule again to check later */
		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
		break;
	default:
		dev_warn(&pdev->dev,
			 "Unsupported reset type: %d\n", hdev->reset_type);
		break;
	}
}

static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	/* first, resolve any unknown reset type to the known type(s) */
	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
		hclge_handle_hw_msix_error(hdev, addr);
		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * the interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced
		 * the new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable
		 * interrupts. This is an exception to the norm.
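		 * Re-enabling the vector here is required because
		 * hclge_misc_irq_handle() disables it on entry and leaves it
		 * disabled on the error path.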
3497 */ 3498 hclge_enable_vector(&hdev->misc_vector, true); 3499 } 3500 3501 /* return the highest priority reset level amongst all */ 3502 if (test_bit(HNAE3_IMP_RESET, addr)) { 3503 rst_level = HNAE3_IMP_RESET; 3504 clear_bit(HNAE3_IMP_RESET, addr); 3505 clear_bit(HNAE3_GLOBAL_RESET, addr); 3506 clear_bit(HNAE3_FUNC_RESET, addr); 3507 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { 3508 rst_level = HNAE3_GLOBAL_RESET; 3509 clear_bit(HNAE3_GLOBAL_RESET, addr); 3510 clear_bit(HNAE3_FUNC_RESET, addr); 3511 } else if (test_bit(HNAE3_FUNC_RESET, addr)) { 3512 rst_level = HNAE3_FUNC_RESET; 3513 clear_bit(HNAE3_FUNC_RESET, addr); 3514 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 3515 rst_level = HNAE3_FLR_RESET; 3516 clear_bit(HNAE3_FLR_RESET, addr); 3517 } 3518 3519 if (hdev->reset_type != HNAE3_NONE_RESET && 3520 rst_level < hdev->reset_type) 3521 return HNAE3_NONE_RESET; 3522 3523 return rst_level; 3524 } 3525 3526 static void hclge_clear_reset_cause(struct hclge_dev *hdev) 3527 { 3528 u32 clearval = 0; 3529 3530 switch (hdev->reset_type) { 3531 case HNAE3_IMP_RESET: 3532 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 3533 break; 3534 case HNAE3_GLOBAL_RESET: 3535 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 3536 break; 3537 default: 3538 break; 3539 } 3540 3541 if (!clearval) 3542 return; 3543 3544 /* For revision 0x20, the reset interrupt source 3545 * can only be cleared after hardware reset done 3546 */ 3547 if (hdev->pdev->revision == 0x20) 3548 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, 3549 clearval); 3550 3551 hclge_enable_vector(&hdev->misc_vector, true); 3552 } 3553 3554 static int hclge_reset_prepare_down(struct hclge_dev *hdev) 3555 { 3556 int ret = 0; 3557 3558 switch (hdev->reset_type) { 3559 case HNAE3_FUNC_RESET: 3560 /* fall through */ 3561 case HNAE3_FLR_RESET: 3562 ret = hclge_set_all_vf_rst(hdev, true); 3563 break; 3564 default: 3565 break; 3566 } 3567 3568 return ret; 3569 } 3570 3571 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) 3572 { 3573 u32 reg_val; 3574 3575 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG); 3576 if (enable) 3577 reg_val |= HCLGE_NIC_SW_RST_RDY; 3578 else 3579 reg_val &= ~HCLGE_NIC_SW_RST_RDY; 3580 3581 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val); 3582 } 3583 3584 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) 3585 { 3586 u32 reg_val; 3587 int ret = 0; 3588 3589 switch (hdev->reset_type) { 3590 case HNAE3_FUNC_RESET: 3591 /* to confirm whether all running VF is ready 3592 * before request PF reset 3593 */ 3594 ret = hclge_func_reset_sync_vf(hdev); 3595 if (ret) 3596 return ret; 3597 3598 ret = hclge_func_reset_cmd(hdev, 0); 3599 if (ret) { 3600 dev_err(&hdev->pdev->dev, 3601 "asserting function reset fail %d!\n", ret); 3602 return ret; 3603 } 3604 3605 /* After performaning pf reset, it is not necessary to do the 3606 * mailbox handling or send any command to firmware, because 3607 * any mailbox handling or command to firmware is only valid 3608 * after hclge_cmd_init is called. 
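		 * Setting HCLGE_STATE_CMD_DISABLE below prevents further
		 * command and mailbox processing until hclge_cmd_init() is
		 * called again once the reset completes.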
3609 */ 3610 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 3611 hdev->rst_stats.pf_rst_cnt++; 3612 break; 3613 case HNAE3_FLR_RESET: 3614 /* to confirm whether all running VF is ready 3615 * before request PF reset 3616 */ 3617 ret = hclge_func_reset_sync_vf(hdev); 3618 if (ret) 3619 return ret; 3620 3621 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 3622 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 3623 hdev->rst_stats.flr_rst_cnt++; 3624 break; 3625 case HNAE3_IMP_RESET: 3626 hclge_handle_imp_error(hdev); 3627 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 3628 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, 3629 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); 3630 break; 3631 default: 3632 break; 3633 } 3634 3635 /* inform hardware that preparatory work is done */ 3636 msleep(HCLGE_RESET_SYNC_TIME); 3637 hclge_reset_handshake(hdev, true); 3638 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); 3639 3640 return ret; 3641 } 3642 3643 static bool hclge_reset_err_handle(struct hclge_dev *hdev) 3644 { 3645 #define MAX_RESET_FAIL_CNT 5 3646 3647 if (hdev->reset_pending) { 3648 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", 3649 hdev->reset_pending); 3650 return true; 3651 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & 3652 HCLGE_RESET_INT_M) { 3653 dev_info(&hdev->pdev->dev, 3654 "reset failed because new reset interrupt\n"); 3655 hclge_clear_reset_cause(hdev); 3656 return false; 3657 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { 3658 hdev->rst_stats.reset_fail_cnt++; 3659 set_bit(hdev->reset_type, &hdev->reset_pending); 3660 dev_info(&hdev->pdev->dev, 3661 "re-schedule reset task(%u)\n", 3662 hdev->rst_stats.reset_fail_cnt); 3663 return true; 3664 } 3665 3666 hclge_clear_reset_cause(hdev); 3667 3668 /* recover the handshake status when reset fail */ 3669 hclge_reset_handshake(hdev, true); 3670 3671 dev_err(&hdev->pdev->dev, "Reset fail!\n"); 3672 3673 hclge_dbg_dump_rst_info(hdev); 3674 3675 return false; 3676 } 3677 3678 static int hclge_set_rst_done(struct hclge_dev *hdev) 3679 { 3680 struct hclge_pf_rst_done_cmd *req; 3681 struct hclge_desc desc; 3682 int ret; 3683 3684 req = (struct hclge_pf_rst_done_cmd *)desc.data; 3685 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); 3686 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; 3687 3688 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3689 /* To be compatible with the old firmware, which does not support 3690 * command HCLGE_OPC_PF_RST_DONE, just print a warning and 3691 * return success 3692 */ 3693 if (ret == -EOPNOTSUPP) { 3694 dev_warn(&hdev->pdev->dev, 3695 "current firmware does not support command(0x%x)!\n", 3696 HCLGE_OPC_PF_RST_DONE); 3697 return 0; 3698 } else if (ret) { 3699 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", 3700 ret); 3701 } 3702 3703 return ret; 3704 } 3705 3706 static int hclge_reset_prepare_up(struct hclge_dev *hdev) 3707 { 3708 int ret = 0; 3709 3710 switch (hdev->reset_type) { 3711 case HNAE3_FUNC_RESET: 3712 /* fall through */ 3713 case HNAE3_FLR_RESET: 3714 ret = hclge_set_all_vf_rst(hdev, false); 3715 break; 3716 case HNAE3_GLOBAL_RESET: 3717 /* fall through */ 3718 case HNAE3_IMP_RESET: 3719 ret = hclge_set_rst_done(hdev); 3720 break; 3721 default: 3722 break; 3723 } 3724 3725 /* clear up the handshake status after re-initialize done */ 3726 hclge_reset_handshake(hdev, false); 3727 3728 return ret; 3729 } 3730 3731 static int hclge_reset_stack(struct hclge_dev *hdev) 3732 { 3733 int ret; 3734 3735 ret = hclge_notify_client(hdev, 
HNAE3_UNINIT_CLIENT); 3736 if (ret) 3737 return ret; 3738 3739 ret = hclge_reset_ae_dev(hdev->ae_dev); 3740 if (ret) 3741 return ret; 3742 3743 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 3744 if (ret) 3745 return ret; 3746 3747 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT); 3748 } 3749 3750 static void hclge_reset(struct hclge_dev *hdev) 3751 { 3752 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 3753 enum hnae3_reset_type reset_level; 3754 int ret; 3755 3756 /* Initialize ae_dev reset status as well, in case enet layer wants to 3757 * know if device is undergoing reset 3758 */ 3759 ae_dev->reset_type = hdev->reset_type; 3760 hdev->rst_stats.reset_cnt++; 3761 /* perform reset of the stack & ae device for a client */ 3762 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 3763 if (ret) 3764 goto err_reset; 3765 3766 ret = hclge_reset_prepare_down(hdev); 3767 if (ret) 3768 goto err_reset; 3769 3770 rtnl_lock(); 3771 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 3772 if (ret) 3773 goto err_reset_lock; 3774 3775 rtnl_unlock(); 3776 3777 ret = hclge_reset_prepare_wait(hdev); 3778 if (ret) 3779 goto err_reset; 3780 3781 if (hclge_reset_wait(hdev)) 3782 goto err_reset; 3783 3784 hdev->rst_stats.hw_reset_done_cnt++; 3785 3786 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 3787 if (ret) 3788 goto err_reset; 3789 3790 rtnl_lock(); 3791 3792 ret = hclge_reset_stack(hdev); 3793 if (ret) 3794 goto err_reset_lock; 3795 3796 hclge_clear_reset_cause(hdev); 3797 3798 ret = hclge_reset_prepare_up(hdev); 3799 if (ret) 3800 goto err_reset_lock; 3801 3802 rtnl_unlock(); 3803 3804 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 3805 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 3806 * times 3807 */ 3808 if (ret && 3809 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) 3810 goto err_reset; 3811 3812 rtnl_lock(); 3813 3814 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); 3815 if (ret) 3816 goto err_reset_lock; 3817 3818 rtnl_unlock(); 3819 3820 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); 3821 if (ret) 3822 goto err_reset; 3823 3824 hdev->last_reset_time = jiffies; 3825 hdev->rst_stats.reset_fail_cnt = 0; 3826 hdev->rst_stats.reset_done_cnt++; 3827 ae_dev->reset_type = HNAE3_NONE_RESET; 3828 3829 /* if default_reset_request has a higher level reset request, 3830 * it should be handled as soon as possible. since some errors 3831 * need this kind of reset to fix. 3832 */ 3833 reset_level = hclge_get_reset_level(ae_dev, 3834 &hdev->default_reset_request); 3835 if (reset_level != HNAE3_NONE_RESET) 3836 set_bit(reset_level, &hdev->reset_request); 3837 3838 return; 3839 3840 err_reset_lock: 3841 rtnl_unlock(); 3842 err_reset: 3843 if (hclge_reset_err_handle(hdev)) 3844 hclge_reset_task_schedule(hdev); 3845 } 3846 3847 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) 3848 { 3849 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 3850 struct hclge_dev *hdev = ae_dev->priv; 3851 3852 /* We might end up getting called broadly because of 2 below cases: 3853 * 1. Recoverable error was conveyed through APEI and only way to bring 3854 * normalcy is to reset. 3855 * 2. A new reset request from the stack due to timeout 3856 * 3857 * For the first case,error event might not have ae handle available. 3858 * check if this is a new reset request and we are not here just because 3859 * last reset attempt did not succeed and watchdog hit us again. 
We will
	 * know this if the last reset request did not occur very recently
	 * (watchdog timer = 5*HZ, so let us check after a sufficiently large
	 * time, say 4*5*HZ). In case of a new request we reset the
	 * "reset level" to PF reset. And if it is a repeat of the most
	 * recent reset request, then we want to make sure we throttle the
	 * reset request; therefore, we will not allow it again before 3*HZ
	 * has elapsed.
	 */
	if (!handle)
		handle = &hdev->vport[0].nic;

	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL))) {
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
		return;
	} else if (hdev->default_reset_request) {
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
		hdev->reset_level = HNAE3_FUNC_RESET;
	}

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
	if (!hdev->default_reset_request)
		return;

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	/* check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, then we need to
	 * wait for the hardware to complete the reset.
	 * a. If we are able to figure out in reasonable time that the
	 *    hardware has fully reset, then we can proceed with the driver
	 *    and client reset.
	 * b. else, we can come back later to check this status so
	 *    re-schedule now.
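	 * Any pending reset (already asserted in hardware or firmware) is
	 * handled first below; newly requested resets are honored only
	 * after that.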
3928 */ 3929 hdev->last_reset_time = jiffies; 3930 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); 3931 if (hdev->reset_type != HNAE3_NONE_RESET) 3932 hclge_reset(hdev); 3933 3934 /* check if we got any *new* reset requests to be honored */ 3935 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); 3936 if (hdev->reset_type != HNAE3_NONE_RESET) 3937 hclge_do_reset(hdev); 3938 3939 hdev->reset_type = HNAE3_NONE_RESET; 3940 } 3941 3942 static void hclge_reset_service_task(struct work_struct *work) 3943 { 3944 struct hclge_dev *hdev = 3945 container_of(work, struct hclge_dev, rst_service_task); 3946 3947 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3948 return; 3949 3950 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 3951 3952 hclge_reset_subtask(hdev); 3953 3954 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 3955 } 3956 3957 static void hclge_mailbox_service_task(struct work_struct *work) 3958 { 3959 struct hclge_dev *hdev = 3960 container_of(work, struct hclge_dev, mbx_service_task); 3961 3962 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 3963 return; 3964 3965 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 3966 3967 hclge_mbx_handler(hdev); 3968 3969 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 3970 } 3971 3972 static void hclge_update_vport_alive(struct hclge_dev *hdev) 3973 { 3974 int i; 3975 3976 /* start from vport 1 for PF is always alive */ 3977 for (i = 1; i < hdev->num_alloc_vport; i++) { 3978 struct hclge_vport *vport = &hdev->vport[i]; 3979 3980 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) 3981 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 3982 3983 /* If vf is not alive, set to default value */ 3984 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 3985 vport->mps = HCLGE_MAC_DEFAULT_FRAME; 3986 } 3987 } 3988 3989 static void hclge_service_task(struct work_struct *work) 3990 { 3991 struct hclge_dev *hdev = 3992 container_of(work, struct hclge_dev, service_task.work); 3993 3994 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 3995 3996 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { 3997 hclge_update_stats_for_all(hdev); 3998 hdev->hw_stats.stats_timer = 0; 3999 } 4000 4001 hclge_update_port_info(hdev); 4002 hclge_update_link_status(hdev); 4003 hclge_update_vport_alive(hdev); 4004 hclge_sync_vlan_filter(hdev); 4005 4006 if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) { 4007 hclge_rfs_filter_expire(hdev); 4008 hdev->fd_arfs_expire_timer = 0; 4009 } 4010 4011 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); 4012 } 4013 4014 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 4015 { 4016 /* VF handle has no client */ 4017 if (!handle->client) 4018 return container_of(handle, struct hclge_vport, nic); 4019 else if (handle->client->type == HNAE3_CLIENT_ROCE) 4020 return container_of(handle, struct hclge_vport, roce); 4021 else 4022 return container_of(handle, struct hclge_vport, nic); 4023 } 4024 4025 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 4026 struct hnae3_vector_info *vector_info) 4027 { 4028 struct hclge_vport *vport = hclge_get_vport(handle); 4029 struct hnae3_vector_info *vector = vector_info; 4030 struct hclge_dev *hdev = vport->back; 4031 int alloc = 0; 4032 int i, j; 4033 4034 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); 4035 vector_num = min(hdev->num_msi_left, vector_num); 4036 4037 for (j = 0; j < vector_num; j++) { 4038 for (i = 1; i < 
hdev->num_msi; i++) { 4039 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 4040 vector->vector = pci_irq_vector(hdev->pdev, i); 4041 vector->io_addr = hdev->hw.io_base + 4042 HCLGE_VECTOR_REG_BASE + 4043 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 4044 vport->vport_id * 4045 HCLGE_VECTOR_VF_OFFSET; 4046 hdev->vector_status[i] = vport->vport_id; 4047 hdev->vector_irq[i] = vector->vector; 4048 4049 vector++; 4050 alloc++; 4051 4052 break; 4053 } 4054 } 4055 } 4056 hdev->num_msi_left -= alloc; 4057 hdev->num_msi_used += alloc; 4058 4059 return alloc; 4060 } 4061 4062 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 4063 { 4064 int i; 4065 4066 for (i = 0; i < hdev->num_msi; i++) 4067 if (vector == hdev->vector_irq[i]) 4068 return i; 4069 4070 return -EINVAL; 4071 } 4072 4073 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 4074 { 4075 struct hclge_vport *vport = hclge_get_vport(handle); 4076 struct hclge_dev *hdev = vport->back; 4077 int vector_id; 4078 4079 vector_id = hclge_get_vector_index(hdev, vector); 4080 if (vector_id < 0) { 4081 dev_err(&hdev->pdev->dev, 4082 "Get vector index fail. vector_id =%d\n", vector_id); 4083 return vector_id; 4084 } 4085 4086 hclge_free_vector(hdev, vector_id); 4087 4088 return 0; 4089 } 4090 4091 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 4092 { 4093 return HCLGE_RSS_KEY_SIZE; 4094 } 4095 4096 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 4097 { 4098 return HCLGE_RSS_IND_TBL_SIZE; 4099 } 4100 4101 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 4102 const u8 hfunc, const u8 *key) 4103 { 4104 struct hclge_rss_config_cmd *req; 4105 unsigned int key_offset = 0; 4106 struct hclge_desc desc; 4107 int key_counts; 4108 int key_size; 4109 int ret; 4110 4111 key_counts = HCLGE_RSS_KEY_SIZE; 4112 req = (struct hclge_rss_config_cmd *)desc.data; 4113 4114 while (key_counts) { 4115 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 4116 false); 4117 4118 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 4119 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 4120 4121 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts); 4122 memcpy(req->hash_key, 4123 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 4124 4125 key_counts -= key_size; 4126 key_offset++; 4127 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4128 if (ret) { 4129 dev_err(&hdev->pdev->dev, 4130 "Configure RSS config fail, status = %d\n", 4131 ret); 4132 return ret; 4133 } 4134 } 4135 return 0; 4136 } 4137 4138 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) 4139 { 4140 struct hclge_rss_indirection_table_cmd *req; 4141 struct hclge_desc desc; 4142 int i, j; 4143 int ret; 4144 4145 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 4146 4147 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 4148 hclge_cmd_setup_basic_desc 4149 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 4150 4151 req->start_table_index = 4152 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 4153 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 4154 4155 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 4156 req->rss_result[j] = 4157 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 4158 4159 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4160 if (ret) { 4161 dev_err(&hdev->pdev->dev, 4162 "Configure rss indir table fail,status = %d\n", 4163 ret); 4164 return ret; 4165 } 4166 } 4167 return 0; 4168 } 4169 4170 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 4171 u16 *tc_size, u16 
*tc_offset) 4172 { 4173 struct hclge_rss_tc_mode_cmd *req; 4174 struct hclge_desc desc; 4175 int ret; 4176 int i; 4177 4178 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 4179 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 4180 4181 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 4182 u16 mode = 0; 4183 4184 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 4185 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, 4186 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 4187 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 4188 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 4189 4190 req->rss_tc_mode[i] = cpu_to_le16(mode); 4191 } 4192 4193 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4194 if (ret) 4195 dev_err(&hdev->pdev->dev, 4196 "Configure rss tc mode fail, status = %d\n", ret); 4197 4198 return ret; 4199 } 4200 4201 static void hclge_get_rss_type(struct hclge_vport *vport) 4202 { 4203 if (vport->rss_tuple_sets.ipv4_tcp_en || 4204 vport->rss_tuple_sets.ipv4_udp_en || 4205 vport->rss_tuple_sets.ipv4_sctp_en || 4206 vport->rss_tuple_sets.ipv6_tcp_en || 4207 vport->rss_tuple_sets.ipv6_udp_en || 4208 vport->rss_tuple_sets.ipv6_sctp_en) 4209 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; 4210 else if (vport->rss_tuple_sets.ipv4_fragment_en || 4211 vport->rss_tuple_sets.ipv6_fragment_en) 4212 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; 4213 else 4214 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; 4215 } 4216 4217 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 4218 { 4219 struct hclge_rss_input_tuple_cmd *req; 4220 struct hclge_desc desc; 4221 int ret; 4222 4223 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 4224 4225 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 4226 4227 /* Get the tuple cfg from pf */ 4228 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; 4229 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; 4230 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; 4231 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; 4232 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; 4233 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; 4234 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; 4235 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; 4236 hclge_get_rss_type(&hdev->vport[0]); 4237 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4238 if (ret) 4239 dev_err(&hdev->pdev->dev, 4240 "Configure rss input fail, status = %d\n", ret); 4241 return ret; 4242 } 4243 4244 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 4245 u8 *key, u8 *hfunc) 4246 { 4247 struct hclge_vport *vport = hclge_get_vport(handle); 4248 int i; 4249 4250 /* Get hash algorithm */ 4251 if (hfunc) { 4252 switch (vport->rss_algo) { 4253 case HCLGE_RSS_HASH_ALGO_TOEPLITZ: 4254 *hfunc = ETH_RSS_HASH_TOP; 4255 break; 4256 case HCLGE_RSS_HASH_ALGO_SIMPLE: 4257 *hfunc = ETH_RSS_HASH_XOR; 4258 break; 4259 default: 4260 *hfunc = ETH_RSS_HASH_UNKNOWN; 4261 break; 4262 } 4263 } 4264 4265 /* Get the RSS Key required by the user */ 4266 if (key) 4267 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 4268 4269 /* Get indirect table */ 4270 if (indir) 4271 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 4272 indir[i] = vport->rss_indirection_tbl[i]; 4273 4274 return 0; 4275 } 4276 4277 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 4278 const u8 *key, const u8 hfunc) 4279 { 4280 struct hclge_vport *vport = hclge_get_vport(handle); 4281 struct hclge_dev *hdev = 
vport->back; 4282 u8 hash_algo; 4283 int ret, i; 4284 4285 /* Set the RSS Hash Key if specififed by the user */ 4286 if (key) { 4287 switch (hfunc) { 4288 case ETH_RSS_HASH_TOP: 4289 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 4290 break; 4291 case ETH_RSS_HASH_XOR: 4292 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; 4293 break; 4294 case ETH_RSS_HASH_NO_CHANGE: 4295 hash_algo = vport->rss_algo; 4296 break; 4297 default: 4298 return -EINVAL; 4299 } 4300 4301 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 4302 if (ret) 4303 return ret; 4304 4305 /* Update the shadow RSS key with user specified qids */ 4306 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 4307 vport->rss_algo = hash_algo; 4308 } 4309 4310 /* Update the shadow RSS table with user specified qids */ 4311 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 4312 vport->rss_indirection_tbl[i] = indir[i]; 4313 4314 /* Update the hardware */ 4315 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); 4316 } 4317 4318 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 4319 { 4320 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; 4321 4322 if (nfc->data & RXH_L4_B_2_3) 4323 hash_sets |= HCLGE_D_PORT_BIT; 4324 else 4325 hash_sets &= ~HCLGE_D_PORT_BIT; 4326 4327 if (nfc->data & RXH_IP_SRC) 4328 hash_sets |= HCLGE_S_IP_BIT; 4329 else 4330 hash_sets &= ~HCLGE_S_IP_BIT; 4331 4332 if (nfc->data & RXH_IP_DST) 4333 hash_sets |= HCLGE_D_IP_BIT; 4334 else 4335 hash_sets &= ~HCLGE_D_IP_BIT; 4336 4337 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 4338 hash_sets |= HCLGE_V_TAG_BIT; 4339 4340 return hash_sets; 4341 } 4342 4343 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 4344 struct ethtool_rxnfc *nfc) 4345 { 4346 struct hclge_vport *vport = hclge_get_vport(handle); 4347 struct hclge_dev *hdev = vport->back; 4348 struct hclge_rss_input_tuple_cmd *req; 4349 struct hclge_desc desc; 4350 u8 tuple_sets; 4351 int ret; 4352 4353 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 4354 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 4355 return -EINVAL; 4356 4357 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 4358 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 4359 4360 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; 4361 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; 4362 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; 4363 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; 4364 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; 4365 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; 4366 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; 4367 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; 4368 4369 tuple_sets = hclge_get_rss_hash_bits(nfc); 4370 switch (nfc->flow_type) { 4371 case TCP_V4_FLOW: 4372 req->ipv4_tcp_en = tuple_sets; 4373 break; 4374 case TCP_V6_FLOW: 4375 req->ipv6_tcp_en = tuple_sets; 4376 break; 4377 case UDP_V4_FLOW: 4378 req->ipv4_udp_en = tuple_sets; 4379 break; 4380 case UDP_V6_FLOW: 4381 req->ipv6_udp_en = tuple_sets; 4382 break; 4383 case SCTP_V4_FLOW: 4384 req->ipv4_sctp_en = tuple_sets; 4385 break; 4386 case SCTP_V6_FLOW: 4387 if ((nfc->data & RXH_L4_B_0_1) || 4388 (nfc->data & RXH_L4_B_2_3)) 4389 return -EINVAL; 4390 4391 req->ipv6_sctp_en = tuple_sets; 4392 break; 4393 case IPV4_FLOW: 4394 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 4395 break; 4396 case IPV6_FLOW: 4397 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 4398 break; 4399 default: 4400 return -EINVAL; 4401 } 4402 4403 ret 
= hclge_cmd_send(&hdev->hw, &desc, 1); 4404 if (ret) { 4405 dev_err(&hdev->pdev->dev, 4406 "Set rss tuple fail, status = %d\n", ret); 4407 return ret; 4408 } 4409 4410 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 4411 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 4412 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 4413 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 4414 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 4415 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 4416 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 4417 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 4418 hclge_get_rss_type(vport); 4419 return 0; 4420 } 4421 4422 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 4423 struct ethtool_rxnfc *nfc) 4424 { 4425 struct hclge_vport *vport = hclge_get_vport(handle); 4426 u8 tuple_sets; 4427 4428 nfc->data = 0; 4429 4430 switch (nfc->flow_type) { 4431 case TCP_V4_FLOW: 4432 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; 4433 break; 4434 case UDP_V4_FLOW: 4435 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; 4436 break; 4437 case TCP_V6_FLOW: 4438 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; 4439 break; 4440 case UDP_V6_FLOW: 4441 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; 4442 break; 4443 case SCTP_V4_FLOW: 4444 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; 4445 break; 4446 case SCTP_V6_FLOW: 4447 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; 4448 break; 4449 case IPV4_FLOW: 4450 case IPV6_FLOW: 4451 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 4452 break; 4453 default: 4454 return -EINVAL; 4455 } 4456 4457 if (!tuple_sets) 4458 return 0; 4459 4460 if (tuple_sets & HCLGE_D_PORT_BIT) 4461 nfc->data |= RXH_L4_B_2_3; 4462 if (tuple_sets & HCLGE_S_PORT_BIT) 4463 nfc->data |= RXH_L4_B_0_1; 4464 if (tuple_sets & HCLGE_D_IP_BIT) 4465 nfc->data |= RXH_IP_DST; 4466 if (tuple_sets & HCLGE_S_IP_BIT) 4467 nfc->data |= RXH_IP_SRC; 4468 4469 return 0; 4470 } 4471 4472 static int hclge_get_tc_size(struct hnae3_handle *handle) 4473 { 4474 struct hclge_vport *vport = hclge_get_vport(handle); 4475 struct hclge_dev *hdev = vport->back; 4476 4477 return hdev->rss_size_max; 4478 } 4479 4480 int hclge_rss_init_hw(struct hclge_dev *hdev) 4481 { 4482 struct hclge_vport *vport = hdev->vport; 4483 u8 *rss_indir = vport[0].rss_indirection_tbl; 4484 u16 rss_size = vport[0].alloc_rss_size; 4485 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 4486 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 4487 u8 *key = vport[0].rss_hash_key; 4488 u8 hfunc = vport[0].rss_algo; 4489 u16 tc_valid[HCLGE_MAX_TC_NUM]; 4490 u16 roundup_size; 4491 unsigned int i; 4492 int ret; 4493 4494 ret = hclge_set_rss_indir_table(hdev, rss_indir); 4495 if (ret) 4496 return ret; 4497 4498 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 4499 if (ret) 4500 return ret; 4501 4502 ret = hclge_set_rss_input_tuple(hdev); 4503 if (ret) 4504 return ret; 4505 4506 /* Each TC have the same queue size, and tc_size set to hardware is 4507 * the log2 of roundup power of two of rss_size, the acutal queue 4508 * size is limited by indirection table. 
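 * For example, if rss_size is 24, roundup_pow_of_two(24) is 32 and the
 * tc_size written to hardware is ilog2(32) = 5; entries beyond the 24
 * allocated queues are never hit because hclge_rss_indir_init_cfg() only
 * fills the indirection table with queue ids below alloc_rss_size.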
4509 */ 4510 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 4511 dev_err(&hdev->pdev->dev, 4512 "Configure rss tc size failed, invalid TC_SIZE = %u\n", 4513 rss_size); 4514 return -EINVAL; 4515 } 4516 4517 roundup_size = roundup_pow_of_two(rss_size); 4518 roundup_size = ilog2(roundup_size); 4519 4520 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 4521 tc_valid[i] = 0; 4522 4523 if (!(hdev->hw_tc_map & BIT(i))) 4524 continue; 4525 4526 tc_valid[i] = 1; 4527 tc_size[i] = roundup_size; 4528 tc_offset[i] = rss_size * i; 4529 } 4530 4531 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 4532 } 4533 4534 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) 4535 { 4536 struct hclge_vport *vport = hdev->vport; 4537 int i, j; 4538 4539 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 4540 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 4541 vport[j].rss_indirection_tbl[i] = 4542 i % vport[j].alloc_rss_size; 4543 } 4544 } 4545 4546 static void hclge_rss_init_cfg(struct hclge_dev *hdev) 4547 { 4548 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 4549 struct hclge_vport *vport = hdev->vport; 4550 4551 if (hdev->pdev->revision >= 0x21) 4552 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; 4553 4554 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 4555 vport[i].rss_tuple_sets.ipv4_tcp_en = 4556 HCLGE_RSS_INPUT_TUPLE_OTHER; 4557 vport[i].rss_tuple_sets.ipv4_udp_en = 4558 HCLGE_RSS_INPUT_TUPLE_OTHER; 4559 vport[i].rss_tuple_sets.ipv4_sctp_en = 4560 HCLGE_RSS_INPUT_TUPLE_SCTP; 4561 vport[i].rss_tuple_sets.ipv4_fragment_en = 4562 HCLGE_RSS_INPUT_TUPLE_OTHER; 4563 vport[i].rss_tuple_sets.ipv6_tcp_en = 4564 HCLGE_RSS_INPUT_TUPLE_OTHER; 4565 vport[i].rss_tuple_sets.ipv6_udp_en = 4566 HCLGE_RSS_INPUT_TUPLE_OTHER; 4567 vport[i].rss_tuple_sets.ipv6_sctp_en = 4568 HCLGE_RSS_INPUT_TUPLE_SCTP; 4569 vport[i].rss_tuple_sets.ipv6_fragment_en = 4570 HCLGE_RSS_INPUT_TUPLE_OTHER; 4571 4572 vport[i].rss_algo = rss_algo; 4573 4574 memcpy(vport[i].rss_hash_key, hclge_hash_key, 4575 HCLGE_RSS_KEY_SIZE); 4576 } 4577 4578 hclge_rss_indir_init_cfg(hdev); 4579 } 4580 4581 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 4582 int vector_id, bool en, 4583 struct hnae3_ring_chain_node *ring_chain) 4584 { 4585 struct hclge_dev *hdev = vport->back; 4586 struct hnae3_ring_chain_node *node; 4587 struct hclge_desc desc; 4588 struct hclge_ctrl_vector_chain_cmd *req = 4589 (struct hclge_ctrl_vector_chain_cmd *)desc.data; 4590 enum hclge_cmd_status status; 4591 enum hclge_opcode_type op; 4592 u16 tqp_type_and_id; 4593 int i; 4594 4595 op = en ? 
HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 4596 hclge_cmd_setup_basic_desc(&desc, op, false); 4597 req->int_vector_id = vector_id; 4598 4599 i = 0; 4600 for (node = ring_chain; node; node = node->next) { 4601 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 4602 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 4603 HCLGE_INT_TYPE_S, 4604 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); 4605 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 4606 HCLGE_TQP_ID_S, node->tqp_index); 4607 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 4608 HCLGE_INT_GL_IDX_S, 4609 hnae3_get_field(node->int_gl_idx, 4610 HNAE3_RING_GL_IDX_M, 4611 HNAE3_RING_GL_IDX_S)); 4612 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 4613 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 4614 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 4615 req->vfid = vport->vport_id; 4616 4617 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4618 if (status) { 4619 dev_err(&hdev->pdev->dev, 4620 "Map TQP fail, status is %d.\n", 4621 status); 4622 return -EIO; 4623 } 4624 i = 0; 4625 4626 hclge_cmd_setup_basic_desc(&desc, 4627 op, 4628 false); 4629 req->int_vector_id = vector_id; 4630 } 4631 } 4632 4633 if (i > 0) { 4634 req->int_cause_num = i; 4635 req->vfid = vport->vport_id; 4636 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4637 if (status) { 4638 dev_err(&hdev->pdev->dev, 4639 "Map TQP fail, status is %d.\n", status); 4640 return -EIO; 4641 } 4642 } 4643 4644 return 0; 4645 } 4646 4647 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector, 4648 struct hnae3_ring_chain_node *ring_chain) 4649 { 4650 struct hclge_vport *vport = hclge_get_vport(handle); 4651 struct hclge_dev *hdev = vport->back; 4652 int vector_id; 4653 4654 vector_id = hclge_get_vector_index(hdev, vector); 4655 if (vector_id < 0) { 4656 dev_err(&hdev->pdev->dev, 4657 "Get vector index fail. vector_id =%d\n", vector_id); 4658 return vector_id; 4659 } 4660 4661 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 4662 } 4663 4664 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, 4665 struct hnae3_ring_chain_node *ring_chain) 4666 { 4667 struct hclge_vport *vport = hclge_get_vport(handle); 4668 struct hclge_dev *hdev = vport->back; 4669 int vector_id, ret; 4670 4671 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 4672 return 0; 4673 4674 vector_id = hclge_get_vector_index(hdev, vector); 4675 if (vector_id < 0) { 4676 dev_err(&handle->pdev->dev, 4677 "Get vector index fail. ret =%d\n", vector_id); 4678 return vector_id; 4679 } 4680 4681 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 4682 if (ret) 4683 dev_err(&handle->pdev->dev, 4684 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 4685 vector_id, ret); 4686 4687 return ret; 4688 } 4689 4690 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 4691 struct hclge_promisc_param *param) 4692 { 4693 struct hclge_promisc_cfg_cmd *req; 4694 struct hclge_desc desc; 4695 int ret; 4696 4697 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 4698 4699 req = (struct hclge_promisc_cfg_cmd *)desc.data; 4700 req->vf_id = param->vf_id; 4701 4702 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on 4703 * pdev revision(0x20), new revision support them. The 4704 * value of this two fields will not return error when driver 4705 * send command to fireware in revision(0x20). 
4706 */ 4707 req->flag = (param->enable << HCLGE_PROMISC_EN_B) | 4708 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; 4709 4710 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4711 if (ret) 4712 dev_err(&hdev->pdev->dev, 4713 "Set promisc mode fail, status is %d.\n", ret); 4714 4715 return ret; 4716 } 4717 4718 static void hclge_promisc_param_init(struct hclge_promisc_param *param, 4719 bool en_uc, bool en_mc, bool en_bc, 4720 int vport_id) 4721 { 4722 if (!param) 4723 return; 4724 4725 memset(param, 0, sizeof(struct hclge_promisc_param)); 4726 if (en_uc) 4727 param->enable = HCLGE_PROMISC_EN_UC; 4728 if (en_mc) 4729 param->enable |= HCLGE_PROMISC_EN_MC; 4730 if (en_bc) 4731 param->enable |= HCLGE_PROMISC_EN_BC; 4732 param->vf_id = vport_id; 4733 } 4734 4735 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, 4736 bool en_mc_pmc, bool en_bc_pmc) 4737 { 4738 struct hclge_dev *hdev = vport->back; 4739 struct hclge_promisc_param param; 4740 4741 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, 4742 vport->vport_id); 4743 return hclge_cmd_set_promisc_mode(hdev, ¶m); 4744 } 4745 4746 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 4747 bool en_mc_pmc) 4748 { 4749 struct hclge_vport *vport = hclge_get_vport(handle); 4750 bool en_bc_pmc = true; 4751 4752 /* For revision 0x20, if broadcast promisc enabled, vlan filter is 4753 * always bypassed. So broadcast promisc should be disabled until 4754 * user enable promisc mode 4755 */ 4756 if (handle->pdev->revision == 0x20) 4757 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false; 4758 4759 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, 4760 en_bc_pmc); 4761 } 4762 4763 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) 4764 { 4765 struct hclge_get_fd_mode_cmd *req; 4766 struct hclge_desc desc; 4767 int ret; 4768 4769 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); 4770 4771 req = (struct hclge_get_fd_mode_cmd *)desc.data; 4772 4773 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4774 if (ret) { 4775 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); 4776 return ret; 4777 } 4778 4779 *fd_mode = req->mode; 4780 4781 return ret; 4782 } 4783 4784 static int hclge_get_fd_allocation(struct hclge_dev *hdev, 4785 u32 *stage1_entry_num, 4786 u32 *stage2_entry_num, 4787 u16 *stage1_counter_num, 4788 u16 *stage2_counter_num) 4789 { 4790 struct hclge_get_fd_allocation_cmd *req; 4791 struct hclge_desc desc; 4792 int ret; 4793 4794 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); 4795 4796 req = (struct hclge_get_fd_allocation_cmd *)desc.data; 4797 4798 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4799 if (ret) { 4800 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", 4801 ret); 4802 return ret; 4803 } 4804 4805 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); 4806 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); 4807 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); 4808 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); 4809 4810 return ret; 4811 } 4812 4813 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) 4814 { 4815 struct hclge_set_fd_key_config_cmd *req; 4816 struct hclge_fd_key_cfg *stage; 4817 struct hclge_desc desc; 4818 int ret; 4819 4820 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); 4821 4822 req = (struct hclge_set_fd_key_config_cmd *)desc.data; 4823 stage = &hdev->fd_cfg.key_cfg[stage_num]; 4824 req->stage = stage_num; 
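        /* the tuple_mask and meta_data_mask written below are the bitwise
         * complements of the active tuple and meta data bitmaps configured
         * for this stage
         */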
        req->key_select = stage->key_sel;
        req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
        req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
        req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
        req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
        req->tuple_mask = cpu_to_le32(~stage->tuple_active);
        req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

        return ret;
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS 0x03
        struct hclge_fd_key_cfg *key_cfg;
        int ret;

        if (!hnae3_dev_fd_supported(hdev))
                return 0;

        ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
        if (ret)
                return ret;

        switch (hdev->fd_cfg.fd_mode) {
        case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
                hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
                break;
        case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
                hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
                break;
        default:
                dev_err(&hdev->pdev->dev,
                        "Unsupported flow director mode %u\n",
                        hdev->fd_cfg.fd_mode);
                return -EOPNOTSUPP;
        }

        hdev->fd_cfg.proto_support =
                TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
                UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
        key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
        key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
        key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
        key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
        key_cfg->outer_sipv6_word_en = 0;
        key_cfg->outer_dipv6_word_en = 0;

        key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
                                BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
                                BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
                                BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

        /* If the max 400 bit key is used, we can also support tuples for
         * ether type
         */
        if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
                hdev->fd_cfg.proto_support |= ETHER_FLOW;
                key_cfg->tuple_active |=
                                BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
        }

        /* roce_type is used to filter roce frames,
         * dst_vport is used to specify the rule
         */
        key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

        ret = hclge_get_fd_allocation(hdev,
                                      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
                                      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
                                      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
                                      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
        if (ret)
                return ret;

        return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
                                int loc, u8 *key, bool is_add)
{
        struct hclge_fd_tcam_config_1_cmd *req1;
        struct hclge_fd_tcam_config_2_cmd *req2;
        struct hclge_fd_tcam_config_3_cmd *req3;
        struct hclge_desc desc[3];
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
        desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
        desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

        req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
        req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
        req3 = (struct
hclge_fd_tcam_config_3_cmd *)desc[2].data; 4923 4924 req1->stage = stage; 4925 req1->xy_sel = sel_x ? 1 : 0; 4926 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); 4927 req1->index = cpu_to_le32(loc); 4928 req1->entry_vld = sel_x ? is_add : 0; 4929 4930 if (key) { 4931 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); 4932 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], 4933 sizeof(req2->tcam_data)); 4934 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + 4935 sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); 4936 } 4937 4938 ret = hclge_cmd_send(&hdev->hw, desc, 3); 4939 if (ret) 4940 dev_err(&hdev->pdev->dev, 4941 "config tcam key fail, ret=%d\n", 4942 ret); 4943 4944 return ret; 4945 } 4946 4947 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, 4948 struct hclge_fd_ad_data *action) 4949 { 4950 struct hclge_fd_ad_config_cmd *req; 4951 struct hclge_desc desc; 4952 u64 ad_data = 0; 4953 int ret; 4954 4955 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); 4956 4957 req = (struct hclge_fd_ad_config_cmd *)desc.data; 4958 req->index = cpu_to_le32(loc); 4959 req->stage = stage; 4960 4961 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, 4962 action->write_rule_id_to_bd); 4963 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, 4964 action->rule_id); 4965 ad_data <<= 32; 4966 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); 4967 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, 4968 action->forward_to_direct_queue); 4969 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, 4970 action->queue_id); 4971 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); 4972 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, 4973 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); 4974 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); 4975 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, 4976 action->counter_id); 4977 4978 req->ad_data = cpu_to_le64(ad_data); 4979 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4980 if (ret) 4981 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); 4982 4983 return ret; 4984 } 4985 4986 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, 4987 struct hclge_fd_rule *rule) 4988 { 4989 u16 tmp_x_s, tmp_y_s; 4990 u32 tmp_x_l, tmp_y_l; 4991 int i; 4992 4993 if (rule->unused_tuple & tuple_bit) 4994 return true; 4995 4996 switch (tuple_bit) { 4997 case 0: 4998 return false; 4999 case BIT(INNER_DST_MAC): 5000 for (i = 0; i < ETH_ALEN; i++) { 5001 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], 5002 rule->tuples_mask.dst_mac[i]); 5003 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i], 5004 rule->tuples_mask.dst_mac[i]); 5005 } 5006 5007 return true; 5008 case BIT(INNER_SRC_MAC): 5009 for (i = 0; i < ETH_ALEN; i++) { 5010 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], 5011 rule->tuples.src_mac[i]); 5012 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i], 5013 rule->tuples.src_mac[i]); 5014 } 5015 5016 return true; 5017 case BIT(INNER_VLAN_TAG_FST): 5018 calc_x(tmp_x_s, rule->tuples.vlan_tag1, 5019 rule->tuples_mask.vlan_tag1); 5020 calc_y(tmp_y_s, rule->tuples.vlan_tag1, 5021 rule->tuples_mask.vlan_tag1); 5022 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 5023 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 5024 5025 return true; 5026 case BIT(INNER_ETH_TYPE): 5027 calc_x(tmp_x_s, rule->tuples.ether_proto, 5028 rule->tuples_mask.ether_proto); 5029 calc_y(tmp_y_s, 
rule->tuples.ether_proto, 5030 rule->tuples_mask.ether_proto); 5031 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 5032 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 5033 5034 return true; 5035 case BIT(INNER_IP_TOS): 5036 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); 5037 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); 5038 5039 return true; 5040 case BIT(INNER_IP_PROTO): 5041 calc_x(*key_x, rule->tuples.ip_proto, 5042 rule->tuples_mask.ip_proto); 5043 calc_y(*key_y, rule->tuples.ip_proto, 5044 rule->tuples_mask.ip_proto); 5045 5046 return true; 5047 case BIT(INNER_SRC_IP): 5048 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX], 5049 rule->tuples_mask.src_ip[IPV4_INDEX]); 5050 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX], 5051 rule->tuples_mask.src_ip[IPV4_INDEX]); 5052 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5053 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5054 5055 return true; 5056 case BIT(INNER_DST_IP): 5057 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX], 5058 rule->tuples_mask.dst_ip[IPV4_INDEX]); 5059 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX], 5060 rule->tuples_mask.dst_ip[IPV4_INDEX]); 5061 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5062 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5063 5064 return true; 5065 case BIT(INNER_SRC_PORT): 5066 calc_x(tmp_x_s, rule->tuples.src_port, 5067 rule->tuples_mask.src_port); 5068 calc_y(tmp_y_s, rule->tuples.src_port, 5069 rule->tuples_mask.src_port); 5070 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 5071 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 5072 5073 return true; 5074 case BIT(INNER_DST_PORT): 5075 calc_x(tmp_x_s, rule->tuples.dst_port, 5076 rule->tuples_mask.dst_port); 5077 calc_y(tmp_y_s, rule->tuples.dst_port, 5078 rule->tuples_mask.dst_port); 5079 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 5080 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 5081 5082 return true; 5083 default: 5084 return false; 5085 } 5086 } 5087 5088 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, 5089 u8 vf_id, u8 network_port_id) 5090 { 5091 u32 port_number = 0; 5092 5093 if (port_type == HOST_PORT) { 5094 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, 5095 pf_id); 5096 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, 5097 vf_id); 5098 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); 5099 } else { 5100 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, 5101 HCLGE_NETWORK_PORT_ID_S, network_port_id); 5102 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); 5103 } 5104 5105 return port_number; 5106 } 5107 5108 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, 5109 __le32 *key_x, __le32 *key_y, 5110 struct hclge_fd_rule *rule) 5111 { 5112 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; 5113 u8 cur_pos = 0, tuple_size, shift_bits; 5114 unsigned int i; 5115 5116 for (i = 0; i < MAX_META_DATA; i++) { 5117 tuple_size = meta_data_key_info[i].key_length; 5118 tuple_bit = key_cfg->meta_data_active & BIT(i); 5119 5120 switch (tuple_bit) { 5121 case BIT(ROCE_TYPE): 5122 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); 5123 cur_pos += tuple_size; 5124 break; 5125 case BIT(DST_VPORT): 5126 port_number = hclge_get_port_number(HOST_PORT, 0, 5127 rule->vf_id, 0); 5128 hnae3_set_field(meta_data, 5129 GENMASK(cur_pos + tuple_size, cur_pos), 5130 cur_pos, port_number); 5131 cur_pos += tuple_size; 5132 break; 5133 default: 5134 break; 5135 } 5136 } 5137 5138 calc_x(tmp_x, meta_data, 0xFFFFFFFF); 5139 calc_y(tmp_y, meta_data, 0xFFFFFFFF); 5140 shift_bits = sizeof(meta_data) * 8 - 
cur_pos;

        *key_x = cpu_to_le32(tmp_x << shift_bits);
        *key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key is made up of the meta data key and the tuple key.
 * The meta data key is stored in the MSB region, the tuple key in the
 * LSB region, and unused bits are filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
                            struct hclge_fd_rule *rule)
{
        struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
        u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
        u8 *cur_key_x, *cur_key_y;
        unsigned int i;
        int ret, tuple_size;
        u8 meta_data_region;

        memset(key_x, 0, sizeof(key_x));
        memset(key_y, 0, sizeof(key_y));
        cur_key_x = key_x;
        cur_key_y = key_y;

        for (i = 0; i < MAX_TUPLE; i++) {
                bool tuple_valid;
                u32 check_tuple;

                tuple_size = tuple_key_info[i].key_length / 8;
                check_tuple = key_cfg->tuple_active & BIT(i);

                tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
                                                     cur_key_y, rule);
                if (tuple_valid) {
                        cur_key_x += tuple_size;
                        cur_key_y += tuple_size;
                }
        }

        meta_data_region = hdev->fd_cfg.max_key_length / 8 -
                           MAX_META_DATA_LENGTH / 8;

        hclge_fd_convert_meta_data(key_cfg,
                                   (__le32 *)(key_x + meta_data_region),
                                   (__le32 *)(key_y + meta_data_region),
                                   rule);

        ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
                                   true);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "fd key_y config fail, loc=%u, ret=%d\n",
                        rule->location, ret);
                return ret;
        }

        ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
                                   true);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "fd key_x config fail, loc=%u, ret=%d\n",
                        rule->location, ret);
        return ret;
}

static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
                               struct hclge_fd_rule *rule)
{
        struct hclge_fd_ad_data ad_data;

        ad_data.ad_id = rule->location;

        if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
                ad_data.drop_packet = true;
                ad_data.forward_to_direct_queue = false;
                ad_data.queue_id = 0;
        } else {
                ad_data.drop_packet = false;
                ad_data.forward_to_direct_queue = true;
                ad_data.queue_id = rule->queue_id;
        }

        ad_data.use_counter = false;
        ad_data.counter_id = 0;

        ad_data.use_next_stage = false;
        ad_data.next_input_key = 0;

        ad_data.write_rule_id_to_bd = true;
        ad_data.rule_id = rule->location;

        return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

static int hclge_fd_check_spec(struct hclge_dev *hdev,
                               struct ethtool_rx_flow_spec *fs, u32 *unused)
{
        struct ethtool_tcpip4_spec *tcp_ip4_spec;
        struct ethtool_usrip4_spec *usr_ip4_spec;
        struct ethtool_tcpip6_spec *tcp_ip6_spec;
        struct ethtool_usrip6_spec *usr_ip6_spec;
        struct ethhdr *ether_spec;

        if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
                return -EINVAL;

        if (!(fs->flow_type & hdev->fd_cfg.proto_support))
                return -EOPNOTSUPP;

        if ((fs->flow_type & FLOW_EXT) &&
            (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
                dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
                return -EOPNOTSUPP;
        }

        switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
        case SCTP_V4_FLOW:
        case TCP_V4_FLOW:
        case
UDP_V4_FLOW: 5260 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; 5261 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 5262 5263 if (!tcp_ip4_spec->ip4src) 5264 *unused |= BIT(INNER_SRC_IP); 5265 5266 if (!tcp_ip4_spec->ip4dst) 5267 *unused |= BIT(INNER_DST_IP); 5268 5269 if (!tcp_ip4_spec->psrc) 5270 *unused |= BIT(INNER_SRC_PORT); 5271 5272 if (!tcp_ip4_spec->pdst) 5273 *unused |= BIT(INNER_DST_PORT); 5274 5275 if (!tcp_ip4_spec->tos) 5276 *unused |= BIT(INNER_IP_TOS); 5277 5278 break; 5279 case IP_USER_FLOW: 5280 usr_ip4_spec = &fs->h_u.usr_ip4_spec; 5281 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5282 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5283 5284 if (!usr_ip4_spec->ip4src) 5285 *unused |= BIT(INNER_SRC_IP); 5286 5287 if (!usr_ip4_spec->ip4dst) 5288 *unused |= BIT(INNER_DST_IP); 5289 5290 if (!usr_ip4_spec->tos) 5291 *unused |= BIT(INNER_IP_TOS); 5292 5293 if (!usr_ip4_spec->proto) 5294 *unused |= BIT(INNER_IP_PROTO); 5295 5296 if (usr_ip4_spec->l4_4_bytes) 5297 return -EOPNOTSUPP; 5298 5299 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) 5300 return -EOPNOTSUPP; 5301 5302 break; 5303 case SCTP_V6_FLOW: 5304 case TCP_V6_FLOW: 5305 case UDP_V6_FLOW: 5306 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; 5307 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5308 BIT(INNER_IP_TOS); 5309 5310 /* check whether src/dst ip address used */ 5311 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && 5312 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) 5313 *unused |= BIT(INNER_SRC_IP); 5314 5315 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && 5316 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) 5317 *unused |= BIT(INNER_DST_IP); 5318 5319 if (!tcp_ip6_spec->psrc) 5320 *unused |= BIT(INNER_SRC_PORT); 5321 5322 if (!tcp_ip6_spec->pdst) 5323 *unused |= BIT(INNER_DST_PORT); 5324 5325 if (tcp_ip6_spec->tclass) 5326 return -EOPNOTSUPP; 5327 5328 break; 5329 case IPV6_USER_FLOW: 5330 usr_ip6_spec = &fs->h_u.usr_ip6_spec; 5331 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5332 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | 5333 BIT(INNER_DST_PORT); 5334 5335 /* check whether src/dst ip address used */ 5336 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && 5337 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) 5338 *unused |= BIT(INNER_SRC_IP); 5339 5340 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && 5341 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) 5342 *unused |= BIT(INNER_DST_IP); 5343 5344 if (!usr_ip6_spec->l4_proto) 5345 *unused |= BIT(INNER_IP_PROTO); 5346 5347 if (usr_ip6_spec->tclass) 5348 return -EOPNOTSUPP; 5349 5350 if (usr_ip6_spec->l4_4_bytes) 5351 return -EOPNOTSUPP; 5352 5353 break; 5354 case ETHER_FLOW: 5355 ether_spec = &fs->h_u.ether_spec; 5356 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 5357 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | 5358 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); 5359 5360 if (is_zero_ether_addr(ether_spec->h_source)) 5361 *unused |= BIT(INNER_SRC_MAC); 5362 5363 if (is_zero_ether_addr(ether_spec->h_dest)) 5364 *unused |= BIT(INNER_DST_MAC); 5365 5366 if (!ether_spec->h_proto) 5367 *unused |= BIT(INNER_ETH_TYPE); 5368 5369 break; 5370 default: 5371 return -EOPNOTSUPP; 5372 } 5373 5374 if ((fs->flow_type & FLOW_EXT)) { 5375 if (fs->h_ext.vlan_etype) 5376 return -EOPNOTSUPP; 5377 if (!fs->h_ext.vlan_tci) 5378 *unused |= BIT(INNER_VLAN_TAG_FST); 5379 5380 if (fs->m_ext.vlan_tci) { 5381 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) 5382 return -EINVAL; 5383 } 5384 } else { 5385 *unused |= 
BIT(INNER_VLAN_TAG_FST); 5386 } 5387 5388 if (fs->flow_type & FLOW_MAC_EXT) { 5389 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) 5390 return -EOPNOTSUPP; 5391 5392 if (is_zero_ether_addr(fs->h_ext.h_dest)) 5393 *unused |= BIT(INNER_DST_MAC); 5394 else 5395 *unused &= ~(BIT(INNER_DST_MAC)); 5396 } 5397 5398 return 0; 5399 } 5400 5401 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) 5402 { 5403 struct hclge_fd_rule *rule = NULL; 5404 struct hlist_node *node2; 5405 5406 spin_lock_bh(&hdev->fd_rule_lock); 5407 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 5408 if (rule->location >= location) 5409 break; 5410 } 5411 5412 spin_unlock_bh(&hdev->fd_rule_lock); 5413 5414 return rule && rule->location == location; 5415 } 5416 5417 /* make sure being called after lock up with fd_rule_lock */ 5418 static int hclge_fd_update_rule_list(struct hclge_dev *hdev, 5419 struct hclge_fd_rule *new_rule, 5420 u16 location, 5421 bool is_add) 5422 { 5423 struct hclge_fd_rule *rule = NULL, *parent = NULL; 5424 struct hlist_node *node2; 5425 5426 if (is_add && !new_rule) 5427 return -EINVAL; 5428 5429 hlist_for_each_entry_safe(rule, node2, 5430 &hdev->fd_rule_list, rule_node) { 5431 if (rule->location >= location) 5432 break; 5433 parent = rule; 5434 } 5435 5436 if (rule && rule->location == location) { 5437 hlist_del(&rule->rule_node); 5438 kfree(rule); 5439 hdev->hclge_fd_rule_num--; 5440 5441 if (!is_add) { 5442 if (!hdev->hclge_fd_rule_num) 5443 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 5444 clear_bit(location, hdev->fd_bmap); 5445 5446 return 0; 5447 } 5448 } else if (!is_add) { 5449 dev_err(&hdev->pdev->dev, 5450 "delete fail, rule %u is inexistent\n", 5451 location); 5452 return -EINVAL; 5453 } 5454 5455 INIT_HLIST_NODE(&new_rule->rule_node); 5456 5457 if (parent) 5458 hlist_add_behind(&new_rule->rule_node, &parent->rule_node); 5459 else 5460 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); 5461 5462 set_bit(location, hdev->fd_bmap); 5463 hdev->hclge_fd_rule_num++; 5464 hdev->fd_active_type = new_rule->rule_type; 5465 5466 return 0; 5467 } 5468 5469 static int hclge_fd_get_tuple(struct hclge_dev *hdev, 5470 struct ethtool_rx_flow_spec *fs, 5471 struct hclge_fd_rule *rule) 5472 { 5473 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 5474 5475 switch (flow_type) { 5476 case SCTP_V4_FLOW: 5477 case TCP_V4_FLOW: 5478 case UDP_V4_FLOW: 5479 rule->tuples.src_ip[IPV4_INDEX] = 5480 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); 5481 rule->tuples_mask.src_ip[IPV4_INDEX] = 5482 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); 5483 5484 rule->tuples.dst_ip[IPV4_INDEX] = 5485 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); 5486 rule->tuples_mask.dst_ip[IPV4_INDEX] = 5487 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); 5488 5489 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); 5490 rule->tuples_mask.src_port = 5491 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); 5492 5493 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); 5494 rule->tuples_mask.dst_port = 5495 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); 5496 5497 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; 5498 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; 5499 5500 rule->tuples.ether_proto = ETH_P_IP; 5501 rule->tuples_mask.ether_proto = 0xFFFF; 5502 5503 break; 5504 case IP_USER_FLOW: 5505 rule->tuples.src_ip[IPV4_INDEX] = 5506 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); 5507 rule->tuples_mask.src_ip[IPV4_INDEX] = 5508 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); 5509 5510 
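                /* as for the other flow types, h_u carries the user-specified
                 * match values and m_u the corresponding masks
                 */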
rule->tuples.dst_ip[IPV4_INDEX] = 5511 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); 5512 rule->tuples_mask.dst_ip[IPV4_INDEX] = 5513 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); 5514 5515 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; 5516 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; 5517 5518 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; 5519 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; 5520 5521 rule->tuples.ether_proto = ETH_P_IP; 5522 rule->tuples_mask.ether_proto = 0xFFFF; 5523 5524 break; 5525 case SCTP_V6_FLOW: 5526 case TCP_V6_FLOW: 5527 case UDP_V6_FLOW: 5528 be32_to_cpu_array(rule->tuples.src_ip, 5529 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE); 5530 be32_to_cpu_array(rule->tuples_mask.src_ip, 5531 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE); 5532 5533 be32_to_cpu_array(rule->tuples.dst_ip, 5534 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); 5535 be32_to_cpu_array(rule->tuples_mask.dst_ip, 5536 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE); 5537 5538 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); 5539 rule->tuples_mask.src_port = 5540 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); 5541 5542 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); 5543 rule->tuples_mask.dst_port = 5544 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); 5545 5546 rule->tuples.ether_proto = ETH_P_IPV6; 5547 rule->tuples_mask.ether_proto = 0xFFFF; 5548 5549 break; 5550 case IPV6_USER_FLOW: 5551 be32_to_cpu_array(rule->tuples.src_ip, 5552 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE); 5553 be32_to_cpu_array(rule->tuples_mask.src_ip, 5554 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE); 5555 5556 be32_to_cpu_array(rule->tuples.dst_ip, 5557 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE); 5558 be32_to_cpu_array(rule->tuples_mask.dst_ip, 5559 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE); 5560 5561 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; 5562 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; 5563 5564 rule->tuples.ether_proto = ETH_P_IPV6; 5565 rule->tuples_mask.ether_proto = 0xFFFF; 5566 5567 break; 5568 case ETHER_FLOW: 5569 ether_addr_copy(rule->tuples.src_mac, 5570 fs->h_u.ether_spec.h_source); 5571 ether_addr_copy(rule->tuples_mask.src_mac, 5572 fs->m_u.ether_spec.h_source); 5573 5574 ether_addr_copy(rule->tuples.dst_mac, 5575 fs->h_u.ether_spec.h_dest); 5576 ether_addr_copy(rule->tuples_mask.dst_mac, 5577 fs->m_u.ether_spec.h_dest); 5578 5579 rule->tuples.ether_proto = 5580 be16_to_cpu(fs->h_u.ether_spec.h_proto); 5581 rule->tuples_mask.ether_proto = 5582 be16_to_cpu(fs->m_u.ether_spec.h_proto); 5583 5584 break; 5585 default: 5586 return -EOPNOTSUPP; 5587 } 5588 5589 switch (flow_type) { 5590 case SCTP_V4_FLOW: 5591 case SCTP_V6_FLOW: 5592 rule->tuples.ip_proto = IPPROTO_SCTP; 5593 rule->tuples_mask.ip_proto = 0xFF; 5594 break; 5595 case TCP_V4_FLOW: 5596 case TCP_V6_FLOW: 5597 rule->tuples.ip_proto = IPPROTO_TCP; 5598 rule->tuples_mask.ip_proto = 0xFF; 5599 break; 5600 case UDP_V4_FLOW: 5601 case UDP_V6_FLOW: 5602 rule->tuples.ip_proto = IPPROTO_UDP; 5603 rule->tuples_mask.ip_proto = 0xFF; 5604 break; 5605 default: 5606 break; 5607 } 5608 5609 if ((fs->flow_type & FLOW_EXT)) { 5610 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); 5611 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); 5612 } 5613 5614 if (fs->flow_type & FLOW_MAC_EXT) { 5615 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); 5616 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); 5617 } 5618 5619 return 0; 5620 } 5621 5622 /* make sure being called after lock up with fd_rule_lock */ 5623 
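/* Add the rule to the SW list first, then write the action table entry and
 * the TCAM key to hardware; the SW list is rolled back if the hardware
 * configuration fails.
 */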
static int hclge_fd_config_rule(struct hclge_dev *hdev, 5624 struct hclge_fd_rule *rule) 5625 { 5626 int ret; 5627 5628 if (!rule) { 5629 dev_err(&hdev->pdev->dev, 5630 "The flow director rule is NULL\n"); 5631 return -EINVAL; 5632 } 5633 5634 /* it will never fail here, so needn't to check return value */ 5635 hclge_fd_update_rule_list(hdev, rule, rule->location, true); 5636 5637 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 5638 if (ret) 5639 goto clear_rule; 5640 5641 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 5642 if (ret) 5643 goto clear_rule; 5644 5645 return 0; 5646 5647 clear_rule: 5648 hclge_fd_update_rule_list(hdev, rule, rule->location, false); 5649 return ret; 5650 } 5651 5652 static int hclge_add_fd_entry(struct hnae3_handle *handle, 5653 struct ethtool_rxnfc *cmd) 5654 { 5655 struct hclge_vport *vport = hclge_get_vport(handle); 5656 struct hclge_dev *hdev = vport->back; 5657 u16 dst_vport_id = 0, q_index = 0; 5658 struct ethtool_rx_flow_spec *fs; 5659 struct hclge_fd_rule *rule; 5660 u32 unused = 0; 5661 u8 action; 5662 int ret; 5663 5664 if (!hnae3_dev_fd_supported(hdev)) 5665 return -EOPNOTSUPP; 5666 5667 if (!hdev->fd_en) { 5668 dev_warn(&hdev->pdev->dev, 5669 "Please enable flow director first\n"); 5670 return -EOPNOTSUPP; 5671 } 5672 5673 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 5674 5675 ret = hclge_fd_check_spec(hdev, fs, &unused); 5676 if (ret) { 5677 dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); 5678 return ret; 5679 } 5680 5681 if (fs->ring_cookie == RX_CLS_FLOW_DISC) { 5682 action = HCLGE_FD_ACTION_DROP_PACKET; 5683 } else { 5684 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); 5685 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 5686 u16 tqps; 5687 5688 if (vf > hdev->num_req_vfs) { 5689 dev_err(&hdev->pdev->dev, 5690 "Error: vf id (%u) > max vf num (%u)\n", 5691 vf, hdev->num_req_vfs); 5692 return -EINVAL; 5693 } 5694 5695 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; 5696 tqps = vf ? 
hdev->vport[vf].alloc_tqps : vport->alloc_tqps; 5697 5698 if (ring >= tqps) { 5699 dev_err(&hdev->pdev->dev, 5700 "Error: queue id (%u) > max tqp num (%u)\n", 5701 ring, tqps - 1); 5702 return -EINVAL; 5703 } 5704 5705 action = HCLGE_FD_ACTION_ACCEPT_PACKET; 5706 q_index = ring; 5707 } 5708 5709 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 5710 if (!rule) 5711 return -ENOMEM; 5712 5713 ret = hclge_fd_get_tuple(hdev, fs, rule); 5714 if (ret) { 5715 kfree(rule); 5716 return ret; 5717 } 5718 5719 rule->flow_type = fs->flow_type; 5720 5721 rule->location = fs->location; 5722 rule->unused_tuple = unused; 5723 rule->vf_id = dst_vport_id; 5724 rule->queue_id = q_index; 5725 rule->action = action; 5726 rule->rule_type = HCLGE_FD_EP_ACTIVE; 5727 5728 /* to avoid rule conflict, when user configure rule by ethtool, 5729 * we need to clear all arfs rules 5730 */ 5731 hclge_clear_arfs_rules(handle); 5732 5733 spin_lock_bh(&hdev->fd_rule_lock); 5734 ret = hclge_fd_config_rule(hdev, rule); 5735 5736 spin_unlock_bh(&hdev->fd_rule_lock); 5737 5738 return ret; 5739 } 5740 5741 static int hclge_del_fd_entry(struct hnae3_handle *handle, 5742 struct ethtool_rxnfc *cmd) 5743 { 5744 struct hclge_vport *vport = hclge_get_vport(handle); 5745 struct hclge_dev *hdev = vport->back; 5746 struct ethtool_rx_flow_spec *fs; 5747 int ret; 5748 5749 if (!hnae3_dev_fd_supported(hdev)) 5750 return -EOPNOTSUPP; 5751 5752 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 5753 5754 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 5755 return -EINVAL; 5756 5757 if (!hclge_fd_rule_exist(hdev, fs->location)) { 5758 dev_err(&hdev->pdev->dev, 5759 "Delete fail, rule %u is inexistent\n", fs->location); 5760 return -ENOENT; 5761 } 5762 5763 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, 5764 NULL, false); 5765 if (ret) 5766 return ret; 5767 5768 spin_lock_bh(&hdev->fd_rule_lock); 5769 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false); 5770 5771 spin_unlock_bh(&hdev->fd_rule_lock); 5772 5773 return ret; 5774 } 5775 5776 static void hclge_del_all_fd_entries(struct hnae3_handle *handle, 5777 bool clear_list) 5778 { 5779 struct hclge_vport *vport = hclge_get_vport(handle); 5780 struct hclge_dev *hdev = vport->back; 5781 struct hclge_fd_rule *rule; 5782 struct hlist_node *node; 5783 u16 location; 5784 5785 if (!hnae3_dev_fd_supported(hdev)) 5786 return; 5787 5788 spin_lock_bh(&hdev->fd_rule_lock); 5789 for_each_set_bit(location, hdev->fd_bmap, 5790 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 5791 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, 5792 NULL, false); 5793 5794 if (clear_list) { 5795 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, 5796 rule_node) { 5797 hlist_del(&rule->rule_node); 5798 kfree(rule); 5799 } 5800 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 5801 hdev->hclge_fd_rule_num = 0; 5802 bitmap_zero(hdev->fd_bmap, 5803 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 5804 } 5805 5806 spin_unlock_bh(&hdev->fd_rule_lock); 5807 } 5808 5809 static int hclge_restore_fd_entries(struct hnae3_handle *handle) 5810 { 5811 struct hclge_vport *vport = hclge_get_vport(handle); 5812 struct hclge_dev *hdev = vport->back; 5813 struct hclge_fd_rule *rule; 5814 struct hlist_node *node; 5815 int ret; 5816 5817 /* Return ok here, because reset error handling will check this 5818 * return value. If error is returned here, the reset process will 5819 * fail. 
5820 */ 5821 if (!hnae3_dev_fd_supported(hdev)) 5822 return 0; 5823 5824 /* if fd is disabled, should not restore it when reset */ 5825 if (!hdev->fd_en) 5826 return 0; 5827 5828 spin_lock_bh(&hdev->fd_rule_lock); 5829 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 5830 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 5831 if (!ret) 5832 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 5833 5834 if (ret) { 5835 dev_warn(&hdev->pdev->dev, 5836 "Restore rule %u failed, remove it\n", 5837 rule->location); 5838 clear_bit(rule->location, hdev->fd_bmap); 5839 hlist_del(&rule->rule_node); 5840 kfree(rule); 5841 hdev->hclge_fd_rule_num--; 5842 } 5843 } 5844 5845 if (hdev->hclge_fd_rule_num) 5846 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE; 5847 5848 spin_unlock_bh(&hdev->fd_rule_lock); 5849 5850 return 0; 5851 } 5852 5853 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, 5854 struct ethtool_rxnfc *cmd) 5855 { 5856 struct hclge_vport *vport = hclge_get_vport(handle); 5857 struct hclge_dev *hdev = vport->back; 5858 5859 if (!hnae3_dev_fd_supported(hdev)) 5860 return -EOPNOTSUPP; 5861 5862 cmd->rule_cnt = hdev->hclge_fd_rule_num; 5863 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 5864 5865 return 0; 5866 } 5867 5868 static int hclge_get_fd_rule_info(struct hnae3_handle *handle, 5869 struct ethtool_rxnfc *cmd) 5870 { 5871 struct hclge_vport *vport = hclge_get_vport(handle); 5872 struct hclge_fd_rule *rule = NULL; 5873 struct hclge_dev *hdev = vport->back; 5874 struct ethtool_rx_flow_spec *fs; 5875 struct hlist_node *node2; 5876 5877 if (!hnae3_dev_fd_supported(hdev)) 5878 return -EOPNOTSUPP; 5879 5880 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 5881 5882 spin_lock_bh(&hdev->fd_rule_lock); 5883 5884 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 5885 if (rule->location >= fs->location) 5886 break; 5887 } 5888 5889 if (!rule || fs->location != rule->location) { 5890 spin_unlock_bh(&hdev->fd_rule_lock); 5891 5892 return -ENOENT; 5893 } 5894 5895 fs->flow_type = rule->flow_type; 5896 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 5897 case SCTP_V4_FLOW: 5898 case TCP_V4_FLOW: 5899 case UDP_V4_FLOW: 5900 fs->h_u.tcp_ip4_spec.ip4src = 5901 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 5902 fs->m_u.tcp_ip4_spec.ip4src = 5903 rule->unused_tuple & BIT(INNER_SRC_IP) ? 5904 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 5905 5906 fs->h_u.tcp_ip4_spec.ip4dst = 5907 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 5908 fs->m_u.tcp_ip4_spec.ip4dst = 5909 rule->unused_tuple & BIT(INNER_DST_IP) ? 5910 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 5911 5912 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); 5913 fs->m_u.tcp_ip4_spec.psrc = 5914 rule->unused_tuple & BIT(INNER_SRC_PORT) ? 5915 0 : cpu_to_be16(rule->tuples_mask.src_port); 5916 5917 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); 5918 fs->m_u.tcp_ip4_spec.pdst = 5919 rule->unused_tuple & BIT(INNER_DST_PORT) ? 5920 0 : cpu_to_be16(rule->tuples_mask.dst_port); 5921 5922 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; 5923 fs->m_u.tcp_ip4_spec.tos = 5924 rule->unused_tuple & BIT(INNER_IP_TOS) ? 5925 0 : rule->tuples_mask.ip_tos; 5926 5927 break; 5928 case IP_USER_FLOW: 5929 fs->h_u.usr_ip4_spec.ip4src = 5930 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 5931 fs->m_u.tcp_ip4_spec.ip4src = 5932 rule->unused_tuple & BIT(INNER_SRC_IP) ? 
5933 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 5934 5935 fs->h_u.usr_ip4_spec.ip4dst = 5936 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 5937 fs->m_u.usr_ip4_spec.ip4dst = 5938 rule->unused_tuple & BIT(INNER_DST_IP) ? 5939 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 5940 5941 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; 5942 fs->m_u.usr_ip4_spec.tos = 5943 rule->unused_tuple & BIT(INNER_IP_TOS) ? 5944 0 : rule->tuples_mask.ip_tos; 5945 5946 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; 5947 fs->m_u.usr_ip4_spec.proto = 5948 rule->unused_tuple & BIT(INNER_IP_PROTO) ? 5949 0 : rule->tuples_mask.ip_proto; 5950 5951 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 5952 5953 break; 5954 case SCTP_V6_FLOW: 5955 case TCP_V6_FLOW: 5956 case UDP_V6_FLOW: 5957 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, 5958 rule->tuples.src_ip, IPV6_SIZE); 5959 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 5960 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, 5961 sizeof(int) * IPV6_SIZE); 5962 else 5963 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, 5964 rule->tuples_mask.src_ip, IPV6_SIZE); 5965 5966 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, 5967 rule->tuples.dst_ip, IPV6_SIZE); 5968 if (rule->unused_tuple & BIT(INNER_DST_IP)) 5969 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, 5970 sizeof(int) * IPV6_SIZE); 5971 else 5972 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, 5973 rule->tuples_mask.dst_ip, IPV6_SIZE); 5974 5975 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); 5976 fs->m_u.tcp_ip6_spec.psrc = 5977 rule->unused_tuple & BIT(INNER_SRC_PORT) ? 5978 0 : cpu_to_be16(rule->tuples_mask.src_port); 5979 5980 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); 5981 fs->m_u.tcp_ip6_spec.pdst = 5982 rule->unused_tuple & BIT(INNER_DST_PORT) ? 5983 0 : cpu_to_be16(rule->tuples_mask.dst_port); 5984 5985 break; 5986 case IPV6_USER_FLOW: 5987 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, 5988 rule->tuples.src_ip, IPV6_SIZE); 5989 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 5990 memset(fs->m_u.usr_ip6_spec.ip6src, 0, 5991 sizeof(int) * IPV6_SIZE); 5992 else 5993 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, 5994 rule->tuples_mask.src_ip, IPV6_SIZE); 5995 5996 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, 5997 rule->tuples.dst_ip, IPV6_SIZE); 5998 if (rule->unused_tuple & BIT(INNER_DST_IP)) 5999 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, 6000 sizeof(int) * IPV6_SIZE); 6001 else 6002 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, 6003 rule->tuples_mask.dst_ip, IPV6_SIZE); 6004 6005 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; 6006 fs->m_u.usr_ip6_spec.l4_proto = 6007 rule->unused_tuple & BIT(INNER_IP_PROTO) ? 6008 0 : rule->tuples_mask.ip_proto; 6009 6010 break; 6011 case ETHER_FLOW: 6012 ether_addr_copy(fs->h_u.ether_spec.h_source, 6013 rule->tuples.src_mac); 6014 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) 6015 eth_zero_addr(fs->m_u.ether_spec.h_source); 6016 else 6017 ether_addr_copy(fs->m_u.ether_spec.h_source, 6018 rule->tuples_mask.src_mac); 6019 6020 ether_addr_copy(fs->h_u.ether_spec.h_dest, 6021 rule->tuples.dst_mac); 6022 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6023 eth_zero_addr(fs->m_u.ether_spec.h_dest); 6024 else 6025 ether_addr_copy(fs->m_u.ether_spec.h_dest, 6026 rule->tuples_mask.dst_mac); 6027 6028 fs->h_u.ether_spec.h_proto = 6029 cpu_to_be16(rule->tuples.ether_proto); 6030 fs->m_u.ether_spec.h_proto = 6031 rule->unused_tuple & BIT(INNER_ETH_TYPE) ? 
6032 0 : cpu_to_be16(rule->tuples_mask.ether_proto); 6033 6034 break; 6035 default: 6036 spin_unlock_bh(&hdev->fd_rule_lock); 6037 return -EOPNOTSUPP; 6038 } 6039 6040 if (fs->flow_type & FLOW_EXT) { 6041 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); 6042 fs->m_ext.vlan_tci = 6043 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 6044 cpu_to_be16(VLAN_VID_MASK) : 6045 cpu_to_be16(rule->tuples_mask.vlan_tag1); 6046 } 6047 6048 if (fs->flow_type & FLOW_MAC_EXT) { 6049 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); 6050 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6051 eth_zero_addr(fs->m_u.ether_spec.h_dest); 6052 else 6053 ether_addr_copy(fs->m_u.ether_spec.h_dest, 6054 rule->tuples_mask.dst_mac); 6055 } 6056 6057 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 6058 fs->ring_cookie = RX_CLS_FLOW_DISC; 6059 } else { 6060 u64 vf_id; 6061 6062 fs->ring_cookie = rule->queue_id; 6063 vf_id = rule->vf_id; 6064 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 6065 fs->ring_cookie |= vf_id; 6066 } 6067 6068 spin_unlock_bh(&hdev->fd_rule_lock); 6069 6070 return 0; 6071 } 6072 6073 static int hclge_get_all_rules(struct hnae3_handle *handle, 6074 struct ethtool_rxnfc *cmd, u32 *rule_locs) 6075 { 6076 struct hclge_vport *vport = hclge_get_vport(handle); 6077 struct hclge_dev *hdev = vport->back; 6078 struct hclge_fd_rule *rule; 6079 struct hlist_node *node2; 6080 int cnt = 0; 6081 6082 if (!hnae3_dev_fd_supported(hdev)) 6083 return -EOPNOTSUPP; 6084 6085 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 6086 6087 spin_lock_bh(&hdev->fd_rule_lock); 6088 hlist_for_each_entry_safe(rule, node2, 6089 &hdev->fd_rule_list, rule_node) { 6090 if (cnt == cmd->rule_cnt) { 6091 spin_unlock_bh(&hdev->fd_rule_lock); 6092 return -EMSGSIZE; 6093 } 6094 6095 rule_locs[cnt] = rule->location; 6096 cnt++; 6097 } 6098 6099 spin_unlock_bh(&hdev->fd_rule_lock); 6100 6101 cmd->rule_cnt = cnt; 6102 6103 return 0; 6104 } 6105 6106 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, 6107 struct hclge_fd_rule_tuples *tuples) 6108 { 6109 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); 6110 tuples->ip_proto = fkeys->basic.ip_proto; 6111 tuples->dst_port = be16_to_cpu(fkeys->ports.dst); 6112 6113 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 6114 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); 6115 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); 6116 } else { 6117 memcpy(tuples->src_ip, 6118 fkeys->addrs.v6addrs.src.in6_u.u6_addr32, 6119 sizeof(tuples->src_ip)); 6120 memcpy(tuples->dst_ip, 6121 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32, 6122 sizeof(tuples->dst_ip)); 6123 } 6124 } 6125 6126 /* traverse all rules, check whether an existed rule has the same tuples */ 6127 static struct hclge_fd_rule * 6128 hclge_fd_search_flow_keys(struct hclge_dev *hdev, 6129 const struct hclge_fd_rule_tuples *tuples) 6130 { 6131 struct hclge_fd_rule *rule = NULL; 6132 struct hlist_node *node; 6133 6134 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 6135 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) 6136 return rule; 6137 } 6138 6139 return NULL; 6140 } 6141 6142 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, 6143 struct hclge_fd_rule *rule) 6144 { 6145 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 6146 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) | 6147 BIT(INNER_SRC_PORT); 6148 rule->action = 0; 6149 rule->vf_id = 0; 6150 rule->rule_type = HCLGE_FD_ARFS_ACTIVE; 6151 if (tuples->ether_proto == ETH_P_IP) { 
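/* Map the flow to an ethtool flow type: TCP maps to TCP_V4_FLOW or
 * TCP_V6_FLOW, and any other IP protocol is keyed as UDP below.
 */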
6152 if (tuples->ip_proto == IPPROTO_TCP) 6153 rule->flow_type = TCP_V4_FLOW; 6154 else 6155 rule->flow_type = UDP_V4_FLOW; 6156 } else { 6157 if (tuples->ip_proto == IPPROTO_TCP) 6158 rule->flow_type = TCP_V6_FLOW; 6159 else 6160 rule->flow_type = UDP_V6_FLOW; 6161 } 6162 memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); 6163 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); 6164 } 6165 6166 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, 6167 u16 flow_id, struct flow_keys *fkeys) 6168 { 6169 struct hclge_vport *vport = hclge_get_vport(handle); 6170 struct hclge_fd_rule_tuples new_tuples; 6171 struct hclge_dev *hdev = vport->back; 6172 struct hclge_fd_rule *rule; 6173 u16 tmp_queue_id; 6174 u16 bit_id; 6175 int ret; 6176 6177 if (!hnae3_dev_fd_supported(hdev)) 6178 return -EOPNOTSUPP; 6179 6180 memset(&new_tuples, 0, sizeof(new_tuples)); 6181 hclge_fd_get_flow_tuples(fkeys, &new_tuples); 6182 6183 spin_lock_bh(&hdev->fd_rule_lock); 6184 6185 /* when there is already fd rule existed add by user, 6186 * arfs should not work 6187 */ 6188 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { 6189 spin_unlock_bh(&hdev->fd_rule_lock); 6190 6191 return -EOPNOTSUPP; 6192 } 6193 6194 /* check is there flow director filter existed for this flow, 6195 * if not, create a new filter for it; 6196 * if filter exist with different queue id, modify the filter; 6197 * if filter exist with same queue id, do nothing 6198 */ 6199 rule = hclge_fd_search_flow_keys(hdev, &new_tuples); 6200 if (!rule) { 6201 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); 6202 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 6203 spin_unlock_bh(&hdev->fd_rule_lock); 6204 6205 return -ENOSPC; 6206 } 6207 6208 rule = kzalloc(sizeof(*rule), GFP_ATOMIC); 6209 if (!rule) { 6210 spin_unlock_bh(&hdev->fd_rule_lock); 6211 6212 return -ENOMEM; 6213 } 6214 6215 set_bit(bit_id, hdev->fd_bmap); 6216 rule->location = bit_id; 6217 rule->flow_id = flow_id; 6218 rule->queue_id = queue_id; 6219 hclge_fd_build_arfs_rule(&new_tuples, rule); 6220 ret = hclge_fd_config_rule(hdev, rule); 6221 6222 spin_unlock_bh(&hdev->fd_rule_lock); 6223 6224 if (ret) 6225 return ret; 6226 6227 return rule->location; 6228 } 6229 6230 spin_unlock_bh(&hdev->fd_rule_lock); 6231 6232 if (rule->queue_id == queue_id) 6233 return rule->location; 6234 6235 tmp_queue_id = rule->queue_id; 6236 rule->queue_id = queue_id; 6237 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 6238 if (ret) { 6239 rule->queue_id = tmp_queue_id; 6240 return ret; 6241 } 6242 6243 return rule->location; 6244 } 6245 6246 static void hclge_rfs_filter_expire(struct hclge_dev *hdev) 6247 { 6248 #ifdef CONFIG_RFS_ACCEL 6249 struct hnae3_handle *handle = &hdev->vport[0].nic; 6250 struct hclge_fd_rule *rule; 6251 struct hlist_node *node; 6252 HLIST_HEAD(del_list); 6253 6254 spin_lock_bh(&hdev->fd_rule_lock); 6255 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { 6256 spin_unlock_bh(&hdev->fd_rule_lock); 6257 return; 6258 } 6259 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 6260 if (rps_may_expire_flow(handle->netdev, rule->queue_id, 6261 rule->flow_id, rule->location)) { 6262 hlist_del_init(&rule->rule_node); 6263 hlist_add_head(&rule->rule_node, &del_list); 6264 hdev->hclge_fd_rule_num--; 6265 clear_bit(rule->location, hdev->fd_bmap); 6266 } 6267 } 6268 spin_unlock_bh(&hdev->fd_rule_lock); 6269 6270 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) { 6271 hclge_fd_tcam_config(hdev, 
HCLGE_FD_STAGE_1, true, 6272 rule->location, NULL, false); 6273 kfree(rule); 6274 } 6275 #endif 6276 } 6277 6278 static void hclge_clear_arfs_rules(struct hnae3_handle *handle) 6279 { 6280 #ifdef CONFIG_RFS_ACCEL 6281 struct hclge_vport *vport = hclge_get_vport(handle); 6282 struct hclge_dev *hdev = vport->back; 6283 6284 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE) 6285 hclge_del_all_fd_entries(handle, true); 6286 #endif 6287 } 6288 6289 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) 6290 { 6291 struct hclge_vport *vport = hclge_get_vport(handle); 6292 struct hclge_dev *hdev = vport->back; 6293 6294 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || 6295 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); 6296 } 6297 6298 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) 6299 { 6300 struct hclge_vport *vport = hclge_get_vport(handle); 6301 struct hclge_dev *hdev = vport->back; 6302 6303 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 6304 } 6305 6306 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) 6307 { 6308 struct hclge_vport *vport = hclge_get_vport(handle); 6309 struct hclge_dev *hdev = vport->back; 6310 6311 return hdev->rst_stats.hw_reset_done_cnt; 6312 } 6313 6314 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) 6315 { 6316 struct hclge_vport *vport = hclge_get_vport(handle); 6317 struct hclge_dev *hdev = vport->back; 6318 bool clear; 6319 6320 hdev->fd_en = enable; 6321 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; 6322 if (!enable) 6323 hclge_del_all_fd_entries(handle, clear); 6324 else 6325 hclge_restore_fd_entries(handle); 6326 } 6327 6328 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 6329 { 6330 struct hclge_desc desc; 6331 struct hclge_config_mac_mode_cmd *req = 6332 (struct hclge_config_mac_mode_cmd *)desc.data; 6333 u32 loop_en = 0; 6334 int ret; 6335 6336 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 6337 6338 if (enable) { 6339 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U); 6340 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U); 6341 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U); 6342 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U); 6343 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U); 6344 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U); 6345 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U); 6346 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U); 6347 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U); 6348 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U); 6349 } 6350 6351 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 6352 6353 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6354 if (ret) 6355 dev_err(&hdev->pdev->dev, 6356 "mac enable fail, ret =%d.\n", ret); 6357 } 6358 6359 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, 6360 u8 switch_param, u8 param_mask) 6361 { 6362 struct hclge_mac_vlan_switch_cmd *req; 6363 struct hclge_desc desc; 6364 u32 func_id; 6365 int ret; 6366 6367 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); 6368 req = (struct hclge_mac_vlan_switch_cmd *)desc.data; 6369 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, 6370 false); 6371 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; 6372 req->func_id = cpu_to_le32(func_id); 6373 req->switch_param = switch_param; 6374 req->param_mask = param_mask; 6375 6376 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6377 if (ret) 6378 dev_err(&hdev->pdev->dev, 6379 "set mac vlan switch parameter fail, ret = 
%d\n", ret); 6380 return ret; 6381 } 6382 6383 static void hclge_phy_link_status_wait(struct hclge_dev *hdev, 6384 int link_ret) 6385 { 6386 #define HCLGE_PHY_LINK_STATUS_NUM 200 6387 6388 struct phy_device *phydev = hdev->hw.mac.phydev; 6389 int i = 0; 6390 int ret; 6391 6392 do { 6393 ret = phy_read_status(phydev); 6394 if (ret) { 6395 dev_err(&hdev->pdev->dev, 6396 "phy update link status fail, ret = %d\n", ret); 6397 return; 6398 } 6399 6400 if (phydev->link == link_ret) 6401 break; 6402 6403 msleep(HCLGE_LINK_STATUS_MS); 6404 } while (++i < HCLGE_PHY_LINK_STATUS_NUM); 6405 } 6406 6407 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) 6408 { 6409 #define HCLGE_MAC_LINK_STATUS_NUM 100 6410 6411 int i = 0; 6412 int ret; 6413 6414 do { 6415 ret = hclge_get_mac_link_status(hdev); 6416 if (ret < 0) 6417 return ret; 6418 else if (ret == link_ret) 6419 return 0; 6420 6421 msleep(HCLGE_LINK_STATUS_MS); 6422 } while (++i < HCLGE_MAC_LINK_STATUS_NUM); 6423 return -EBUSY; 6424 } 6425 6426 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, 6427 bool is_phy) 6428 { 6429 #define HCLGE_LINK_STATUS_DOWN 0 6430 #define HCLGE_LINK_STATUS_UP 1 6431 6432 int link_ret; 6433 6434 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 6435 6436 if (is_phy) 6437 hclge_phy_link_status_wait(hdev, link_ret); 6438 6439 return hclge_mac_link_status_wait(hdev, link_ret); 6440 } 6441 6442 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) 6443 { 6444 struct hclge_config_mac_mode_cmd *req; 6445 struct hclge_desc desc; 6446 u32 loop_en; 6447 int ret; 6448 6449 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 6450 /* 1 Read out the MAC mode config at first */ 6451 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 6452 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6453 if (ret) { 6454 dev_err(&hdev->pdev->dev, 6455 "mac loopback get fail, ret =%d.\n", ret); 6456 return ret; 6457 } 6458 6459 /* 2 Then setup the loopback flag */ 6460 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 6461 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); 6462 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); 6463 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 
1 : 0);
6464
6465 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6466
6467 /* 3 Config mac work mode with loopback flag
6468 * and its original configure parameters
6469 */
6470 hclge_cmd_reuse_desc(&desc, false);
6471 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6472 if (ret)
6473 dev_err(&hdev->pdev->dev,
6474 "mac loopback set fail, ret =%d.\n", ret);
6475 return ret;
6476 }
6477
6478 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6479 enum hnae3_loop loop_mode)
6480 {
6481 #define HCLGE_SERDES_RETRY_MS 10
6482 #define HCLGE_SERDES_RETRY_NUM 100
6483
6484 struct hclge_serdes_lb_cmd *req;
6485 struct hclge_desc desc;
6486 int ret, i = 0;
6487 u8 loop_mode_b;
6488
6489 req = (struct hclge_serdes_lb_cmd *)desc.data;
6490 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6491
6492 switch (loop_mode) {
6493 case HNAE3_LOOP_SERIAL_SERDES:
6494 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6495 break;
6496 case HNAE3_LOOP_PARALLEL_SERDES:
6497 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6498 break;
6499 default:
6500 dev_err(&hdev->pdev->dev,
6501 "unsupported serdes loopback mode %d\n", loop_mode);
6502 return -ENOTSUPP;
6503 }
6504
6505 if (en) {
6506 req->enable = loop_mode_b;
6507 req->mask = loop_mode_b;
6508 } else {
6509 req->mask = loop_mode_b;
6510 }
6511
6512 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6513 if (ret) {
6514 dev_err(&hdev->pdev->dev,
6515 "serdes loopback set fail, ret = %d\n", ret);
6516 return ret;
6517 }
6518
6519 do {
6520 msleep(HCLGE_SERDES_RETRY_MS);
6521 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6522 true);
6523 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6524 if (ret) {
6525 dev_err(&hdev->pdev->dev,
6526 "serdes loopback get, ret = %d\n", ret);
6527 return ret;
6528 }
6529 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6530 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6531
6532 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6533 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6534 return -EBUSY;
6535 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6536 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6537 return -EIO;
6538 }
6539 return ret;
6540 }
6541
6542 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6543 enum hnae3_loop loop_mode)
6544 {
6545 int ret;
6546
6547 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6548 if (ret)
6549 return ret;
6550
6551 hclge_cfg_mac_mode(hdev, en);
6552
6553 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6554 if (ret)
6555 dev_err(&hdev->pdev->dev,
6556 "serdes loopback config mac mode timeout\n");
6557
6558 return ret;
6559 }
6560
6561 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6562 struct phy_device *phydev)
6563 {
6564 int ret;
6565
6566 if (!phydev->suspended) {
6567 ret = phy_suspend(phydev);
6568 if (ret)
6569 return ret;
6570 }
6571
6572 ret = phy_resume(phydev);
6573 if (ret)
6574 return ret;
6575
6576 return phy_loopback(phydev, true);
6577 }
6578
6579 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6580 struct phy_device *phydev)
6581 {
6582 int ret;
6583
6584 ret = phy_loopback(phydev, false);
6585 if (ret)
6586 return ret;
6587
6588 return phy_suspend(phydev);
6589 }
6590
6591 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6592 {
6593 struct phy_device *phydev = hdev->hw.mac.phydev;
6594 int ret;
6595
6596 if (!phydev)
6597 return -ENOTSUPP;
6598
6599 if (en)
6600 ret = hclge_enable_phy_loopback(hdev, phydev);
6601 else
6602 ret = hclge_disable_phy_loopback(hdev, phydev);
6603 if (ret) {
6604 dev_err(&hdev->pdev->dev,
6605 "set phy loopback fail, ret = %d\n", ret);
6606 return ret;
6607 }
6608
6609 hclge_cfg_mac_mode(hdev, en);
6610
6611 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6612 if (ret)
6613 dev_err(&hdev->pdev->dev,
6614 "phy loopback config mac mode timeout\n");
6615
6616 return ret;
6617 }
6618
6619 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6620 int stream_id, bool enable)
6621 {
6622 struct hclge_desc desc;
6623 struct hclge_cfg_com_tqp_queue_cmd *req =
6624 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6625 int ret;
6626
6627 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6628 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6629 req->stream_id = cpu_to_le16(stream_id);
6630 if (enable)
6631 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6632
6633 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6634 if (ret)
6635 dev_err(&hdev->pdev->dev,
6636 "Tqp enable fail, status =%d.\n", ret);
6637 return ret;
6638 }
6639
6640 static int hclge_set_loopback(struct hnae3_handle *handle,
6641 enum hnae3_loop loop_mode, bool en)
6642 {
6643 struct hclge_vport *vport = hclge_get_vport(handle);
6644 struct hnae3_knic_private_info *kinfo;
6645 struct hclge_dev *hdev = vport->back;
6646 int i, ret;
6647
6648 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6649 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6650 * the same, the packets are looped back in the SSU. If SSU loopback
6651 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6652 */
6653 if (hdev->pdev->revision >= 0x21) {
6654 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6655
6656 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6657 HCLGE_SWITCH_ALW_LPBK_MASK);
6658 if (ret)
6659 return ret;
6660 }
6661
6662 switch (loop_mode) {
6663 case HNAE3_LOOP_APP:
6664 ret = hclge_set_app_loopback(hdev, en);
6665 break;
6666 case HNAE3_LOOP_SERIAL_SERDES:
6667 case HNAE3_LOOP_PARALLEL_SERDES:
6668 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6669 break;
6670 case HNAE3_LOOP_PHY:
6671 ret = hclge_set_phy_loopback(hdev, en);
6672 break;
6673 default:
6674 ret = -ENOTSUPP;
6675 dev_err(&hdev->pdev->dev,
6676 "loop_mode %d is not supported\n", loop_mode);
6677 break;
6678 }
6679
6680 if (ret)
6681 return ret;
6682
6683 kinfo = &vport->nic.kinfo;
6684 for (i = 0; i < kinfo->num_tqps; i++) {
6685 ret = hclge_tqp_enable(hdev, i, 0, en);
6686 if (ret)
6687 return ret;
6688 }
6689
6690 return 0;
6691 }
6692
6693 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6694 {
6695 int ret;
6696
6697 ret = hclge_set_app_loopback(hdev, false);
6698 if (ret)
6699 return ret;
6700
6701 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6702 if (ret)
6703 return ret;
6704
6705 return hclge_cfg_serdes_loopback(hdev, false,
6706 HNAE3_LOOP_PARALLEL_SERDES);
6707 }
6708
6709 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6710 {
6711 struct hclge_vport *vport = hclge_get_vport(handle);
6712 struct hnae3_knic_private_info *kinfo;
6713 struct hnae3_queue *queue;
6714 struct hclge_tqp *tqp;
6715 int i;
6716
6717 kinfo = &vport->nic.kinfo;
6718 for (i = 0; i < kinfo->num_tqps; i++) {
6719 queue = handle->kinfo.tqp[i];
6720 tqp = container_of(queue, struct hclge_tqp, q);
6721 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6722 }
6723 }
6724
6725 static void
hclge_set_timer_task(struct hnae3_handle *handle, bool enable) 6726 { 6727 struct hclge_vport *vport = hclge_get_vport(handle); 6728 struct hclge_dev *hdev = vport->back; 6729 6730 if (enable) { 6731 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); 6732 } else { 6733 /* Set the DOWN flag here to disable the service to be 6734 * scheduled again 6735 */ 6736 set_bit(HCLGE_STATE_DOWN, &hdev->state); 6737 cancel_delayed_work_sync(&hdev->service_task); 6738 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 6739 } 6740 } 6741 6742 static int hclge_ae_start(struct hnae3_handle *handle) 6743 { 6744 struct hclge_vport *vport = hclge_get_vport(handle); 6745 struct hclge_dev *hdev = vport->back; 6746 6747 /* mac enable */ 6748 hclge_cfg_mac_mode(hdev, true); 6749 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 6750 hdev->hw.mac.link = 0; 6751 6752 /* reset tqp stats */ 6753 hclge_reset_tqp_stats(handle); 6754 6755 hclge_mac_start_phy(hdev); 6756 6757 return 0; 6758 } 6759 6760 static void hclge_ae_stop(struct hnae3_handle *handle) 6761 { 6762 struct hclge_vport *vport = hclge_get_vport(handle); 6763 struct hclge_dev *hdev = vport->back; 6764 int i; 6765 6766 set_bit(HCLGE_STATE_DOWN, &hdev->state); 6767 6768 hclge_clear_arfs_rules(handle); 6769 6770 /* If it is not PF reset, the firmware will disable the MAC, 6771 * so it only need to stop phy here. 6772 */ 6773 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && 6774 hdev->reset_type != HNAE3_FUNC_RESET) { 6775 hclge_mac_stop_phy(hdev); 6776 hclge_update_link_status(hdev); 6777 return; 6778 } 6779 6780 for (i = 0; i < handle->kinfo.num_tqps; i++) 6781 hclge_reset_tqp(handle, i); 6782 6783 hclge_config_mac_tnl_int(hdev, false); 6784 6785 /* Mac disable */ 6786 hclge_cfg_mac_mode(hdev, false); 6787 6788 hclge_mac_stop_phy(hdev); 6789 6790 /* reset tqp stats */ 6791 hclge_reset_tqp_stats(handle); 6792 hclge_update_link_status(hdev); 6793 } 6794 6795 int hclge_vport_start(struct hclge_vport *vport) 6796 { 6797 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 6798 vport->last_active_jiffies = jiffies; 6799 return 0; 6800 } 6801 6802 void hclge_vport_stop(struct hclge_vport *vport) 6803 { 6804 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 6805 } 6806 6807 static int hclge_client_start(struct hnae3_handle *handle) 6808 { 6809 struct hclge_vport *vport = hclge_get_vport(handle); 6810 6811 return hclge_vport_start(vport); 6812 } 6813 6814 static void hclge_client_stop(struct hnae3_handle *handle) 6815 { 6816 struct hclge_vport *vport = hclge_get_vport(handle); 6817 6818 hclge_vport_stop(vport); 6819 } 6820 6821 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 6822 u16 cmdq_resp, u8 resp_code, 6823 enum hclge_mac_vlan_tbl_opcode op) 6824 { 6825 struct hclge_dev *hdev = vport->back; 6826 6827 if (cmdq_resp) { 6828 dev_err(&hdev->pdev->dev, 6829 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n", 6830 cmdq_resp); 6831 return -EIO; 6832 } 6833 6834 if (op == HCLGE_MAC_VLAN_ADD) { 6835 if ((!resp_code) || (resp_code == 1)) { 6836 return 0; 6837 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) { 6838 dev_err(&hdev->pdev->dev, 6839 "add mac addr failed for uc_overflow.\n"); 6840 return -ENOSPC; 6841 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) { 6842 dev_err(&hdev->pdev->dev, 6843 "add mac addr failed for mc_overflow.\n"); 6844 return -ENOSPC; 6845 } 6846 6847 dev_err(&hdev->pdev->dev, 6848 "add mac addr failed for undefined, code=%u.\n", 6849 resp_code); 6850 return -EIO; 6851 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 
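/* For both remove and lookup, the firmware returns resp_code 0 on
 * success and 1 when the requested entry is not in the MAC_VLAN table.
 */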
6852 if (!resp_code) { 6853 return 0; 6854 } else if (resp_code == 1) { 6855 dev_dbg(&hdev->pdev->dev, 6856 "remove mac addr failed for miss.\n"); 6857 return -ENOENT; 6858 } 6859 6860 dev_err(&hdev->pdev->dev, 6861 "remove mac addr failed for undefined, code=%u.\n", 6862 resp_code); 6863 return -EIO; 6864 } else if (op == HCLGE_MAC_VLAN_LKUP) { 6865 if (!resp_code) { 6866 return 0; 6867 } else if (resp_code == 1) { 6868 dev_dbg(&hdev->pdev->dev, 6869 "lookup mac addr failed for miss.\n"); 6870 return -ENOENT; 6871 } 6872 6873 dev_err(&hdev->pdev->dev, 6874 "lookup mac addr failed for undefined, code=%u.\n", 6875 resp_code); 6876 return -EIO; 6877 } 6878 6879 dev_err(&hdev->pdev->dev, 6880 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); 6881 6882 return -EINVAL; 6883 } 6884 6885 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 6886 { 6887 #define HCLGE_VF_NUM_IN_FIRST_DESC 192 6888 6889 unsigned int word_num; 6890 unsigned int bit_num; 6891 6892 if (vfid > 255 || vfid < 0) 6893 return -EIO; 6894 6895 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { 6896 word_num = vfid / 32; 6897 bit_num = vfid % 32; 6898 if (clr) 6899 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 6900 else 6901 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 6902 } else { 6903 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; 6904 bit_num = vfid % 32; 6905 if (clr) 6906 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 6907 else 6908 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 6909 } 6910 6911 return 0; 6912 } 6913 6914 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 6915 { 6916 #define HCLGE_DESC_NUMBER 3 6917 #define HCLGE_FUNC_NUMBER_PER_DESC 6 6918 int i, j; 6919 6920 for (i = 1; i < HCLGE_DESC_NUMBER; i++) 6921 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 6922 if (desc[i].data[j]) 6923 return false; 6924 6925 return true; 6926 } 6927 6928 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 6929 const u8 *addr, bool is_mc) 6930 { 6931 const unsigned char *mac_addr = addr; 6932 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 6933 (mac_addr[0]) | (mac_addr[1] << 8); 6934 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 6935 6936 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 6937 if (is_mc) { 6938 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 6939 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 6940 } 6941 6942 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 6943 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 6944 } 6945 6946 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 6947 struct hclge_mac_vlan_tbl_entry_cmd *req) 6948 { 6949 struct hclge_dev *hdev = vport->back; 6950 struct hclge_desc desc; 6951 u8 resp_code; 6952 u16 retval; 6953 int ret; 6954 6955 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 6956 6957 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 6958 6959 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6960 if (ret) { 6961 dev_err(&hdev->pdev->dev, 6962 "del mac addr failed for cmd_send, ret =%d.\n", 6963 ret); 6964 return ret; 6965 } 6966 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 6967 retval = le16_to_cpu(desc.retval); 6968 6969 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 6970 HCLGE_MAC_VLAN_REMOVE); 6971 } 6972 6973 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 6974 struct hclge_mac_vlan_tbl_entry_cmd *req, 
6975 struct hclge_desc *desc, 6976 bool is_mc) 6977 { 6978 struct hclge_dev *hdev = vport->back; 6979 u8 resp_code; 6980 u16 retval; 6981 int ret; 6982 6983 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 6984 if (is_mc) { 6985 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 6986 memcpy(desc[0].data, 6987 req, 6988 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 6989 hclge_cmd_setup_basic_desc(&desc[1], 6990 HCLGE_OPC_MAC_VLAN_ADD, 6991 true); 6992 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 6993 hclge_cmd_setup_basic_desc(&desc[2], 6994 HCLGE_OPC_MAC_VLAN_ADD, 6995 true); 6996 ret = hclge_cmd_send(&hdev->hw, desc, 3); 6997 } else { 6998 memcpy(desc[0].data, 6999 req, 7000 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 7001 ret = hclge_cmd_send(&hdev->hw, desc, 1); 7002 } 7003 if (ret) { 7004 dev_err(&hdev->pdev->dev, 7005 "lookup mac addr failed for cmd_send, ret =%d.\n", 7006 ret); 7007 return ret; 7008 } 7009 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 7010 retval = le16_to_cpu(desc[0].retval); 7011 7012 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 7013 HCLGE_MAC_VLAN_LKUP); 7014 } 7015 7016 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 7017 struct hclge_mac_vlan_tbl_entry_cmd *req, 7018 struct hclge_desc *mc_desc) 7019 { 7020 struct hclge_dev *hdev = vport->back; 7021 int cfg_status; 7022 u8 resp_code; 7023 u16 retval; 7024 int ret; 7025 7026 if (!mc_desc) { 7027 struct hclge_desc desc; 7028 7029 hclge_cmd_setup_basic_desc(&desc, 7030 HCLGE_OPC_MAC_VLAN_ADD, 7031 false); 7032 memcpy(desc.data, req, 7033 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 7034 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7035 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 7036 retval = le16_to_cpu(desc.retval); 7037 7038 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 7039 resp_code, 7040 HCLGE_MAC_VLAN_ADD); 7041 } else { 7042 hclge_cmd_reuse_desc(&mc_desc[0], false); 7043 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 7044 hclge_cmd_reuse_desc(&mc_desc[1], false); 7045 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 7046 hclge_cmd_reuse_desc(&mc_desc[2], false); 7047 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 7048 memcpy(mc_desc[0].data, req, 7049 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 7050 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 7051 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 7052 retval = le16_to_cpu(mc_desc[0].retval); 7053 7054 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 7055 resp_code, 7056 HCLGE_MAC_VLAN_ADD); 7057 } 7058 7059 if (ret) { 7060 dev_err(&hdev->pdev->dev, 7061 "add mac addr failed for cmd_send, ret =%d.\n", 7062 ret); 7063 return ret; 7064 } 7065 7066 return cfg_status; 7067 } 7068 7069 static int hclge_init_umv_space(struct hclge_dev *hdev) 7070 { 7071 u16 allocated_size = 0; 7072 int ret; 7073 7074 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, 7075 true); 7076 if (ret) 7077 return ret; 7078 7079 if (allocated_size < hdev->wanted_umv_size) 7080 dev_warn(&hdev->pdev->dev, 7081 "Alloc umv space failed, want %u, get %u\n", 7082 hdev->wanted_umv_size, allocated_size); 7083 7084 mutex_init(&hdev->umv_mutex); 7085 hdev->max_umv_size = allocated_size; 7086 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to 7087 * preserve some unicast mac vlan table entries shared by pf 7088 * and its vfs. 
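 * For example (illustrative numbers only): with 128 allocated entries
 * and 3 requested VFs the divisor is 5, so each function owns
 * 128 / 5 = 25 private entries and the shared pool starts at
 * 25 + 128 % 5 = 28 entries.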
7089 */ 7090 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); 7091 hdev->share_umv_size = hdev->priv_umv_size + 7092 hdev->max_umv_size % (hdev->num_req_vfs + 2); 7093 7094 return 0; 7095 } 7096 7097 static int hclge_uninit_umv_space(struct hclge_dev *hdev) 7098 { 7099 int ret; 7100 7101 if (hdev->max_umv_size > 0) { 7102 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, 7103 false); 7104 if (ret) 7105 return ret; 7106 hdev->max_umv_size = 0; 7107 } 7108 mutex_destroy(&hdev->umv_mutex); 7109 7110 return 0; 7111 } 7112 7113 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, 7114 u16 *allocated_size, bool is_alloc) 7115 { 7116 struct hclge_umv_spc_alc_cmd *req; 7117 struct hclge_desc desc; 7118 int ret; 7119 7120 req = (struct hclge_umv_spc_alc_cmd *)desc.data; 7121 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); 7122 if (!is_alloc) 7123 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1); 7124 7125 req->space_size = cpu_to_le32(space_size); 7126 7127 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7128 if (ret) { 7129 dev_err(&hdev->pdev->dev, 7130 "%s umv space failed for cmd_send, ret =%d\n", 7131 is_alloc ? "allocate" : "free", ret); 7132 return ret; 7133 } 7134 7135 if (is_alloc && allocated_size) 7136 *allocated_size = le32_to_cpu(desc.data[1]); 7137 7138 return 0; 7139 } 7140 7141 static void hclge_reset_umv_space(struct hclge_dev *hdev) 7142 { 7143 struct hclge_vport *vport; 7144 int i; 7145 7146 for (i = 0; i < hdev->num_alloc_vport; i++) { 7147 vport = &hdev->vport[i]; 7148 vport->used_umv_num = 0; 7149 } 7150 7151 mutex_lock(&hdev->umv_mutex); 7152 hdev->share_umv_size = hdev->priv_umv_size + 7153 hdev->max_umv_size % (hdev->num_req_vfs + 2); 7154 mutex_unlock(&hdev->umv_mutex); 7155 } 7156 7157 static bool hclge_is_umv_space_full(struct hclge_vport *vport) 7158 { 7159 struct hclge_dev *hdev = vport->back; 7160 bool is_full; 7161 7162 mutex_lock(&hdev->umv_mutex); 7163 is_full = (vport->used_umv_num >= hdev->priv_umv_size && 7164 hdev->share_umv_size == 0); 7165 mutex_unlock(&hdev->umv_mutex); 7166 7167 return is_full; 7168 } 7169 7170 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) 7171 { 7172 struct hclge_dev *hdev = vport->back; 7173 7174 mutex_lock(&hdev->umv_mutex); 7175 if (is_free) { 7176 if (vport->used_umv_num > hdev->priv_umv_size) 7177 hdev->share_umv_size++; 7178 7179 if (vport->used_umv_num > 0) 7180 vport->used_umv_num--; 7181 } else { 7182 if (vport->used_umv_num >= hdev->priv_umv_size && 7183 hdev->share_umv_size > 0) 7184 hdev->share_umv_size--; 7185 vport->used_umv_num++; 7186 } 7187 mutex_unlock(&hdev->umv_mutex); 7188 } 7189 7190 static int hclge_add_uc_addr(struct hnae3_handle *handle, 7191 const unsigned char *addr) 7192 { 7193 struct hclge_vport *vport = hclge_get_vport(handle); 7194 7195 return hclge_add_uc_addr_common(vport, addr); 7196 } 7197 7198 int hclge_add_uc_addr_common(struct hclge_vport *vport, 7199 const unsigned char *addr) 7200 { 7201 struct hclge_dev *hdev = vport->back; 7202 struct hclge_mac_vlan_tbl_entry_cmd req; 7203 struct hclge_desc desc; 7204 u16 egress_port = 0; 7205 int ret; 7206 7207 /* mac addr check */ 7208 if (is_zero_ether_addr(addr) || 7209 is_broadcast_ether_addr(addr) || 7210 is_multicast_ether_addr(addr)) { 7211 dev_err(&hdev->pdev->dev, 7212 "Set_uc mac err! invalid mac:%pM. 
is_zero:%d,is_br=%d,is_mul=%d\n", 7213 addr, is_zero_ether_addr(addr), 7214 is_broadcast_ether_addr(addr), 7215 is_multicast_ether_addr(addr)); 7216 return -EINVAL; 7217 } 7218 7219 memset(&req, 0, sizeof(req)); 7220 7221 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 7222 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 7223 7224 req.egress_port = cpu_to_le16(egress_port); 7225 7226 hclge_prepare_mac_addr(&req, addr, false); 7227 7228 /* Lookup the mac address in the mac_vlan table, and add 7229 * it if the entry is inexistent. Repeated unicast entry 7230 * is not allowed in the mac vlan table. 7231 */ 7232 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 7233 if (ret == -ENOENT) { 7234 if (!hclge_is_umv_space_full(vport)) { 7235 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); 7236 if (!ret) 7237 hclge_update_umv_space(vport, false); 7238 return ret; 7239 } 7240 7241 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", 7242 hdev->priv_umv_size); 7243 7244 return -ENOSPC; 7245 } 7246 7247 /* check if we just hit the duplicate */ 7248 if (!ret) { 7249 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n", 7250 vport->vport_id, addr); 7251 return 0; 7252 } 7253 7254 dev_err(&hdev->pdev->dev, 7255 "PF failed to add unicast entry(%pM) in the MAC table\n", 7256 addr); 7257 7258 return ret; 7259 } 7260 7261 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 7262 const unsigned char *addr) 7263 { 7264 struct hclge_vport *vport = hclge_get_vport(handle); 7265 7266 return hclge_rm_uc_addr_common(vport, addr); 7267 } 7268 7269 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 7270 const unsigned char *addr) 7271 { 7272 struct hclge_dev *hdev = vport->back; 7273 struct hclge_mac_vlan_tbl_entry_cmd req; 7274 int ret; 7275 7276 /* mac addr check */ 7277 if (is_zero_ether_addr(addr) || 7278 is_broadcast_ether_addr(addr) || 7279 is_multicast_ether_addr(addr)) { 7280 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n", 7281 addr); 7282 return -EINVAL; 7283 } 7284 7285 memset(&req, 0, sizeof(req)); 7286 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 7287 hclge_prepare_mac_addr(&req, addr, false); 7288 ret = hclge_remove_mac_vlan_tbl(vport, &req); 7289 if (!ret) 7290 hclge_update_umv_space(vport, true); 7291 7292 return ret; 7293 } 7294 7295 static int hclge_add_mc_addr(struct hnae3_handle *handle, 7296 const unsigned char *addr) 7297 { 7298 struct hclge_vport *vport = hclge_get_vport(handle); 7299 7300 return hclge_add_mc_addr_common(vport, addr); 7301 } 7302 7303 int hclge_add_mc_addr_common(struct hclge_vport *vport, 7304 const unsigned char *addr) 7305 { 7306 struct hclge_dev *hdev = vport->back; 7307 struct hclge_mac_vlan_tbl_entry_cmd req; 7308 struct hclge_desc desc[3]; 7309 int status; 7310 7311 /* mac addr check */ 7312 if (!is_multicast_ether_addr(addr)) { 7313 dev_err(&hdev->pdev->dev, 7314 "Add mc mac err! 
invalid mac:%pM.\n", 7315 addr); 7316 return -EINVAL; 7317 } 7318 memset(&req, 0, sizeof(req)); 7319 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 7320 hclge_prepare_mac_addr(&req, addr, true); 7321 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 7322 if (status) { 7323 /* This mac addr do not exist, add new entry for it */ 7324 memset(desc[0].data, 0, sizeof(desc[0].data)); 7325 memset(desc[1].data, 0, sizeof(desc[0].data)); 7326 memset(desc[2].data, 0, sizeof(desc[0].data)); 7327 } 7328 status = hclge_update_desc_vfid(desc, vport->vport_id, false); 7329 if (status) 7330 return status; 7331 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 7332 7333 if (status == -ENOSPC) 7334 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); 7335 7336 return status; 7337 } 7338 7339 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 7340 const unsigned char *addr) 7341 { 7342 struct hclge_vport *vport = hclge_get_vport(handle); 7343 7344 return hclge_rm_mc_addr_common(vport, addr); 7345 } 7346 7347 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 7348 const unsigned char *addr) 7349 { 7350 struct hclge_dev *hdev = vport->back; 7351 struct hclge_mac_vlan_tbl_entry_cmd req; 7352 enum hclge_cmd_status status; 7353 struct hclge_desc desc[3]; 7354 7355 /* mac addr check */ 7356 if (!is_multicast_ether_addr(addr)) { 7357 dev_dbg(&hdev->pdev->dev, 7358 "Remove mc mac err! invalid mac:%pM.\n", 7359 addr); 7360 return -EINVAL; 7361 } 7362 7363 memset(&req, 0, sizeof(req)); 7364 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 7365 hclge_prepare_mac_addr(&req, addr, true); 7366 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 7367 if (!status) { 7368 /* This mac addr exist, remove this handle's VFID for it */ 7369 status = hclge_update_desc_vfid(desc, vport->vport_id, true); 7370 if (status) 7371 return status; 7372 7373 if (hclge_is_all_function_id_zero(desc)) 7374 /* All the vfid is zero, so need to delete this entry */ 7375 status = hclge_remove_mac_vlan_tbl(vport, &req); 7376 else 7377 /* Not all the vfid is zero, update the vfid */ 7378 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 7379 7380 } else { 7381 /* Maybe this mac address is in mta table, but it cannot be 7382 * deleted here because an entry of mta represents an address 7383 * range rather than a specific address. the delete action to 7384 * all entries will take effect in update_mta_status called by 7385 * hns3_nic_set_rx_mode. 7386 */ 7387 status = 0; 7388 } 7389 7390 return status; 7391 } 7392 7393 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, 7394 enum HCLGE_MAC_ADDR_TYPE mac_type) 7395 { 7396 struct hclge_vport_mac_addr_cfg *mac_cfg; 7397 struct list_head *list; 7398 7399 if (!vport->vport_id) 7400 return; 7401 7402 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL); 7403 if (!mac_cfg) 7404 return; 7405 7406 mac_cfg->hd_tbl_status = true; 7407 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN); 7408 7409 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 7410 &vport->uc_mac_list : &vport->mc_mac_list; 7411 7412 list_add_tail(&mac_cfg->node, list); 7413 } 7414 7415 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, 7416 bool is_write_tbl, 7417 enum HCLGE_MAC_ADDR_TYPE mac_type) 7418 { 7419 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; 7420 struct list_head *list; 7421 bool uc_flag, mc_flag; 7422 7423 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
7424 &vport->uc_mac_list : &vport->mc_mac_list; 7425 7426 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC; 7427 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; 7428 7429 list_for_each_entry_safe(mac_cfg, tmp, list, node) { 7430 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) { 7431 if (uc_flag && mac_cfg->hd_tbl_status) 7432 hclge_rm_uc_addr_common(vport, mac_addr); 7433 7434 if (mc_flag && mac_cfg->hd_tbl_status) 7435 hclge_rm_mc_addr_common(vport, mac_addr); 7436 7437 list_del(&mac_cfg->node); 7438 kfree(mac_cfg); 7439 break; 7440 } 7441 } 7442 } 7443 7444 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, 7445 enum HCLGE_MAC_ADDR_TYPE mac_type) 7446 { 7447 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; 7448 struct list_head *list; 7449 7450 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 7451 &vport->uc_mac_list : &vport->mc_mac_list; 7452 7453 list_for_each_entry_safe(mac_cfg, tmp, list, node) { 7454 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status) 7455 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr); 7456 7457 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status) 7458 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr); 7459 7460 mac_cfg->hd_tbl_status = false; 7461 if (is_del_list) { 7462 list_del(&mac_cfg->node); 7463 kfree(mac_cfg); 7464 } 7465 } 7466 } 7467 7468 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev) 7469 { 7470 struct hclge_vport_mac_addr_cfg *mac, *tmp; 7471 struct hclge_vport *vport; 7472 int i; 7473 7474 mutex_lock(&hdev->vport_cfg_mutex); 7475 for (i = 0; i < hdev->num_alloc_vport; i++) { 7476 vport = &hdev->vport[i]; 7477 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) { 7478 list_del(&mac->node); 7479 kfree(mac); 7480 } 7481 7482 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) { 7483 list_del(&mac->node); 7484 kfree(mac); 7485 } 7486 } 7487 mutex_unlock(&hdev->vport_cfg_mutex); 7488 } 7489 7490 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 7491 u16 cmdq_resp, u8 resp_code) 7492 { 7493 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 7494 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 7495 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 7496 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 7497 7498 int return_status; 7499 7500 if (cmdq_resp) { 7501 dev_err(&hdev->pdev->dev, 7502 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 7503 cmdq_resp); 7504 return -EIO; 7505 } 7506 7507 switch (resp_code) { 7508 case HCLGE_ETHERTYPE_SUCCESS_ADD: 7509 case HCLGE_ETHERTYPE_ALREADY_ADD: 7510 return_status = 0; 7511 break; 7512 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 7513 dev_err(&hdev->pdev->dev, 7514 "add mac ethertype failed for manager table overflow.\n"); 7515 return_status = -EIO; 7516 break; 7517 case HCLGE_ETHERTYPE_KEY_CONFLICT: 7518 dev_err(&hdev->pdev->dev, 7519 "add mac ethertype failed for key conflict.\n"); 7520 return_status = -EIO; 7521 break; 7522 default: 7523 dev_err(&hdev->pdev->dev, 7524 "add mac ethertype failed for undefined, code=%u.\n", 7525 resp_code); 7526 return_status = -EIO; 7527 } 7528 7529 return return_status; 7530 } 7531 7532 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx, 7533 u8 *mac_addr) 7534 { 7535 struct hclge_mac_vlan_tbl_entry_cmd req; 7536 struct hclge_dev *hdev = vport->back; 7537 struct hclge_desc desc; 7538 u16 egress_port = 0; 7539 int i; 7540 7541 if (is_zero_ether_addr(mac_addr)) 7542 return false; 7543 7544 memset(&req, 0, sizeof(req)); 7545 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 
7546 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 7547 req.egress_port = cpu_to_le16(egress_port); 7548 hclge_prepare_mac_addr(&req, mac_addr, false); 7549 7550 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT) 7551 return true; 7552 7553 vf_idx += HCLGE_VF_VPORT_START_NUM; 7554 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) 7555 if (i != vf_idx && 7556 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac)) 7557 return true; 7558 7559 return false; 7560 } 7561 7562 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, 7563 u8 *mac_addr) 7564 { 7565 struct hclge_vport *vport = hclge_get_vport(handle); 7566 struct hclge_dev *hdev = vport->back; 7567 7568 vport = hclge_get_vf_vport(hdev, vf); 7569 if (!vport) 7570 return -EINVAL; 7571 7572 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { 7573 dev_info(&hdev->pdev->dev, 7574 "Specified MAC(=%pM) is same as before, no change committed!\n", 7575 mac_addr); 7576 return 0; 7577 } 7578 7579 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) { 7580 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n", 7581 mac_addr); 7582 return -EEXIST; 7583 } 7584 7585 ether_addr_copy(vport->vf_info.mac, mac_addr); 7586 dev_info(&hdev->pdev->dev, 7587 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n", 7588 vf, mac_addr); 7589 7590 return hclge_inform_reset_assert_to_vf(vport); 7591 } 7592 7593 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 7594 const struct hclge_mac_mgr_tbl_entry_cmd *req) 7595 { 7596 struct hclge_desc desc; 7597 u8 resp_code; 7598 u16 retval; 7599 int ret; 7600 7601 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 7602 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 7603 7604 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7605 if (ret) { 7606 dev_err(&hdev->pdev->dev, 7607 "add mac ethertype failed for cmd_send, ret =%d.\n", 7608 ret); 7609 return ret; 7610 } 7611 7612 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 7613 retval = le16_to_cpu(desc.retval); 7614 7615 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 7616 } 7617 7618 static int init_mgr_tbl(struct hclge_dev *hdev) 7619 { 7620 int ret; 7621 int i; 7622 7623 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 7624 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 7625 if (ret) { 7626 dev_err(&hdev->pdev->dev, 7627 "add mac ethertype failed, ret =%d.\n", 7628 ret); 7629 return ret; 7630 } 7631 } 7632 7633 return 0; 7634 } 7635 7636 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 7637 { 7638 struct hclge_vport *vport = hclge_get_vport(handle); 7639 struct hclge_dev *hdev = vport->back; 7640 7641 ether_addr_copy(p, hdev->hw.mac.mac_addr); 7642 } 7643 7644 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, 7645 bool is_first) 7646 { 7647 const unsigned char *new_addr = (const unsigned char *)p; 7648 struct hclge_vport *vport = hclge_get_vport(handle); 7649 struct hclge_dev *hdev = vport->back; 7650 int ret; 7651 7652 /* mac addr check */ 7653 if (is_zero_ether_addr(new_addr) || 7654 is_broadcast_ether_addr(new_addr) || 7655 is_multicast_ether_addr(new_addr)) { 7656 dev_err(&hdev->pdev->dev, 7657 "Change uc mac err! 
invalid mac:%pM.\n", 7658 new_addr); 7659 return -EINVAL; 7660 } 7661 7662 if ((!is_first || is_kdump_kernel()) && 7663 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) 7664 dev_warn(&hdev->pdev->dev, 7665 "remove old uc mac address fail.\n"); 7666 7667 ret = hclge_add_uc_addr(handle, new_addr); 7668 if (ret) { 7669 dev_err(&hdev->pdev->dev, 7670 "add uc mac address fail, ret =%d.\n", 7671 ret); 7672 7673 if (!is_first && 7674 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) 7675 dev_err(&hdev->pdev->dev, 7676 "restore uc mac address fail.\n"); 7677 7678 return -EIO; 7679 } 7680 7681 ret = hclge_pause_addr_cfg(hdev, new_addr); 7682 if (ret) { 7683 dev_err(&hdev->pdev->dev, 7684 "configure mac pause address fail, ret =%d.\n", 7685 ret); 7686 return -EIO; 7687 } 7688 7689 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 7690 7691 return 0; 7692 } 7693 7694 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, 7695 int cmd) 7696 { 7697 struct hclge_vport *vport = hclge_get_vport(handle); 7698 struct hclge_dev *hdev = vport->back; 7699 7700 if (!hdev->hw.mac.phydev) 7701 return -EOPNOTSUPP; 7702 7703 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); 7704 } 7705 7706 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 7707 u8 fe_type, bool filter_en, u8 vf_id) 7708 { 7709 struct hclge_vlan_filter_ctrl_cmd *req; 7710 struct hclge_desc desc; 7711 int ret; 7712 7713 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 7714 7715 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 7716 req->vlan_type = vlan_type; 7717 req->vlan_fe = filter_en ? fe_type : 0; 7718 req->vf_id = vf_id; 7719 7720 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7721 if (ret) 7722 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 7723 ret); 7724 7725 return ret; 7726 } 7727 7728 #define HCLGE_FILTER_TYPE_VF 0 7729 #define HCLGE_FILTER_TYPE_PORT 1 7730 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) 7731 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) 7732 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) 7733 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) 7734 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) 7735 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ 7736 | HCLGE_FILTER_FE_ROCE_EGRESS_B) 7737 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ 7738 | HCLGE_FILTER_FE_ROCE_INGRESS_B) 7739 7740 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 7741 { 7742 struct hclge_vport *vport = hclge_get_vport(handle); 7743 struct hclge_dev *hdev = vport->back; 7744 7745 if (hdev->pdev->revision >= 0x21) { 7746 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 7747 HCLGE_FILTER_FE_EGRESS, enable, 0); 7748 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 7749 HCLGE_FILTER_FE_INGRESS, enable, 0); 7750 } else { 7751 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 7752 HCLGE_FILTER_FE_EGRESS_V1_B, enable, 7753 0); 7754 } 7755 if (enable) 7756 handle->netdev_flags |= HNAE3_VLAN_FLTR; 7757 else 7758 handle->netdev_flags &= ~HNAE3_VLAN_FLTR; 7759 } 7760 7761 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, 7762 bool is_kill, u16 vlan, 7763 __be16 proto) 7764 { 7765 struct hclge_vport *vport = &hdev->vport[vfid]; 7766 struct hclge_vlan_filter_vf_cfg_cmd *req0; 7767 struct hclge_vlan_filter_vf_cfg_cmd *req1; 7768 struct hclge_desc desc[2]; 7769 u8 vf_byte_val; 7770 u8 vf_byte_off; 7771 int ret; 7772 7773 /* if vf vlan table is full, firmware will close vf vlan filter, it 7774 * is 
not possible, and unnecessary, to add a new vlan id to the vf vlan filter. 7775 * If spoof check is enabled and the vf vlan table is full, we shouldn't 7776 * add a new vlan, because tx packets with these vlan ids will be dropped. 7777 */ 7778 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { 7779 if (vport->vf_info.spoofchk && vlan) { 7780 dev_err(&hdev->pdev->dev, 7781 "Can't add vlan due to spoof check is on and vf vlan table is full\n"); 7782 return -EPERM; 7783 } 7784 return 0; 7785 } 7786 7787 hclge_cmd_setup_basic_desc(&desc[0], 7788 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 7789 hclge_cmd_setup_basic_desc(&desc[1], 7790 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 7791 7792 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 7793 7794 vf_byte_off = vfid / 8; 7795 vf_byte_val = 1 << (vfid % 8); 7796 7797 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 7798 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 7799 7800 req0->vlan_id = cpu_to_le16(vlan); 7801 req0->vlan_cfg = is_kill; 7802 7803 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 7804 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 7805 else 7806 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 7807 7808 ret = hclge_cmd_send(&hdev->hw, desc, 2); 7809 if (ret) { 7810 dev_err(&hdev->pdev->dev, 7811 "Send vf vlan command fail, ret =%d.\n", 7812 ret); 7813 return ret; 7814 } 7815 7816 if (!is_kill) { 7817 #define HCLGE_VF_VLAN_NO_ENTRY 2 7818 if (!req0->resp_code || req0->resp_code == 1) 7819 return 0; 7820 7821 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { 7822 set_bit(vfid, hdev->vf_vlan_full); 7823 dev_warn(&hdev->pdev->dev, 7824 "vf vlan table is full, vf vlan filter is disabled\n"); 7825 return 0; 7826 } 7827 7828 dev_err(&hdev->pdev->dev, 7829 "Add vf vlan filter fail, ret =%u.\n", 7830 req0->resp_code); 7831 } else { 7832 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 7833 if (!req0->resp_code) 7834 return 0; 7835 7836 /* vf vlan filter is disabled when vf vlan table is full, 7837 * so new vlan ids will not be added into the vf vlan table. 7838 * Just return 0 without a warning to avoid massive verbose 7839 * print logs when unloading. 
7840 */ 7841 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) 7842 return 0; 7843 7844 dev_err(&hdev->pdev->dev, 7845 "Kill vf vlan filter fail, ret =%u.\n", 7846 req0->resp_code); 7847 } 7848 7849 return -EIO; 7850 } 7851 7852 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 7853 u16 vlan_id, bool is_kill) 7854 { 7855 struct hclge_vlan_filter_pf_cfg_cmd *req; 7856 struct hclge_desc desc; 7857 u8 vlan_offset_byte_val; 7858 u8 vlan_offset_byte; 7859 u8 vlan_offset_160; 7860 int ret; 7861 7862 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 7863 7864 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; 7865 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / 7866 HCLGE_VLAN_BYTE_SIZE; 7867 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); 7868 7869 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 7870 req->vlan_offset = vlan_offset_160; 7871 req->vlan_cfg = is_kill; 7872 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 7873 7874 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7875 if (ret) 7876 dev_err(&hdev->pdev->dev, 7877 "port vlan command, send fail, ret =%d.\n", ret); 7878 return ret; 7879 } 7880 7881 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 7882 u16 vport_id, u16 vlan_id, 7883 bool is_kill) 7884 { 7885 u16 vport_idx, vport_num = 0; 7886 int ret; 7887 7888 if (is_kill && !vlan_id) 7889 return 0; 7890 7891 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 7892 proto); 7893 if (ret) { 7894 dev_err(&hdev->pdev->dev, 7895 "Set %u vport vlan filter config fail, ret =%d.\n", 7896 vport_id, ret); 7897 return ret; 7898 } 7899 7900 /* vlan 0 may be added twice when 8021q module is enabled */ 7901 if (!is_kill && !vlan_id && 7902 test_bit(vport_id, hdev->vlan_table[vlan_id])) 7903 return 0; 7904 7905 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 7906 dev_err(&hdev->pdev->dev, 7907 "Add port vlan failed, vport %u is already in vlan %u\n", 7908 vport_id, vlan_id); 7909 return -EINVAL; 7910 } 7911 7912 if (is_kill && 7913 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 7914 dev_err(&hdev->pdev->dev, 7915 "Delete port vlan failed, vport %u is not in vlan %u\n", 7916 vport_id, vlan_id); 7917 return -EINVAL; 7918 } 7919 7920 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) 7921 vport_num++; 7922 7923 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 7924 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 7925 is_kill); 7926 7927 return ret; 7928 } 7929 7930 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 7931 { 7932 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 7933 struct hclge_vport_vtag_tx_cfg_cmd *req; 7934 struct hclge_dev *hdev = vport->back; 7935 struct hclge_desc desc; 7936 u16 bmap_index; 7937 int status; 7938 7939 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 7940 7941 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; 7942 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 7943 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 7944 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 7945 vcfg->accept_tag1 ? 1 : 0); 7946 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 7947 vcfg->accept_untag1 ? 1 : 0); 7948 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 7949 vcfg->accept_tag2 ? 1 : 0); 7950 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 7951 vcfg->accept_untag2 ? 
1 : 0); 7952 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 7953 vcfg->insert_tag1_en ? 1 : 0); 7954 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 7955 vcfg->insert_tag2_en ? 1 : 0); 7956 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 7957 7958 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 7959 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 7960 HCLGE_VF_NUM_PER_BYTE; 7961 req->vf_bitmap[bmap_index] = 7962 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 7963 7964 status = hclge_cmd_send(&hdev->hw, &desc, 1); 7965 if (status) 7966 dev_err(&hdev->pdev->dev, 7967 "Send port txvlan cfg command fail, ret =%d\n", 7968 status); 7969 7970 return status; 7971 } 7972 7973 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 7974 { 7975 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 7976 struct hclge_vport_vtag_rx_cfg_cmd *req; 7977 struct hclge_dev *hdev = vport->back; 7978 struct hclge_desc desc; 7979 u16 bmap_index; 7980 int status; 7981 7982 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 7983 7984 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 7985 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 7986 vcfg->strip_tag1_en ? 1 : 0); 7987 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 7988 vcfg->strip_tag2_en ? 1 : 0); 7989 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 7990 vcfg->vlan1_vlan_prionly ? 1 : 0); 7991 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 7992 vcfg->vlan2_vlan_prionly ? 1 : 0); 7993 7994 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 7995 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 7996 HCLGE_VF_NUM_PER_BYTE; 7997 req->vf_bitmap[bmap_index] = 7998 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 7999 8000 status = hclge_cmd_send(&hdev->hw, &desc, 1); 8001 if (status) 8002 dev_err(&hdev->pdev->dev, 8003 "Send port rxvlan cfg command fail, ret =%d\n", 8004 status); 8005 8006 return status; 8007 } 8008 8009 static int hclge_vlan_offload_cfg(struct hclge_vport *vport, 8010 u16 port_base_vlan_state, 8011 u16 vlan_tag) 8012 { 8013 int ret; 8014 8015 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 8016 vport->txvlan_cfg.accept_tag1 = true; 8017 vport->txvlan_cfg.insert_tag1_en = false; 8018 vport->txvlan_cfg.default_tag1 = 0; 8019 } else { 8020 vport->txvlan_cfg.accept_tag1 = false; 8021 vport->txvlan_cfg.insert_tag1_en = true; 8022 vport->txvlan_cfg.default_tag1 = vlan_tag; 8023 } 8024 8025 vport->txvlan_cfg.accept_untag1 = true; 8026 8027 /* accept_tag2 and accept_untag2 are not supported on 8028 * pdev revision(0x20), new revision support them, 8029 * this two fields can not be configured by user. 
8030 */ 8031 vport->txvlan_cfg.accept_tag2 = true; 8032 vport->txvlan_cfg.accept_untag2 = true; 8033 vport->txvlan_cfg.insert_tag2_en = false; 8034 vport->txvlan_cfg.default_tag2 = 0; 8035 8036 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 8037 vport->rxvlan_cfg.strip_tag1_en = false; 8038 vport->rxvlan_cfg.strip_tag2_en = 8039 vport->rxvlan_cfg.rx_vlan_offload_en; 8040 } else { 8041 vport->rxvlan_cfg.strip_tag1_en = 8042 vport->rxvlan_cfg.rx_vlan_offload_en; 8043 vport->rxvlan_cfg.strip_tag2_en = true; 8044 } 8045 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 8046 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 8047 8048 ret = hclge_set_vlan_tx_offload_cfg(vport); 8049 if (ret) 8050 return ret; 8051 8052 return hclge_set_vlan_rx_offload_cfg(vport); 8053 } 8054 8055 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 8056 { 8057 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 8058 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 8059 struct hclge_desc desc; 8060 int status; 8061 8062 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 8063 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 8064 rx_req->ot_fst_vlan_type = 8065 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 8066 rx_req->ot_sec_vlan_type = 8067 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 8068 rx_req->in_fst_vlan_type = 8069 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 8070 rx_req->in_sec_vlan_type = 8071 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 8072 8073 status = hclge_cmd_send(&hdev->hw, &desc, 1); 8074 if (status) { 8075 dev_err(&hdev->pdev->dev, 8076 "Send rxvlan protocol type command fail, ret =%d\n", 8077 status); 8078 return status; 8079 } 8080 8081 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 8082 8083 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; 8084 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 8085 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 8086 8087 status = hclge_cmd_send(&hdev->hw, &desc, 1); 8088 if (status) 8089 dev_err(&hdev->pdev->dev, 8090 "Send txvlan protocol type command fail, ret =%d\n", 8091 status); 8092 8093 return status; 8094 } 8095 8096 static int hclge_init_vlan_config(struct hclge_dev *hdev) 8097 { 8098 #define HCLGE_DEF_VLAN_TYPE 0x8100 8099 8100 struct hnae3_handle *handle = &hdev->vport[0].nic; 8101 struct hclge_vport *vport; 8102 int ret; 8103 int i; 8104 8105 if (hdev->pdev->revision >= 0x21) { 8106 /* for revision 0x21, vf vlan filter is per function */ 8107 for (i = 0; i < hdev->num_alloc_vport; i++) { 8108 vport = &hdev->vport[i]; 8109 ret = hclge_set_vlan_filter_ctrl(hdev, 8110 HCLGE_FILTER_TYPE_VF, 8111 HCLGE_FILTER_FE_EGRESS, 8112 true, 8113 vport->vport_id); 8114 if (ret) 8115 return ret; 8116 } 8117 8118 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 8119 HCLGE_FILTER_FE_INGRESS, true, 8120 0); 8121 if (ret) 8122 return ret; 8123 } else { 8124 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 8125 HCLGE_FILTER_FE_EGRESS_V1_B, 8126 true, 0); 8127 if (ret) 8128 return ret; 8129 } 8130 8131 handle->netdev_flags |= HNAE3_VLAN_FLTR; 8132 8133 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 8134 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 8135 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 8136 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 8137 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; 8138 
hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; 8139 8140 ret = hclge_set_vlan_protocol_type(hdev); 8141 if (ret) 8142 return ret; 8143 8144 for (i = 0; i < hdev->num_alloc_vport; i++) { 8145 u16 vlan_tag; 8146 8147 vport = &hdev->vport[i]; 8148 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag; 8149 8150 ret = hclge_vlan_offload_cfg(vport, 8151 vport->port_base_vlan_cfg.state, 8152 vlan_tag); 8153 if (ret) 8154 return ret; 8155 } 8156 8157 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 8158 } 8159 8160 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 8161 bool writen_to_tbl) 8162 { 8163 struct hclge_vport_vlan_cfg *vlan; 8164 8165 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 8166 if (!vlan) 8167 return; 8168 8169 vlan->hd_tbl_status = writen_to_tbl; 8170 vlan->vlan_id = vlan_id; 8171 8172 list_add_tail(&vlan->node, &vport->vlan_list); 8173 } 8174 8175 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) 8176 { 8177 struct hclge_vport_vlan_cfg *vlan, *tmp; 8178 struct hclge_dev *hdev = vport->back; 8179 int ret; 8180 8181 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 8182 if (!vlan->hd_tbl_status) { 8183 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 8184 vport->vport_id, 8185 vlan->vlan_id, false); 8186 if (ret) { 8187 dev_err(&hdev->pdev->dev, 8188 "restore vport vlan list failed, ret=%d\n", 8189 ret); 8190 return ret; 8191 } 8192 } 8193 vlan->hd_tbl_status = true; 8194 } 8195 8196 return 0; 8197 } 8198 8199 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 8200 bool is_write_tbl) 8201 { 8202 struct hclge_vport_vlan_cfg *vlan, *tmp; 8203 struct hclge_dev *hdev = vport->back; 8204 8205 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 8206 if (vlan->vlan_id == vlan_id) { 8207 if (is_write_tbl && vlan->hd_tbl_status) 8208 hclge_set_vlan_filter_hw(hdev, 8209 htons(ETH_P_8021Q), 8210 vport->vport_id, 8211 vlan_id, 8212 true); 8213 8214 list_del(&vlan->node); 8215 kfree(vlan); 8216 break; 8217 } 8218 } 8219 } 8220 8221 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) 8222 { 8223 struct hclge_vport_vlan_cfg *vlan, *tmp; 8224 struct hclge_dev *hdev = vport->back; 8225 8226 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 8227 if (vlan->hd_tbl_status) 8228 hclge_set_vlan_filter_hw(hdev, 8229 htons(ETH_P_8021Q), 8230 vport->vport_id, 8231 vlan->vlan_id, 8232 true); 8233 8234 vlan->hd_tbl_status = false; 8235 if (is_del_list) { 8236 list_del(&vlan->node); 8237 kfree(vlan); 8238 } 8239 } 8240 } 8241 8242 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) 8243 { 8244 struct hclge_vport_vlan_cfg *vlan, *tmp; 8245 struct hclge_vport *vport; 8246 int i; 8247 8248 mutex_lock(&hdev->vport_cfg_mutex); 8249 for (i = 0; i < hdev->num_alloc_vport; i++) { 8250 vport = &hdev->vport[i]; 8251 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 8252 list_del(&vlan->node); 8253 kfree(vlan); 8254 } 8255 } 8256 mutex_unlock(&hdev->vport_cfg_mutex); 8257 } 8258 8259 static void hclge_restore_vlan_table(struct hnae3_handle *handle) 8260 { 8261 struct hclge_vport *vport = hclge_get_vport(handle); 8262 struct hclge_vport_vlan_cfg *vlan, *tmp; 8263 struct hclge_dev *hdev = vport->back; 8264 u16 vlan_proto; 8265 u16 state, vlan_id; 8266 int i; 8267 8268 mutex_lock(&hdev->vport_cfg_mutex); 8269 for (i = 0; i < hdev->num_alloc_vport; i++) { 8270 vport = &hdev->vport[i]; 8271 vlan_proto = 
vport->port_base_vlan_cfg.vlan_info.vlan_proto; 8272 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; 8273 state = vport->port_base_vlan_cfg.state; 8274 8275 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { 8276 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), 8277 vport->vport_id, vlan_id, 8278 false); 8279 continue; 8280 } 8281 8282 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 8283 int ret; 8284 8285 if (!vlan->hd_tbl_status) 8286 continue; 8287 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 8288 vport->vport_id, 8289 vlan->vlan_id, false); 8290 if (ret) 8291 break; 8292 } 8293 } 8294 8295 mutex_unlock(&hdev->vport_cfg_mutex); 8296 } 8297 8298 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 8299 { 8300 struct hclge_vport *vport = hclge_get_vport(handle); 8301 8302 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { 8303 vport->rxvlan_cfg.strip_tag1_en = false; 8304 vport->rxvlan_cfg.strip_tag2_en = enable; 8305 } else { 8306 vport->rxvlan_cfg.strip_tag1_en = enable; 8307 vport->rxvlan_cfg.strip_tag2_en = true; 8308 } 8309 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 8310 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 8311 vport->rxvlan_cfg.rx_vlan_offload_en = enable; 8312 8313 return hclge_set_vlan_rx_offload_cfg(vport); 8314 } 8315 8316 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, 8317 u16 port_base_vlan_state, 8318 struct hclge_vlan_info *new_info, 8319 struct hclge_vlan_info *old_info) 8320 { 8321 struct hclge_dev *hdev = vport->back; 8322 int ret; 8323 8324 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { 8325 hclge_rm_vport_all_vlan_table(vport, false); 8326 return hclge_set_vlan_filter_hw(hdev, 8327 htons(new_info->vlan_proto), 8328 vport->vport_id, 8329 new_info->vlan_tag, 8330 false); 8331 } 8332 8333 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), 8334 vport->vport_id, old_info->vlan_tag, 8335 true); 8336 if (ret) 8337 return ret; 8338 8339 return hclge_add_vport_all_vlan_table(vport); 8340 } 8341 8342 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, 8343 struct hclge_vlan_info *vlan_info) 8344 { 8345 struct hnae3_handle *nic = &vport->nic; 8346 struct hclge_vlan_info *old_vlan_info; 8347 struct hclge_dev *hdev = vport->back; 8348 int ret; 8349 8350 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; 8351 8352 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag); 8353 if (ret) 8354 return ret; 8355 8356 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) { 8357 /* add new VLAN tag */ 8358 ret = hclge_set_vlan_filter_hw(hdev, 8359 htons(vlan_info->vlan_proto), 8360 vport->vport_id, 8361 vlan_info->vlan_tag, 8362 false); 8363 if (ret) 8364 return ret; 8365 8366 /* remove old VLAN tag */ 8367 ret = hclge_set_vlan_filter_hw(hdev, 8368 htons(old_vlan_info->vlan_proto), 8369 vport->vport_id, 8370 old_vlan_info->vlan_tag, 8371 true); 8372 if (ret) 8373 return ret; 8374 8375 goto update; 8376 } 8377 8378 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, 8379 old_vlan_info); 8380 if (ret) 8381 return ret; 8382 8383 /* update state only when disable/enable port based VLAN */ 8384 vport->port_base_vlan_cfg.state = state; 8385 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 8386 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; 8387 else 8388 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 8389 8390 update: 8391 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag; 8392 
vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos; 8393 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto; 8394 8395 return 0; 8396 } 8397 8398 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, 8399 enum hnae3_port_base_vlan_state state, 8400 u16 vlan) 8401 { 8402 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { 8403 if (!vlan) 8404 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 8405 else 8406 return HNAE3_PORT_BASE_VLAN_ENABLE; 8407 } else { 8408 if (!vlan) 8409 return HNAE3_PORT_BASE_VLAN_DISABLE; 8410 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan) 8411 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 8412 else 8413 return HNAE3_PORT_BASE_VLAN_MODIFY; 8414 } 8415 } 8416 8417 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 8418 u16 vlan, u8 qos, __be16 proto) 8419 { 8420 struct hclge_vport *vport = hclge_get_vport(handle); 8421 struct hclge_dev *hdev = vport->back; 8422 struct hclge_vlan_info vlan_info; 8423 u16 state; 8424 int ret; 8425 8426 if (hdev->pdev->revision == 0x20) 8427 return -EOPNOTSUPP; 8428 8429 /* qos is a 3-bit value, so it cannot be bigger than 7 */ 8430 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7) 8431 return -EINVAL; 8432 if (proto != htons(ETH_P_8021Q)) 8433 return -EPROTONOSUPPORT; 8434 8435 vport = &hdev->vport[vfid]; 8436 state = hclge_get_port_base_vlan_state(vport, 8437 vport->port_base_vlan_cfg.state, 8438 vlan); 8439 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) 8440 return 0; 8441 8442 vlan_info.vlan_tag = vlan; 8443 vlan_info.qos = qos; 8444 vlan_info.vlan_proto = ntohs(proto); 8445 8446 /* update port based VLAN for PF */ 8447 if (!vfid) { 8448 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 8449 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); 8450 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 8451 8452 return ret; 8453 } 8454 8455 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { 8456 return hclge_update_port_base_vlan_cfg(vport, state, 8457 &vlan_info); 8458 } else { 8459 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], 8460 (u8)vfid, state, 8461 vlan, qos, 8462 ntohs(proto)); 8463 return ret; 8464 } 8465 } 8466 8467 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 8468 u16 vlan_id, bool is_kill) 8469 { 8470 struct hclge_vport *vport = hclge_get_vport(handle); 8471 struct hclge_dev *hdev = vport->back; 8472 bool writen_to_tbl = false; 8473 int ret = 0; 8474 8475 /* When the device is resetting, firmware is unable to handle the 8476 * mailbox. Just record the vlan id, and remove it after the 8477 * reset has finished. 8478 */ 8479 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) { 8480 set_bit(vlan_id, vport->vlan_del_fail_bmap); 8481 return -EBUSY; 8482 } 8483 8484 /* when port based vlan is enabled, we use the port based vlan as the 8485 * vlan filter entry. In this case, we don't update the vlan filter 8486 * table when the user adds a new vlan or removes an existing one; we 8487 * just update the vport vlan list. 
The vlan ids in the vlan list will not be written into the vlan 8488 * filter table until port based vlan is disabled. 8489 */ 8490 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 8491 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, 8492 vlan_id, is_kill); 8493 writen_to_tbl = true; 8494 } 8495 8496 if (!ret) { 8497 if (is_kill) 8498 hclge_rm_vport_vlan_table(vport, vlan_id, false); 8499 else 8500 hclge_add_vport_vlan_table(vport, vlan_id, 8501 writen_to_tbl); 8502 } else if (is_kill) { 8503 /* when removing the hw vlan filter failed, record the vlan id, 8504 * and try to remove it from hw later, to stay consistent 8505 * with the stack 8506 */ 8507 set_bit(vlan_id, vport->vlan_del_fail_bmap); 8508 } 8509 return ret; 8510 } 8511 8512 static void hclge_sync_vlan_filter(struct hclge_dev *hdev) 8513 { 8514 #define HCLGE_MAX_SYNC_COUNT 60 8515 8516 int i, ret, sync_cnt = 0; 8517 u16 vlan_id; 8518 8519 /* start from vport 1 for PF is always alive */ 8520 for (i = 0; i < hdev->num_alloc_vport; i++) { 8521 struct hclge_vport *vport = &hdev->vport[i]; 8522 8523 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 8524 VLAN_N_VID); 8525 while (vlan_id != VLAN_N_VID) { 8526 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 8527 vport->vport_id, vlan_id, 8528 true); 8529 if (ret && ret != -EINVAL) 8530 return; 8531 8532 clear_bit(vlan_id, vport->vlan_del_fail_bmap); 8533 hclge_rm_vport_vlan_table(vport, vlan_id, false); 8534 8535 sync_cnt++; 8536 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) 8537 return; 8538 8539 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 8540 VLAN_N_VID); 8541 } 8542 } 8543 } 8544 8545 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) 8546 { 8547 struct hclge_config_max_frm_size_cmd *req; 8548 struct hclge_desc desc; 8549 8550 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 8551 8552 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 8553 req->max_frm_size = cpu_to_le16(new_mps); 8554 req->min_frm_size = HCLGE_MAC_MIN_FRAME; 8555 8556 return hclge_cmd_send(&hdev->hw, &desc, 1); 8557 } 8558 8559 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 8560 { 8561 struct hclge_vport *vport = hclge_get_vport(handle); 8562 8563 return hclge_set_vport_mtu(vport, new_mtu); 8564 } 8565 8566 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) 8567 { 8568 struct hclge_dev *hdev = vport->back; 8569 int i, max_frm_size, ret; 8570 8571 /* HW supports 2 layers of vlan */ 8572 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; 8573 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 8574 max_frm_size > HCLGE_MAC_MAX_FRAME) 8575 return -EINVAL; 8576 8577 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 8578 mutex_lock(&hdev->vport_lock); 8579 /* VF's mps must fit within hdev->mps */ 8580 if (vport->vport_id && max_frm_size > hdev->mps) { 8581 mutex_unlock(&hdev->vport_lock); 8582 return -EINVAL; 8583 } else if (vport->vport_id) { 8584 vport->mps = max_frm_size; 8585 mutex_unlock(&hdev->vport_lock); 8586 return 0; 8587 } 8588 8589 /* PF's mps must be no less than every VF's mps */ 8590 for (i = 1; i < hdev->num_alloc_vport; i++) 8591 if (max_frm_size < hdev->vport[i].mps) { 8592 mutex_unlock(&hdev->vport_lock); 8593 return -EINVAL; 8594 } 8595 8596 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 8597 8598 ret = hclge_set_mac_mtu(hdev, max_frm_size); 8599 if (ret) { 8600 dev_err(&hdev->pdev->dev, 8601 "Change mtu fail, ret =%d\n", ret); 8602 goto out; 8603 } 8604 8605 hdev->mps = max_frm_size; 8606 vport->mps = 
max_frm_size; 8607 8608 ret = hclge_buffer_alloc(hdev); 8609 if (ret) 8610 dev_err(&hdev->pdev->dev, 8611 "Allocate buffer fail, ret =%d\n", ret); 8612 8613 out: 8614 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 8615 mutex_unlock(&hdev->vport_lock); 8616 return ret; 8617 } 8618 8619 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, 8620 bool enable) 8621 { 8622 struct hclge_reset_tqp_queue_cmd *req; 8623 struct hclge_desc desc; 8624 int ret; 8625 8626 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 8627 8628 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 8629 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 8630 if (enable) 8631 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); 8632 8633 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8634 if (ret) { 8635 dev_err(&hdev->pdev->dev, 8636 "Send tqp reset cmd error, status =%d\n", ret); 8637 return ret; 8638 } 8639 8640 return 0; 8641 } 8642 8643 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 8644 { 8645 struct hclge_reset_tqp_queue_cmd *req; 8646 struct hclge_desc desc; 8647 int ret; 8648 8649 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 8650 8651 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 8652 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 8653 8654 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8655 if (ret) { 8656 dev_err(&hdev->pdev->dev, 8657 "Get reset status error, status =%d\n", ret); 8658 return ret; 8659 } 8660 8661 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 8662 } 8663 8664 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) 8665 { 8666 struct hnae3_queue *queue; 8667 struct hclge_tqp *tqp; 8668 8669 queue = handle->kinfo.tqp[queue_id]; 8670 tqp = container_of(queue, struct hclge_tqp, q); 8671 8672 return tqp->index; 8673 } 8674 8675 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 8676 { 8677 struct hclge_vport *vport = hclge_get_vport(handle); 8678 struct hclge_dev *hdev = vport->back; 8679 int reset_try_times = 0; 8680 int reset_status; 8681 u16 queue_gid; 8682 int ret; 8683 8684 queue_gid = hclge_covert_handle_qid_global(handle, queue_id); 8685 8686 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 8687 if (ret) { 8688 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); 8689 return ret; 8690 } 8691 8692 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 8693 if (ret) { 8694 dev_err(&hdev->pdev->dev, 8695 "Send reset tqp cmd fail, ret = %d\n", ret); 8696 return ret; 8697 } 8698 8699 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 8700 reset_status = hclge_get_reset_status(hdev, queue_gid); 8701 if (reset_status) 8702 break; 8703 8704 /* Wait for tqp hw reset */ 8705 usleep_range(1000, 1200); 8706 } 8707 8708 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 8709 dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); 8710 return ret; 8711 } 8712 8713 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 8714 if (ret) 8715 dev_err(&hdev->pdev->dev, 8716 "Deassert the soft reset fail, ret = %d\n", ret); 8717 8718 return ret; 8719 } 8720 8721 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) 8722 { 8723 struct hclge_dev *hdev = vport->back; 8724 int reset_try_times = 0; 8725 int reset_status; 8726 u16 queue_gid; 8727 int ret; 8728 8729 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); 8730 8731 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 8732 if (ret) { 8733 dev_warn(&hdev->pdev->dev, 8734 
"Send reset tqp cmd fail, ret = %d\n", ret); 8735 return; 8736 } 8737 8738 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 8739 reset_status = hclge_get_reset_status(hdev, queue_gid); 8740 if (reset_status) 8741 break; 8742 8743 /* Wait for tqp hw reset */ 8744 usleep_range(1000, 1200); 8745 } 8746 8747 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 8748 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 8749 return; 8750 } 8751 8752 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 8753 if (ret) 8754 dev_warn(&hdev->pdev->dev, 8755 "Deassert the soft reset fail, ret = %d\n", ret); 8756 } 8757 8758 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 8759 { 8760 struct hclge_vport *vport = hclge_get_vport(handle); 8761 struct hclge_dev *hdev = vport->back; 8762 8763 return hdev->fw_version; 8764 } 8765 8766 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 8767 { 8768 struct phy_device *phydev = hdev->hw.mac.phydev; 8769 8770 if (!phydev) 8771 return; 8772 8773 phy_set_asym_pause(phydev, rx_en, tx_en); 8774 } 8775 8776 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 8777 { 8778 int ret; 8779 8780 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 8781 return 0; 8782 8783 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 8784 if (ret) 8785 dev_err(&hdev->pdev->dev, 8786 "configure pauseparam error, ret = %d.\n", ret); 8787 8788 return ret; 8789 } 8790 8791 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 8792 { 8793 struct phy_device *phydev = hdev->hw.mac.phydev; 8794 u16 remote_advertising = 0; 8795 u16 local_advertising; 8796 u32 rx_pause, tx_pause; 8797 u8 flowctl; 8798 8799 if (!phydev->link || !phydev->autoneg) 8800 return 0; 8801 8802 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); 8803 8804 if (phydev->pause) 8805 remote_advertising = LPA_PAUSE_CAP; 8806 8807 if (phydev->asym_pause) 8808 remote_advertising |= LPA_PAUSE_ASYM; 8809 8810 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 8811 remote_advertising); 8812 tx_pause = flowctl & FLOW_CTRL_TX; 8813 rx_pause = flowctl & FLOW_CTRL_RX; 8814 8815 if (phydev->duplex == HCLGE_MAC_HALF) { 8816 tx_pause = 0; 8817 rx_pause = 0; 8818 } 8819 8820 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 8821 } 8822 8823 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 8824 u32 *rx_en, u32 *tx_en) 8825 { 8826 struct hclge_vport *vport = hclge_get_vport(handle); 8827 struct hclge_dev *hdev = vport->back; 8828 struct phy_device *phydev = hdev->hw.mac.phydev; 8829 8830 *auto_neg = phydev ? 
hclge_get_autoneg(handle) : 0; 8831 8832 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 8833 *rx_en = 0; 8834 *tx_en = 0; 8835 return; 8836 } 8837 8838 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 8839 *rx_en = 1; 8840 *tx_en = 0; 8841 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 8842 *tx_en = 1; 8843 *rx_en = 0; 8844 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 8845 *rx_en = 1; 8846 *tx_en = 1; 8847 } else { 8848 *rx_en = 0; 8849 *tx_en = 0; 8850 } 8851 } 8852 8853 static void hclge_record_user_pauseparam(struct hclge_dev *hdev, 8854 u32 rx_en, u32 tx_en) 8855 { 8856 if (rx_en && tx_en) 8857 hdev->fc_mode_last_time = HCLGE_FC_FULL; 8858 else if (rx_en && !tx_en) 8859 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 8860 else if (!rx_en && tx_en) 8861 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 8862 else 8863 hdev->fc_mode_last_time = HCLGE_FC_NONE; 8864 8865 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 8866 } 8867 8868 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 8869 u32 rx_en, u32 tx_en) 8870 { 8871 struct hclge_vport *vport = hclge_get_vport(handle); 8872 struct hclge_dev *hdev = vport->back; 8873 struct phy_device *phydev = hdev->hw.mac.phydev; 8874 u32 fc_autoneg; 8875 8876 if (phydev) { 8877 fc_autoneg = hclge_get_autoneg(handle); 8878 if (auto_neg != fc_autoneg) { 8879 dev_info(&hdev->pdev->dev, 8880 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 8881 return -EOPNOTSUPP; 8882 } 8883 } 8884 8885 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 8886 dev_info(&hdev->pdev->dev, 8887 "Priority flow control enabled. Cannot set link flow control.\n"); 8888 return -EOPNOTSUPP; 8889 } 8890 8891 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 8892 8893 hclge_record_user_pauseparam(hdev, rx_en, tx_en); 8894 8895 if (!auto_neg) 8896 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 8897 8898 if (phydev) 8899 return phy_start_aneg(phydev); 8900 8901 return -EOPNOTSUPP; 8902 } 8903 8904 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 8905 u8 *auto_neg, u32 *speed, u8 *duplex) 8906 { 8907 struct hclge_vport *vport = hclge_get_vport(handle); 8908 struct hclge_dev *hdev = vport->back; 8909 8910 if (speed) 8911 *speed = hdev->hw.mac.speed; 8912 if (duplex) 8913 *duplex = hdev->hw.mac.duplex; 8914 if (auto_neg) 8915 *auto_neg = hdev->hw.mac.autoneg; 8916 } 8917 8918 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, 8919 u8 *module_type) 8920 { 8921 struct hclge_vport *vport = hclge_get_vport(handle); 8922 struct hclge_dev *hdev = vport->back; 8923 8924 if (media_type) 8925 *media_type = hdev->hw.mac.media_type; 8926 8927 if (module_type) 8928 *module_type = hdev->hw.mac.module_type; 8929 } 8930 8931 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 8932 u8 *tp_mdix_ctrl, u8 *tp_mdix) 8933 { 8934 struct hclge_vport *vport = hclge_get_vport(handle); 8935 struct hclge_dev *hdev = vport->back; 8936 struct phy_device *phydev = hdev->hw.mac.phydev; 8937 int mdix_ctrl, mdix, is_resolved; 8938 unsigned int retval; 8939 8940 if (!phydev) { 8941 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 8942 *tp_mdix = ETH_TP_MDI_INVALID; 8943 return; 8944 } 8945 8946 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 8947 8948 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 8949 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 8950 HCLGE_PHY_MDIX_CTRL_S); 8951 8952 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 8953 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 8954 is_resolved = 
hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 8955 8956 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 8957 8958 switch (mdix_ctrl) { 8959 case 0x0: 8960 *tp_mdix_ctrl = ETH_TP_MDI; 8961 break; 8962 case 0x1: 8963 *tp_mdix_ctrl = ETH_TP_MDI_X; 8964 break; 8965 case 0x3: 8966 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 8967 break; 8968 default: 8969 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 8970 break; 8971 } 8972 8973 if (!is_resolved) 8974 *tp_mdix = ETH_TP_MDI_INVALID; 8975 else if (mdix) 8976 *tp_mdix = ETH_TP_MDI_X; 8977 else 8978 *tp_mdix = ETH_TP_MDI; 8979 } 8980 8981 static void hclge_info_show(struct hclge_dev *hdev) 8982 { 8983 struct device *dev = &hdev->pdev->dev; 8984 8985 dev_info(dev, "PF info begin:\n"); 8986 8987 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 8988 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 8989 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 8990 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 8991 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport); 8992 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); 8993 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 8994 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); 8995 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); 8996 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); 8997 dev_info(dev, "This is %s PF\n", 8998 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); 8999 dev_info(dev, "DCB %s\n", 9000 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable"); 9001 dev_info(dev, "MQPRIO %s\n", 9002 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable"); 9003 9004 dev_info(dev, "PF info end.\n"); 9005 } 9006 9007 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 9008 struct hclge_vport *vport) 9009 { 9010 struct hnae3_client *client = vport->nic.client; 9011 struct hclge_dev *hdev = ae_dev->priv; 9012 int rst_cnt = hdev->rst_stats.reset_cnt; 9013 int ret; 9014 9015 ret = client->ops->init_instance(&vport->nic); 9016 if (ret) 9017 return ret; 9018 9019 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 9020 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 9021 rst_cnt != hdev->rst_stats.reset_cnt) { 9022 ret = -EBUSY; 9023 goto init_nic_err; 9024 } 9025 9026 /* Enable nic hw error interrupts */ 9027 ret = hclge_config_nic_hw_error(hdev, true); 9028 if (ret) { 9029 dev_err(&ae_dev->pdev->dev, 9030 "fail(%d) to enable hw error interrupts\n", ret); 9031 goto init_nic_err; 9032 } 9033 9034 hnae3_set_client_init_flag(client, ae_dev, 1); 9035 9036 if (netif_msg_drv(&hdev->vport->nic)) 9037 hclge_info_show(hdev); 9038 9039 return ret; 9040 9041 init_nic_err: 9042 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 9043 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 9044 msleep(HCLGE_WAIT_RESET_DONE); 9045 9046 client->ops->uninit_instance(&vport->nic, 0); 9047 9048 return ret; 9049 } 9050 9051 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 9052 struct hclge_vport *vport) 9053 { 9054 struct hnae3_client *client = vport->roce.client; 9055 struct hclge_dev *hdev = ae_dev->priv; 9056 int rst_cnt; 9057 int ret; 9058 9059 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 9060 !hdev->nic_client) 9061 return 0; 9062 9063 client = hdev->roce_client; 9064 ret = hclge_init_roce_base_info(vport); 9065 if (ret) 9066 return ret; 9067 9068 rst_cnt = hdev->rst_stats.reset_cnt; 
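/* Note: rst_cnt snapshots the reset counter before the roce client is
 * initialized; the check below compares it against the current counter so
 * that a reset racing with init_instance() is detected and the instance is
 * torn down again via init_roce_err.
 */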
9069 ret = client->ops->init_instance(&vport->roce); 9070 if (ret) 9071 return ret; 9072 9073 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 9074 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 9075 rst_cnt != hdev->rst_stats.reset_cnt) { 9076 ret = -EBUSY; 9077 goto init_roce_err; 9078 } 9079 9080 /* Enable roce ras interrupts */ 9081 ret = hclge_config_rocee_ras_interrupt(hdev, true); 9082 if (ret) { 9083 dev_err(&ae_dev->pdev->dev, 9084 "fail(%d) to enable roce ras interrupts\n", ret); 9085 goto init_roce_err; 9086 } 9087 9088 hnae3_set_client_init_flag(client, ae_dev, 1); 9089 9090 return 0; 9091 9092 init_roce_err: 9093 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 9094 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 9095 msleep(HCLGE_WAIT_RESET_DONE); 9096 9097 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); 9098 9099 return ret; 9100 } 9101 9102 static int hclge_init_client_instance(struct hnae3_client *client, 9103 struct hnae3_ae_dev *ae_dev) 9104 { 9105 struct hclge_dev *hdev = ae_dev->priv; 9106 struct hclge_vport *vport; 9107 int i, ret; 9108 9109 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 9110 vport = &hdev->vport[i]; 9111 9112 switch (client->type) { 9113 case HNAE3_CLIENT_KNIC: 9114 hdev->nic_client = client; 9115 vport->nic.client = client; 9116 ret = hclge_init_nic_client_instance(ae_dev, vport); 9117 if (ret) 9118 goto clear_nic; 9119 9120 ret = hclge_init_roce_client_instance(ae_dev, vport); 9121 if (ret) 9122 goto clear_roce; 9123 9124 break; 9125 case HNAE3_CLIENT_ROCE: 9126 if (hnae3_dev_roce_supported(hdev)) { 9127 hdev->roce_client = client; 9128 vport->roce.client = client; 9129 } 9130 9131 ret = hclge_init_roce_client_instance(ae_dev, vport); 9132 if (ret) 9133 goto clear_roce; 9134 9135 break; 9136 default: 9137 return -EINVAL; 9138 } 9139 } 9140 9141 return 0; 9142 9143 clear_nic: 9144 hdev->nic_client = NULL; 9145 vport->nic.client = NULL; 9146 return ret; 9147 clear_roce: 9148 hdev->roce_client = NULL; 9149 vport->roce.client = NULL; 9150 return ret; 9151 } 9152 9153 static void hclge_uninit_client_instance(struct hnae3_client *client, 9154 struct hnae3_ae_dev *ae_dev) 9155 { 9156 struct hclge_dev *hdev = ae_dev->priv; 9157 struct hclge_vport *vport; 9158 int i; 9159 9160 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 9161 vport = &hdev->vport[i]; 9162 if (hdev->roce_client) { 9163 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 9164 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 9165 msleep(HCLGE_WAIT_RESET_DONE); 9166 9167 hdev->roce_client->ops->uninit_instance(&vport->roce, 9168 0); 9169 hdev->roce_client = NULL; 9170 vport->roce.client = NULL; 9171 } 9172 if (client->type == HNAE3_CLIENT_ROCE) 9173 return; 9174 if (hdev->nic_client && client->ops->uninit_instance) { 9175 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 9176 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 9177 msleep(HCLGE_WAIT_RESET_DONE); 9178 9179 client->ops->uninit_instance(&vport->nic, 0); 9180 hdev->nic_client = NULL; 9181 vport->nic.client = NULL; 9182 } 9183 } 9184 } 9185 9186 static int hclge_pci_init(struct hclge_dev *hdev) 9187 { 9188 struct pci_dev *pdev = hdev->pdev; 9189 struct hclge_hw *hw; 9190 int ret; 9191 9192 ret = pci_enable_device(pdev); 9193 if (ret) { 9194 dev_err(&pdev->dev, "failed to enable PCI device\n"); 9195 return ret; 9196 } 9197 9198 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 9199 if (ret) { 9200 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 
9201 if (ret) { 9202 dev_err(&pdev->dev, 9203 "can't set consistent PCI DMA"); 9204 goto err_disable_device; 9205 } 9206 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 9207 } 9208 9209 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 9210 if (ret) { 9211 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 9212 goto err_disable_device; 9213 } 9214 9215 pci_set_master(pdev); 9216 hw = &hdev->hw; 9217 hw->io_base = pcim_iomap(pdev, 2, 0); 9218 if (!hw->io_base) { 9219 dev_err(&pdev->dev, "Can't map configuration register space\n"); 9220 ret = -ENOMEM; 9221 goto err_clr_master; 9222 } 9223 9224 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 9225 9226 return 0; 9227 err_clr_master: 9228 pci_clear_master(pdev); 9229 pci_release_regions(pdev); 9230 err_disable_device: 9231 pci_disable_device(pdev); 9232 9233 return ret; 9234 } 9235 9236 static void hclge_pci_uninit(struct hclge_dev *hdev) 9237 { 9238 struct pci_dev *pdev = hdev->pdev; 9239 9240 pcim_iounmap(pdev, hdev->hw.io_base); 9241 pci_free_irq_vectors(pdev); 9242 pci_clear_master(pdev); 9243 pci_release_mem_regions(pdev); 9244 pci_disable_device(pdev); 9245 } 9246 9247 static void hclge_state_init(struct hclge_dev *hdev) 9248 { 9249 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 9250 set_bit(HCLGE_STATE_DOWN, &hdev->state); 9251 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 9252 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 9253 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 9254 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 9255 } 9256 9257 static void hclge_state_uninit(struct hclge_dev *hdev) 9258 { 9259 set_bit(HCLGE_STATE_DOWN, &hdev->state); 9260 set_bit(HCLGE_STATE_REMOVING, &hdev->state); 9261 9262 if (hdev->reset_timer.function) 9263 del_timer_sync(&hdev->reset_timer); 9264 if (hdev->service_task.work.func) 9265 cancel_delayed_work_sync(&hdev->service_task); 9266 if (hdev->rst_service_task.func) 9267 cancel_work_sync(&hdev->rst_service_task); 9268 if (hdev->mbx_service_task.func) 9269 cancel_work_sync(&hdev->mbx_service_task); 9270 } 9271 9272 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) 9273 { 9274 #define HCLGE_FLR_WAIT_MS 100 9275 #define HCLGE_FLR_WAIT_CNT 50 9276 struct hclge_dev *hdev = ae_dev->priv; 9277 int cnt = 0; 9278 9279 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 9280 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); 9281 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); 9282 hclge_reset_event(hdev->pdev, NULL); 9283 9284 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && 9285 cnt++ < HCLGE_FLR_WAIT_CNT) 9286 msleep(HCLGE_FLR_WAIT_MS); 9287 9288 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) 9289 dev_err(&hdev->pdev->dev, 9290 "flr wait down timeout: %d\n", cnt); 9291 } 9292 9293 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) 9294 { 9295 struct hclge_dev *hdev = ae_dev->priv; 9296 9297 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); 9298 } 9299 9300 static void hclge_clear_resetting_state(struct hclge_dev *hdev) 9301 { 9302 u16 i; 9303 9304 for (i = 0; i < hdev->num_alloc_vport; i++) { 9305 struct hclge_vport *vport = &hdev->vport[i]; 9306 int ret; 9307 9308 /* Send cmd to clear VF's FUNC_RST_ING */ 9309 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); 9310 if (ret) 9311 dev_warn(&hdev->pdev->dev, 9312 "clear vf(%u) rst failed %d!\n", 9313 vport->vport_id, ret); 9314 } 9315 } 9316 9317 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 9318 { 9319 struct pci_dev *pdev = ae_dev->pdev; 9320 struct hclge_dev *hdev; 9321 int ret; 9322 
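/* hdev is allocated with devm_kzalloc(), so it is freed automatically when
 * the PCI device is released; only the resources acquired below need
 * explicit unwinding on the error paths.
 */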
9323 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 9324 if (!hdev) { 9325 ret = -ENOMEM; 9326 goto out; 9327 } 9328 9329 hdev->pdev = pdev; 9330 hdev->ae_dev = ae_dev; 9331 hdev->reset_type = HNAE3_NONE_RESET; 9332 hdev->reset_level = HNAE3_FUNC_RESET; 9333 ae_dev->priv = hdev; 9334 9335 /* HW supprt 2 layer vlan */ 9336 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; 9337 9338 mutex_init(&hdev->vport_lock); 9339 mutex_init(&hdev->vport_cfg_mutex); 9340 spin_lock_init(&hdev->fd_rule_lock); 9341 9342 ret = hclge_pci_init(hdev); 9343 if (ret) { 9344 dev_err(&pdev->dev, "PCI init failed\n"); 9345 goto out; 9346 } 9347 9348 /* Firmware command queue initialize */ 9349 ret = hclge_cmd_queue_init(hdev); 9350 if (ret) { 9351 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 9352 goto err_pci_uninit; 9353 } 9354 9355 /* Firmware command initialize */ 9356 ret = hclge_cmd_init(hdev); 9357 if (ret) 9358 goto err_cmd_uninit; 9359 9360 ret = hclge_get_cap(hdev); 9361 if (ret) { 9362 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 9363 ret); 9364 goto err_cmd_uninit; 9365 } 9366 9367 ret = hclge_configure(hdev); 9368 if (ret) { 9369 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 9370 goto err_cmd_uninit; 9371 } 9372 9373 ret = hclge_init_msi(hdev); 9374 if (ret) { 9375 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 9376 goto err_cmd_uninit; 9377 } 9378 9379 ret = hclge_misc_irq_init(hdev); 9380 if (ret) { 9381 dev_err(&pdev->dev, 9382 "Misc IRQ(vector0) init error, ret = %d.\n", 9383 ret); 9384 goto err_msi_uninit; 9385 } 9386 9387 ret = hclge_alloc_tqps(hdev); 9388 if (ret) { 9389 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 9390 goto err_msi_irq_uninit; 9391 } 9392 9393 ret = hclge_alloc_vport(hdev); 9394 if (ret) { 9395 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 9396 goto err_msi_irq_uninit; 9397 } 9398 9399 ret = hclge_map_tqp(hdev); 9400 if (ret) { 9401 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 9402 goto err_msi_irq_uninit; 9403 } 9404 9405 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 9406 ret = hclge_mac_mdio_config(hdev); 9407 if (ret) { 9408 dev_err(&hdev->pdev->dev, 9409 "mdio config fail ret=%d\n", ret); 9410 goto err_msi_irq_uninit; 9411 } 9412 } 9413 9414 ret = hclge_init_umv_space(hdev); 9415 if (ret) { 9416 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); 9417 goto err_mdiobus_unreg; 9418 } 9419 9420 ret = hclge_mac_init(hdev); 9421 if (ret) { 9422 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 9423 goto err_mdiobus_unreg; 9424 } 9425 9426 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 9427 if (ret) { 9428 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 9429 goto err_mdiobus_unreg; 9430 } 9431 9432 ret = hclge_config_gro(hdev, true); 9433 if (ret) 9434 goto err_mdiobus_unreg; 9435 9436 ret = hclge_init_vlan_config(hdev); 9437 if (ret) { 9438 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 9439 goto err_mdiobus_unreg; 9440 } 9441 9442 ret = hclge_tm_schd_init(hdev); 9443 if (ret) { 9444 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 9445 goto err_mdiobus_unreg; 9446 } 9447 9448 hclge_rss_init_cfg(hdev); 9449 ret = hclge_rss_init_hw(hdev); 9450 if (ret) { 9451 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 9452 goto err_mdiobus_unreg; 9453 } 9454 9455 ret = init_mgr_tbl(hdev); 9456 if (ret) { 9457 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 9458 goto 
err_mdiobus_unreg; 9459 } 9460 9461 ret = hclge_init_fd_config(hdev); 9462 if (ret) { 9463 dev_err(&pdev->dev, 9464 "fd table init fail, ret=%d\n", ret); 9465 goto err_mdiobus_unreg; 9466 } 9467 9468 INIT_KFIFO(hdev->mac_tnl_log); 9469 9470 hclge_dcb_ops_set(hdev); 9471 9472 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); 9473 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); 9474 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); 9475 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); 9476 9477 /* Setup affinity after service timer setup because add_timer_on 9478 * is called in affinity notify. 9479 */ 9480 hclge_misc_affinity_setup(hdev); 9481 9482 hclge_clear_all_event_cause(hdev); 9483 hclge_clear_resetting_state(hdev); 9484 9485 /* Log and clear the hw errors those already occurred */ 9486 hclge_handle_all_hns_hw_errors(ae_dev); 9487 9488 /* request delayed reset for the error recovery because an immediate 9489 * global reset on a PF affecting pending initialization of other PFs 9490 */ 9491 if (ae_dev->hw_err_reset_req) { 9492 enum hnae3_reset_type reset_level; 9493 9494 reset_level = hclge_get_reset_level(ae_dev, 9495 &ae_dev->hw_err_reset_req); 9496 hclge_set_def_reset_request(ae_dev, reset_level); 9497 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); 9498 } 9499 9500 /* Enable MISC vector(vector0) */ 9501 hclge_enable_vector(&hdev->misc_vector, true); 9502 9503 hclge_state_init(hdev); 9504 hdev->last_reset_time = jiffies; 9505 9506 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", 9507 HCLGE_DRIVER_NAME); 9508 9509 return 0; 9510 9511 err_mdiobus_unreg: 9512 if (hdev->hw.mac.phydev) 9513 mdiobus_unregister(hdev->hw.mac.mdio_bus); 9514 err_msi_irq_uninit: 9515 hclge_misc_irq_uninit(hdev); 9516 err_msi_uninit: 9517 pci_free_irq_vectors(pdev); 9518 err_cmd_uninit: 9519 hclge_cmd_uninit(hdev); 9520 err_pci_uninit: 9521 pcim_iounmap(pdev, hdev->hw.io_base); 9522 pci_clear_master(pdev); 9523 pci_release_regions(pdev); 9524 pci_disable_device(pdev); 9525 out: 9526 return ret; 9527 } 9528 9529 static void hclge_stats_clear(struct hclge_dev *hdev) 9530 { 9531 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); 9532 } 9533 9534 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 9535 { 9536 return hclge_config_switch_param(hdev, vf, enable, 9537 HCLGE_SWITCH_ANTI_SPOOF_MASK); 9538 } 9539 9540 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 9541 { 9542 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9543 HCLGE_FILTER_FE_NIC_INGRESS_B, 9544 enable, vf); 9545 } 9546 9547 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) 9548 { 9549 int ret; 9550 9551 ret = hclge_set_mac_spoofchk(hdev, vf, enable); 9552 if (ret) { 9553 dev_err(&hdev->pdev->dev, 9554 "Set vf %d mac spoof check %s failed, ret=%d\n", 9555 vf, enable ? "on" : "off", ret); 9556 return ret; 9557 } 9558 9559 ret = hclge_set_vlan_spoofchk(hdev, vf, enable); 9560 if (ret) 9561 dev_err(&hdev->pdev->dev, 9562 "Set vf %d vlan spoof check %s failed, ret=%d\n", 9563 vf, enable ? "on" : "off", ret); 9564 9565 return ret; 9566 } 9567 9568 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, 9569 bool enable) 9570 { 9571 struct hclge_vport *vport = hclge_get_vport(handle); 9572 struct hclge_dev *hdev = vport->back; 9573 u32 new_spoofchk = enable ? 
1 : 0; 9574 int ret; 9575 9576 if (hdev->pdev->revision == 0x20) 9577 return -EOPNOTSUPP; 9578 9579 vport = hclge_get_vf_vport(hdev, vf); 9580 if (!vport) 9581 return -EINVAL; 9582 9583 if (vport->vf_info.spoofchk == new_spoofchk) 9584 return 0; 9585 9586 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) 9587 dev_warn(&hdev->pdev->dev, 9588 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", 9589 vf); 9590 else if (enable && hclge_is_umv_space_full(vport)) 9591 dev_warn(&hdev->pdev->dev, 9592 "vf %d mac table is full, enable spoof check may cause its packet send fail\n", 9593 vf); 9594 9595 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); 9596 if (ret) 9597 return ret; 9598 9599 vport->vf_info.spoofchk = new_spoofchk; 9600 return 0; 9601 } 9602 9603 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) 9604 { 9605 struct hclge_vport *vport = hdev->vport; 9606 int ret; 9607 int i; 9608 9609 if (hdev->pdev->revision == 0x20) 9610 return 0; 9611 9612 /* resume the vf spoof check state after reset */ 9613 for (i = 0; i < hdev->num_alloc_vport; i++) { 9614 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, 9615 vport->vf_info.spoofchk); 9616 if (ret) 9617 return ret; 9618 9619 vport++; 9620 } 9621 9622 return 0; 9623 } 9624 9625 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) 9626 { 9627 struct hclge_vport *vport = hclge_get_vport(handle); 9628 struct hclge_dev *hdev = vport->back; 9629 u32 new_trusted = enable ? 1 : 0; 9630 bool en_bc_pmc; 9631 int ret; 9632 9633 vport = hclge_get_vf_vport(hdev, vf); 9634 if (!vport) 9635 return -EINVAL; 9636 9637 if (vport->vf_info.trusted == new_trusted) 9638 return 0; 9639 9640 /* Disable promisc mode for VF if it is not trusted any more. 
*/ 9641 if (!enable && vport->vf_info.promisc_enable) { 9642 en_bc_pmc = hdev->pdev->revision != 0x20; 9643 ret = hclge_set_vport_promisc_mode(vport, false, false, 9644 en_bc_pmc); 9645 if (ret) 9646 return ret; 9647 vport->vf_info.promisc_enable = 0; 9648 hclge_inform_vf_promisc_info(vport); 9649 } 9650 9651 vport->vf_info.trusted = new_trusted; 9652 9653 return 0; 9654 } 9655 9656 static void hclge_reset_vf_rate(struct hclge_dev *hdev) 9657 { 9658 int ret; 9659 int vf; 9660 9661 /* reset vf rate to default value */ 9662 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { 9663 struct hclge_vport *vport = &hdev->vport[vf]; 9664 9665 vport->vf_info.max_tx_rate = 0; 9666 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); 9667 if (ret) 9668 dev_err(&hdev->pdev->dev, 9669 "vf%d failed to reset to default, ret=%d\n", 9670 vf - HCLGE_VF_VPORT_START_NUM, ret); 9671 } 9672 } 9673 9674 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf, 9675 int min_tx_rate, int max_tx_rate) 9676 { 9677 if (min_tx_rate != 0 || 9678 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { 9679 dev_err(&hdev->pdev->dev, 9680 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n", 9681 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); 9682 return -EINVAL; 9683 } 9684 9685 return 0; 9686 } 9687 9688 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, 9689 int min_tx_rate, int max_tx_rate, bool force) 9690 { 9691 struct hclge_vport *vport = hclge_get_vport(handle); 9692 struct hclge_dev *hdev = vport->back; 9693 int ret; 9694 9695 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate); 9696 if (ret) 9697 return ret; 9698 9699 vport = hclge_get_vf_vport(hdev, vf); 9700 if (!vport) 9701 return -EINVAL; 9702 9703 if (!force && max_tx_rate == vport->vf_info.max_tx_rate) 9704 return 0; 9705 9706 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate); 9707 if (ret) 9708 return ret; 9709 9710 vport->vf_info.max_tx_rate = max_tx_rate; 9711 9712 return 0; 9713 } 9714 9715 static int hclge_resume_vf_rate(struct hclge_dev *hdev) 9716 { 9717 struct hnae3_handle *handle = &hdev->vport->nic; 9718 struct hclge_vport *vport; 9719 int ret; 9720 int vf; 9721 9722 /* resume the vf max_tx_rate after reset */ 9723 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { 9724 vport = hclge_get_vf_vport(hdev, vf); 9725 if (!vport) 9726 return -EINVAL; 9727 9728 /* zero means max rate, after reset, firmware already set it to 9729 * max rate, so just continue. 
9730 */ 9731 if (!vport->vf_info.max_tx_rate) 9732 continue; 9733 9734 ret = hclge_set_vf_rate(handle, vf, 0, 9735 vport->vf_info.max_tx_rate, true); 9736 if (ret) { 9737 dev_err(&hdev->pdev->dev, 9738 "vf%d failed to resume tx_rate:%u, ret=%d\n", 9739 vf, vport->vf_info.max_tx_rate, ret); 9740 return ret; 9741 } 9742 } 9743 9744 return 0; 9745 } 9746 9747 static void hclge_reset_vport_state(struct hclge_dev *hdev) 9748 { 9749 struct hclge_vport *vport = hdev->vport; 9750 int i; 9751 9752 for (i = 0; i < hdev->num_alloc_vport; i++) { 9753 hclge_vport_stop(vport); 9754 vport++; 9755 } 9756 } 9757 9758 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 9759 { 9760 struct hclge_dev *hdev = ae_dev->priv; 9761 struct pci_dev *pdev = ae_dev->pdev; 9762 int ret; 9763 9764 set_bit(HCLGE_STATE_DOWN, &hdev->state); 9765 9766 hclge_stats_clear(hdev); 9767 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 9768 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); 9769 9770 ret = hclge_cmd_init(hdev); 9771 if (ret) { 9772 dev_err(&pdev->dev, "Cmd queue init failed\n"); 9773 return ret; 9774 } 9775 9776 ret = hclge_map_tqp(hdev); 9777 if (ret) { 9778 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 9779 return ret; 9780 } 9781 9782 hclge_reset_umv_space(hdev); 9783 9784 ret = hclge_mac_init(hdev); 9785 if (ret) { 9786 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 9787 return ret; 9788 } 9789 9790 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 9791 if (ret) { 9792 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 9793 return ret; 9794 } 9795 9796 ret = hclge_config_gro(hdev, true); 9797 if (ret) 9798 return ret; 9799 9800 ret = hclge_init_vlan_config(hdev); 9801 if (ret) { 9802 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 9803 return ret; 9804 } 9805 9806 ret = hclge_tm_init_hw(hdev, true); 9807 if (ret) { 9808 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 9809 return ret; 9810 } 9811 9812 ret = hclge_rss_init_hw(hdev); 9813 if (ret) { 9814 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 9815 return ret; 9816 } 9817 9818 ret = hclge_init_fd_config(hdev); 9819 if (ret) { 9820 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); 9821 return ret; 9822 } 9823 9824 /* Log and clear the hw errors that have already occurred */ 9825 hclge_handle_all_hns_hw_errors(ae_dev); 9826 9827 /* Re-enable the hw error interrupts because 9828 * the interrupts get disabled on global reset.
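 * NIC hw error interrupts are re-enabled unconditionally just below; ROCEE RAS interrupts are re-enabled only when a RoCE client is registered.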
9829 */ 9830 ret = hclge_config_nic_hw_error(hdev, true); 9831 if (ret) { 9832 dev_err(&pdev->dev, 9833 "fail(%d) to re-enable NIC hw error interrupts\n", 9834 ret); 9835 return ret; 9836 } 9837 9838 if (hdev->roce_client) { 9839 ret = hclge_config_rocee_ras_interrupt(hdev, true); 9840 if (ret) { 9841 dev_err(&pdev->dev, 9842 "fail(%d) to re-enable roce ras interrupts\n", 9843 ret); 9844 return ret; 9845 } 9846 } 9847 9848 hclge_reset_vport_state(hdev); 9849 ret = hclge_reset_vport_spoofchk(hdev); 9850 if (ret) 9851 return ret; 9852 9853 ret = hclge_resume_vf_rate(hdev); 9854 if (ret) 9855 return ret; 9856 9857 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 9858 HCLGE_DRIVER_NAME); 9859 9860 return 0; 9861 } 9862 9863 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 9864 { 9865 struct hclge_dev *hdev = ae_dev->priv; 9866 struct hclge_mac *mac = &hdev->hw.mac; 9867 9868 hclge_reset_vf_rate(hdev); 9869 hclge_misc_affinity_teardown(hdev); 9870 hclge_state_uninit(hdev); 9871 9872 if (mac->phydev) 9873 mdiobus_unregister(mac->mdio_bus); 9874 9875 hclge_uninit_umv_space(hdev); 9876 9877 /* Disable MISC vector(vector0) */ 9878 hclge_enable_vector(&hdev->misc_vector, false); 9879 synchronize_irq(hdev->misc_vector.vector_irq); 9880 9881 /* Disable all hw interrupts */ 9882 hclge_config_mac_tnl_int(hdev, false); 9883 hclge_config_nic_hw_error(hdev, false); 9884 hclge_config_rocee_ras_interrupt(hdev, false); 9885 9886 hclge_cmd_uninit(hdev); 9887 hclge_misc_irq_uninit(hdev); 9888 hclge_pci_uninit(hdev); 9889 mutex_destroy(&hdev->vport_lock); 9890 hclge_uninit_vport_mac_table(hdev); 9891 hclge_uninit_vport_vlan_table(hdev); 9892 mutex_destroy(&hdev->vport_cfg_mutex); 9893 ae_dev->priv = NULL; 9894 } 9895 9896 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 9897 { 9898 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 9899 struct hclge_vport *vport = hclge_get_vport(handle); 9900 struct hclge_dev *hdev = vport->back; 9901 9902 return min_t(u32, hdev->rss_size_max, 9903 vport->alloc_tqps / kinfo->num_tc); 9904 } 9905 9906 static void hclge_get_channels(struct hnae3_handle *handle, 9907 struct ethtool_channels *ch) 9908 { 9909 ch->max_combined = hclge_get_max_channels(handle); 9910 ch->other_count = 1; 9911 ch->max_other = 1; 9912 ch->combined_count = handle->kinfo.rss_size; 9913 } 9914 9915 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 9916 u16 *alloc_tqps, u16 *max_rss_size) 9917 { 9918 struct hclge_vport *vport = hclge_get_vport(handle); 9919 struct hclge_dev *hdev = vport->back; 9920 9921 *alloc_tqps = vport->alloc_tqps; 9922 *max_rss_size = hdev->rss_size_max; 9923 } 9924 9925 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 9926 bool rxfh_configured) 9927 { 9928 struct hclge_vport *vport = hclge_get_vport(handle); 9929 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 9930 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 9931 struct hclge_dev *hdev = vport->back; 9932 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 9933 u16 cur_rss_size = kinfo->rss_size; 9934 u16 cur_tqps = kinfo->num_tqps; 9935 u16 tc_valid[HCLGE_MAX_TC_NUM]; 9936 u16 roundup_size; 9937 u32 *rss_indir; 9938 unsigned int i; 9939 int ret; 9940 9941 kinfo->req_rss_size = new_tqps_num; 9942 9943 ret = hclge_tm_vport_map_update(hdev); 9944 if (ret) { 9945 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); 9946 return ret; 9947 } 9948 9949 roundup_size = roundup_pow_of_two(kinfo->rss_size); 9950 roundup_size = 
ilog2(roundup_size); 9951 /* Set the RSS TC mode according to the new RSS size */ 9952 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 9953 tc_valid[i] = 0; 9954 9955 if (!(hdev->hw_tc_map & BIT(i))) 9956 continue; 9957 9958 tc_valid[i] = 1; 9959 tc_size[i] = roundup_size; 9960 tc_offset[i] = kinfo->rss_size * i; 9961 } 9962 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 9963 if (ret) 9964 return ret; 9965 9966 /* RSS indirection table has been configured by user */ 9967 if (rxfh_configured) 9968 goto out; 9969 9970 /* Reinitializes the rss indirect table according to the new RSS size */ 9971 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 9972 if (!rss_indir) 9973 return -ENOMEM; 9974 9975 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 9976 rss_indir[i] = i % kinfo->rss_size; 9977 9978 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 9979 if (ret) 9980 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 9981 ret); 9982 9983 kfree(rss_indir); 9984 9985 out: 9986 if (!ret) 9987 dev_info(&hdev->pdev->dev, 9988 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 9989 cur_rss_size, kinfo->rss_size, 9990 cur_tqps, kinfo->rss_size * kinfo->num_tc); 9991 9992 return ret; 9993 } 9994 9995 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, 9996 u32 *regs_num_64_bit) 9997 { 9998 struct hclge_desc desc; 9999 u32 total_num; 10000 int ret; 10001 10002 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); 10003 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10004 if (ret) { 10005 dev_err(&hdev->pdev->dev, 10006 "Query register number cmd failed, ret = %d.\n", ret); 10007 return ret; 10008 } 10009 10010 *regs_num_32_bit = le32_to_cpu(desc.data[0]); 10011 *regs_num_64_bit = le32_to_cpu(desc.data[1]); 10012 10013 total_num = *regs_num_32_bit + *regs_num_64_bit; 10014 if (!total_num) 10015 return -EINVAL; 10016 10017 return 0; 10018 } 10019 10020 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, 10021 void *data) 10022 { 10023 #define HCLGE_32_BIT_REG_RTN_DATANUM 8 10024 #define HCLGE_32_BIT_DESC_NODATA_LEN 2 10025 10026 struct hclge_desc *desc; 10027 u32 *reg_val = data; 10028 __le32 *desc_data; 10029 int nodata_num; 10030 int cmd_num; 10031 int i, k, n; 10032 int ret; 10033 10034 if (regs_num == 0) 10035 return 0; 10036 10037 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN; 10038 cmd_num = DIV_ROUND_UP(regs_num + nodata_num, 10039 HCLGE_32_BIT_REG_RTN_DATANUM); 10040 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 10041 if (!desc) 10042 return -ENOMEM; 10043 10044 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); 10045 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); 10046 if (ret) { 10047 dev_err(&hdev->pdev->dev, 10048 "Query 32 bit register cmd failed, ret = %d.\n", ret); 10049 kfree(desc); 10050 return ret; 10051 } 10052 10053 for (i = 0; i < cmd_num; i++) { 10054 if (i == 0) { 10055 desc_data = (__le32 *)(&desc[i].data[0]); 10056 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num; 10057 } else { 10058 desc_data = (__le32 *)(&desc[i]); 10059 n = HCLGE_32_BIT_REG_RTN_DATANUM; 10060 } 10061 for (k = 0; k < n; k++) { 10062 *reg_val++ = le32_to_cpu(*desc_data++); 10063 10064 regs_num--; 10065 if (!regs_num) 10066 break; 10067 } 10068 } 10069 10070 kfree(desc); 10071 return 0; 10072 } 10073 10074 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, 10075 void *data) 10076 { 10077 #define HCLGE_64_BIT_REG_RTN_DATANUM 4 10078 #define
HCLGE_64_BIT_DESC_NODATA_LEN 1 10079 10080 struct hclge_desc *desc; 10081 u64 *reg_val = data; 10082 __le64 *desc_data; 10083 int nodata_len; 10084 int cmd_num; 10085 int i, k, n; 10086 int ret; 10087 10088 if (regs_num == 0) 10089 return 0; 10090 10091 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN; 10092 cmd_num = DIV_ROUND_UP(regs_num + nodata_len, 10093 HCLGE_64_BIT_REG_RTN_DATANUM); 10094 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 10095 if (!desc) 10096 return -ENOMEM; 10097 10098 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); 10099 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); 10100 if (ret) { 10101 dev_err(&hdev->pdev->dev, 10102 "Query 64 bit register cmd failed, ret = %d.\n", ret); 10103 kfree(desc); 10104 return ret; 10105 } 10106 10107 for (i = 0; i < cmd_num; i++) { 10108 if (i == 0) { 10109 desc_data = (__le64 *)(&desc[i].data[0]); 10110 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len; 10111 } else { 10112 desc_data = (__le64 *)(&desc[i]); 10113 n = HCLGE_64_BIT_REG_RTN_DATANUM; 10114 } 10115 for (k = 0; k < n; k++) { 10116 *reg_val++ = le64_to_cpu(*desc_data++); 10117 10118 regs_num--; 10119 if (!regs_num) 10120 break; 10121 } 10122 } 10123 10124 kfree(desc); 10125 return 0; 10126 } 10127 10128 #define MAX_SEPARATE_NUM 4 10129 #define SEPARATOR_VALUE 0xFDFCFBFA 10130 #define REG_NUM_PER_LINE 4 10131 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 10132 #define REG_SEPARATOR_LINE 1 10133 #define REG_NUM_REMAIN_MASK 3 10134 #define BD_LIST_MAX_NUM 30 10135 10136 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc) 10137 { 10138 /*prepare 4 commands to query DFX BD number*/ 10139 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true); 10140 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 10141 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true); 10142 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 10143 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true); 10144 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 10145 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true); 10146 10147 return hclge_cmd_send(&hdev->hw, desc, 4); 10148 } 10149 10150 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev, 10151 int *bd_num_list, 10152 u32 type_num) 10153 { 10154 #define HCLGE_DFX_REG_BD_NUM 4 10155 10156 u32 entries_per_desc, desc_index, index, offset, i; 10157 struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM]; 10158 int ret; 10159 10160 ret = hclge_query_bd_num_cmd_send(hdev, desc); 10161 if (ret) { 10162 dev_err(&hdev->pdev->dev, 10163 "Get dfx bd num fail, status is %d.\n", ret); 10164 return ret; 10165 } 10166 10167 entries_per_desc = ARRAY_SIZE(desc[0].data); 10168 for (i = 0; i < type_num; i++) { 10169 offset = hclge_dfx_bd_offset_list[i]; 10170 index = offset % entries_per_desc; 10171 desc_index = offset / entries_per_desc; 10172 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]); 10173 } 10174 10175 return ret; 10176 } 10177 10178 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev, 10179 struct hclge_desc *desc_src, int bd_num, 10180 enum hclge_opcode_type cmd) 10181 { 10182 struct hclge_desc *desc = desc_src; 10183 int i, ret; 10184 10185 hclge_cmd_setup_basic_desc(desc, cmd, true); 10186 for (i = 0; i < bd_num - 1; i++) { 10187 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 10188 desc++; 10189 hclge_cmd_setup_basic_desc(desc, cmd, true); 10190 } 10191 10192 desc = desc_src; 10193 ret = hclge_cmd_send(&hdev->hw, desc, bd_num); 10194 
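/* Note: all descriptors in the batch above carry the same opcode; HCLGE_CMD_FLAG_NEXT on every BD except the last chains them, so the firmware services the request as a single multi-BD query. */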
if (ret) 10195 dev_err(&hdev->pdev->dev, 10196 "Query dfx reg cmd(0x%x) send fail, status is %d.\n", 10197 cmd, ret); 10198 10199 return ret; 10200 } 10201 10202 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num, 10203 void *data) 10204 { 10205 int entries_per_desc, reg_num, separator_num, desc_index, index, i; 10206 struct hclge_desc *desc = desc_src; 10207 u32 *reg = data; 10208 10209 entries_per_desc = ARRAY_SIZE(desc->data); 10210 reg_num = entries_per_desc * bd_num; 10211 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK); 10212 for (i = 0; i < reg_num; i++) { 10213 index = i % entries_per_desc; 10214 desc_index = i / entries_per_desc; 10215 *reg++ = le32_to_cpu(desc[desc_index].data[index]); 10216 } 10217 for (i = 0; i < separator_num; i++) 10218 *reg++ = SEPARATOR_VALUE; 10219 10220 return reg_num + separator_num; 10221 } 10222 10223 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len) 10224 { 10225 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); 10226 int data_len_per_desc, data_len, bd_num, i; 10227 int bd_num_list[BD_LIST_MAX_NUM]; 10228 int ret; 10229 10230 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); 10231 if (ret) { 10232 dev_err(&hdev->pdev->dev, 10233 "Get dfx reg bd num fail, status is %d.\n", ret); 10234 return ret; 10235 } 10236 10237 data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data); 10238 *len = 0; 10239 for (i = 0; i < dfx_reg_type_num; i++) { 10240 bd_num = bd_num_list[i]; 10241 data_len = data_len_per_desc * bd_num; 10242 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE; 10243 } 10244 10245 return ret; 10246 } 10247 10248 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data) 10249 { 10250 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list); 10251 int bd_num, bd_num_max, buf_len, i; 10252 int bd_num_list[BD_LIST_MAX_NUM]; 10253 struct hclge_desc *desc_src; 10254 u32 *reg = data; 10255 int ret; 10256 10257 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num); 10258 if (ret) { 10259 dev_err(&hdev->pdev->dev, 10260 "Get dfx reg bd num fail, status is %d.\n", ret); 10261 return ret; 10262 } 10263 10264 bd_num_max = bd_num_list[0]; 10265 for (i = 1; i < dfx_reg_type_num; i++) 10266 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]); 10267 10268 buf_len = sizeof(*desc_src) * bd_num_max; 10269 desc_src = kzalloc(buf_len, GFP_KERNEL); 10270 if (!desc_src) { 10271 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__); 10272 return -ENOMEM; 10273 } 10274 10275 for (i = 0; i < dfx_reg_type_num; i++) { 10276 bd_num = bd_num_list[i]; 10277 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num, 10278 hclge_dfx_reg_opcode_list[i]); 10279 if (ret) { 10280 dev_err(&hdev->pdev->dev, 10281 "Get dfx reg fail, status is %d.\n", ret); 10282 break; 10283 } 10284 10285 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg); 10286 } 10287 10288 kfree(desc_src); 10289 return ret; 10290 } 10291 10292 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data, 10293 struct hnae3_knic_private_info *kinfo) 10294 { 10295 #define HCLGE_RING_REG_OFFSET 0x200 10296 #define HCLGE_RING_INT_REG_OFFSET 0x4 10297 10298 int i, j, reg_num, separator_num; 10299 int data_num_sum; 10300 u32 *reg = data; 10301 10302 /* fetching per-PF register values from PF PCIe register space */ 10303 reg_num = ARRAY_SIZE(cmdq_reg_addr_list); 10304 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); 10305 for (i = 0; i < reg_num; i++) 10306 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
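/* Each block of registers gathered below is followed by one to four SEPARATOR_VALUE words (MAX_SEPARATE_NUM minus the block length modulo four), presumably so that a parser of the dump can locate block boundaries and each block stays line-aligned. */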
10307 for (i = 0; i < separator_num; i++) 10308 *reg++ = SEPARATOR_VALUE; 10309 data_num_sum = reg_num + separator_num; 10310 10311 reg_num = ARRAY_SIZE(common_reg_addr_list); 10312 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); 10313 for (i = 0; i < reg_num; i++) 10314 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); 10315 for (i = 0; i < separator_num; i++) 10316 *reg++ = SEPARATOR_VALUE; 10317 data_num_sum += reg_num + separator_num; 10318 10319 reg_num = ARRAY_SIZE(ring_reg_addr_list); 10320 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); 10321 for (j = 0; j < kinfo->num_tqps; j++) { 10322 for (i = 0; i < reg_num; i++) 10323 *reg++ = hclge_read_dev(&hdev->hw, 10324 ring_reg_addr_list[i] + 10325 HCLGE_RING_REG_OFFSET * j); 10326 for (i = 0; i < separator_num; i++) 10327 *reg++ = SEPARATOR_VALUE; 10328 } 10329 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps; 10330 10331 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list); 10332 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); 10333 for (j = 0; j < hdev->num_msi_used - 1; j++) { 10334 for (i = 0; i < reg_num; i++) 10335 *reg++ = hclge_read_dev(&hdev->hw, 10336 tqp_intr_reg_addr_list[i] + 10337 HCLGE_RING_INT_REG_OFFSET * j); 10338 for (i = 0; i < separator_num; i++) 10339 *reg++ = SEPARATOR_VALUE; 10340 } 10341 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1); 10342 10343 return data_num_sum; 10344 } 10345 10346 static int hclge_get_regs_len(struct hnae3_handle *handle) 10347 { 10348 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 10349 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 10350 struct hclge_vport *vport = hclge_get_vport(handle); 10351 struct hclge_dev *hdev = vport->back; 10352 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len; 10353 int regs_lines_32_bit, regs_lines_64_bit; 10354 int ret; 10355 10356 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); 10357 if (ret) { 10358 dev_err(&hdev->pdev->dev, 10359 "Get register number failed, ret = %d.\n", ret); 10360 return ret; 10361 } 10362 10363 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len); 10364 if (ret) { 10365 dev_err(&hdev->pdev->dev, 10366 "Get dfx reg len failed, ret = %d.\n", ret); 10367 return ret; 10368 } 10369 10370 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 10371 REG_SEPARATOR_LINE; 10372 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 10373 REG_SEPARATOR_LINE; 10374 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 10375 REG_SEPARATOR_LINE; 10376 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 10377 REG_SEPARATOR_LINE; 10378 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE + 10379 REG_SEPARATOR_LINE; 10380 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE + 10381 REG_SEPARATOR_LINE; 10382 10383 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + 10384 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit + 10385 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len; 10386 } 10387 10388 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, 10389 void *data) 10390 { 10391 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 10392 struct hclge_vport *vport = hclge_get_vport(handle); 10393 struct hclge_dev *hdev = vport->back; 10394 u32 regs_num_32_bit, regs_num_64_bit; 10395 int i, reg_num, separator_num, ret; 10396 u32 *reg = data; 10397 10398
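/* Layout of the dump assembled below: the directly readable PF register blocks (cmdq, common, per-ring and per-TQP-vector), then the firmware-reported 32-bit and 64-bit register spaces, each padded with SEPARATOR_VALUE words, and finally the DFX registers. The total size should match what hclge_get_regs_len() returned. */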
*version = hdev->fw_version; 10399 10400 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit); 10401 if (ret) { 10402 dev_err(&hdev->pdev->dev, 10403 "Get register number failed, ret = %d.\n", ret); 10404 return; 10405 } 10406 10407 reg += hclge_fetch_pf_reg(hdev, reg, kinfo); 10408 10409 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); 10410 if (ret) { 10411 dev_err(&hdev->pdev->dev, 10412 "Get 32 bit register failed, ret = %d.\n", ret); 10413 return; 10414 } 10415 reg_num = regs_num_32_bit; 10416 reg += reg_num; 10417 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); 10418 for (i = 0; i < separator_num; i++) 10419 *reg++ = SEPARATOR_VALUE; 10420 10421 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); 10422 if (ret) { 10423 dev_err(&hdev->pdev->dev, 10424 "Get 64 bit register failed, ret = %d.\n", ret); 10425 return; 10426 } 10427 reg_num = regs_num_64_bit * 2; 10428 reg += reg_num; 10429 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK); 10430 for (i = 0; i < separator_num; i++) 10431 *reg++ = SEPARATOR_VALUE; 10432 10433 ret = hclge_get_dfx_reg(hdev, reg); 10434 if (ret) 10435 dev_err(&hdev->pdev->dev, 10436 "Get dfx register failed, ret = %d.\n", ret); 10437 } 10438 10439 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) 10440 { 10441 struct hclge_set_led_state_cmd *req; 10442 struct hclge_desc desc; 10443 int ret; 10444 10445 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); 10446 10447 req = (struct hclge_set_led_state_cmd *)desc.data; 10448 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, 10449 HCLGE_LED_LOCATE_STATE_S, locate_led_status); 10450 10451 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10452 if (ret) 10453 dev_err(&hdev->pdev->dev, 10454 "Send set led state cmd error, ret =%d\n", ret); 10455 10456 return ret; 10457 } 10458 10459 enum hclge_led_status { 10460 HCLGE_LED_OFF, 10461 HCLGE_LED_ON, 10462 HCLGE_LED_NO_CHANGE = 0xFF, 10463 }; 10464 10465 static int hclge_set_led_id(struct hnae3_handle *handle, 10466 enum ethtool_phys_id_state status) 10467 { 10468 struct hclge_vport *vport = hclge_get_vport(handle); 10469 struct hclge_dev *hdev = vport->back; 10470 10471 switch (status) { 10472 case ETHTOOL_ID_ACTIVE: 10473 return hclge_set_led_status(hdev, HCLGE_LED_ON); 10474 case ETHTOOL_ID_INACTIVE: 10475 return hclge_set_led_status(hdev, HCLGE_LED_OFF); 10476 default: 10477 return -EINVAL; 10478 } 10479 } 10480 10481 static void hclge_get_link_mode(struct hnae3_handle *handle, 10482 unsigned long *supported, 10483 unsigned long *advertising) 10484 { 10485 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); 10486 struct hclge_vport *vport = hclge_get_vport(handle); 10487 struct hclge_dev *hdev = vport->back; 10488 unsigned int idx = 0; 10489 10490 for (; idx < size; idx++) { 10491 supported[idx] = hdev->hw.mac.supported[idx]; 10492 advertising[idx] = hdev->hw.mac.advertising[idx]; 10493 } 10494 } 10495 10496 static int hclge_gro_en(struct hnae3_handle *handle, bool enable) 10497 { 10498 struct hclge_vport *vport = hclge_get_vport(handle); 10499 struct hclge_dev *hdev = vport->back; 10500 10501 return hclge_config_gro(hdev, enable); 10502 } 10503 10504 static const struct hnae3_ae_ops hclge_ops = { 10505 .init_ae_dev = hclge_init_ae_dev, 10506 .uninit_ae_dev = hclge_uninit_ae_dev, 10507 .flr_prepare = hclge_flr_prepare, 10508 .flr_done = hclge_flr_done, 10509 .init_client_instance = hclge_init_client_instance, 10510 .uninit_client_instance =
hclge_uninit_client_instance, 10511 .map_ring_to_vector = hclge_map_ring_to_vector, 10512 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, 10513 .get_vector = hclge_get_vector, 10514 .put_vector = hclge_put_vector, 10515 .set_promisc_mode = hclge_set_promisc_mode, 10516 .set_loopback = hclge_set_loopback, 10517 .start = hclge_ae_start, 10518 .stop = hclge_ae_stop, 10519 .client_start = hclge_client_start, 10520 .client_stop = hclge_client_stop, 10521 .get_status = hclge_get_status, 10522 .get_ksettings_an_result = hclge_get_ksettings_an_result, 10523 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, 10524 .get_media_type = hclge_get_media_type, 10525 .check_port_speed = hclge_check_port_speed, 10526 .get_fec = hclge_get_fec, 10527 .set_fec = hclge_set_fec, 10528 .get_rss_key_size = hclge_get_rss_key_size, 10529 .get_rss_indir_size = hclge_get_rss_indir_size, 10530 .get_rss = hclge_get_rss, 10531 .set_rss = hclge_set_rss, 10532 .set_rss_tuple = hclge_set_rss_tuple, 10533 .get_rss_tuple = hclge_get_rss_tuple, 10534 .get_tc_size = hclge_get_tc_size, 10535 .get_mac_addr = hclge_get_mac_addr, 10536 .set_mac_addr = hclge_set_mac_addr, 10537 .do_ioctl = hclge_do_ioctl, 10538 .add_uc_addr = hclge_add_uc_addr, 10539 .rm_uc_addr = hclge_rm_uc_addr, 10540 .add_mc_addr = hclge_add_mc_addr, 10541 .rm_mc_addr = hclge_rm_mc_addr, 10542 .set_autoneg = hclge_set_autoneg, 10543 .get_autoneg = hclge_get_autoneg, 10544 .restart_autoneg = hclge_restart_autoneg, 10545 .halt_autoneg = hclge_halt_autoneg, 10546 .get_pauseparam = hclge_get_pauseparam, 10547 .set_pauseparam = hclge_set_pauseparam, 10548 .set_mtu = hclge_set_mtu, 10549 .reset_queue = hclge_reset_tqp, 10550 .get_stats = hclge_get_stats, 10551 .get_mac_stats = hclge_get_mac_stat, 10552 .update_stats = hclge_update_stats, 10553 .get_strings = hclge_get_strings, 10554 .get_sset_count = hclge_get_sset_count, 10555 .get_fw_version = hclge_get_fw_version, 10556 .get_mdix_mode = hclge_get_mdix_mode, 10557 .enable_vlan_filter = hclge_enable_vlan_filter, 10558 .set_vlan_filter = hclge_set_vlan_filter, 10559 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 10560 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 10561 .reset_event = hclge_reset_event, 10562 .get_reset_level = hclge_get_reset_level, 10563 .set_default_reset_request = hclge_set_def_reset_request, 10564 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, 10565 .set_channels = hclge_set_channels, 10566 .get_channels = hclge_get_channels, 10567 .get_regs_len = hclge_get_regs_len, 10568 .get_regs = hclge_get_regs, 10569 .set_led_id = hclge_set_led_id, 10570 .get_link_mode = hclge_get_link_mode, 10571 .add_fd_entry = hclge_add_fd_entry, 10572 .del_fd_entry = hclge_del_fd_entry, 10573 .del_all_fd_entries = hclge_del_all_fd_entries, 10574 .get_fd_rule_cnt = hclge_get_fd_rule_cnt, 10575 .get_fd_rule_info = hclge_get_fd_rule_info, 10576 .get_fd_all_rules = hclge_get_all_rules, 10577 .restore_fd_rules = hclge_restore_fd_entries, 10578 .enable_fd = hclge_enable_fd, 10579 .add_arfs_entry = hclge_add_fd_entry_by_arfs, 10580 .dbg_run_cmd = hclge_dbg_run_cmd, 10581 .handle_hw_ras_error = hclge_handle_hw_ras_error, 10582 .get_hw_reset_stat = hclge_get_hw_reset_stat, 10583 .ae_dev_resetting = hclge_ae_dev_resetting, 10584 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, 10585 .set_gro_en = hclge_gro_en, 10586 .get_global_queue_id = hclge_covert_handle_qid_global, 10587 .set_timer_task = hclge_set_timer_task, 10588 .mac_connect_phy = hclge_mac_connect_phy, 10589 .mac_disconnect_phy = hclge_mac_disconnect_phy, 
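/* The remaining hooks restore the VLAN table after reset and expose the per-VF management knobs (config query, link state, spoof check, trust, rate limit and MAC), presumably backing the ndo_set_vf_* callbacks in the hns3 enet layer. */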
10590 .restore_vlan_table = hclge_restore_vlan_table, 10591 .get_vf_config = hclge_get_vf_config, 10592 .set_vf_link_state = hclge_set_vf_link_state, 10593 .set_vf_spoofchk = hclge_set_vf_spoofchk, 10594 .set_vf_trust = hclge_set_vf_trust, 10595 .set_vf_rate = hclge_set_vf_rate, 10596 .set_vf_mac = hclge_set_vf_mac, 10597 }; 10598 10599 static struct hnae3_ae_algo ae_algo = { 10600 .ops = &hclge_ops, 10601 .pdev_id_table = ae_algo_pci_tbl, 10602 }; 10603 10604 static int hclge_init(void) 10605 { 10606 pr_info("%s is initializing\n", HCLGE_NAME); 10607 10608 hnae3_register_ae_algo(&ae_algo); 10609 10610 return 0; 10611 } 10612 10613 static void hclge_exit(void) 10614 { 10615 hnae3_unregister_ae_algo(&ae_algo); 10616 } 10617 module_init(hclge_init); 10618 module_exit(hclge_exit); 10619 10620 MODULE_LICENSE("GPL"); 10621 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 10622 MODULE_DESCRIPTION("HCLGE Driver"); 10623 MODULE_VERSION(HCLGE_MOD_VERSION); 10624