1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/acpi.h> 5 #include <linux/device.h> 6 #include <linux/etherdevice.h> 7 #include <linux/init.h> 8 #include <linux/interrupt.h> 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <linux/pci.h> 13 #include <linux/platform_device.h> 14 #include <linux/if_vlan.h> 15 #include <net/rtnetlink.h> 16 #include "hclge_cmd.h" 17 #include "hclge_dcb.h" 18 #include "hclge_main.h" 19 #include "hclge_mbx.h" 20 #include "hclge_mdio.h" 21 #include "hclge_tm.h" 22 #include "hclge_err.h" 23 #include "hnae3.h" 24 25 #define HCLGE_NAME "hclge" 26 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) 27 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) 28 29 #define HCLGE_BUF_SIZE_UNIT 256 30 31 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); 32 static int hclge_init_vlan_config(struct hclge_dev *hdev); 33 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); 34 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, 35 u16 *allocated_size, bool is_alloc); 36 37 static struct hnae3_ae_algo ae_algo; 38 39 static const struct pci_device_id ae_algo_pci_tbl[] = { 40 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 41 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, 43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, 44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 47 /* required last entry */ 48 {0, } 49 }; 50 51 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); 52 53 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG, 54 HCLGE_CMDQ_TX_ADDR_H_REG, 55 HCLGE_CMDQ_TX_DEPTH_REG, 56 HCLGE_CMDQ_TX_TAIL_REG, 57 HCLGE_CMDQ_TX_HEAD_REG, 58 HCLGE_CMDQ_RX_ADDR_L_REG, 59 HCLGE_CMDQ_RX_ADDR_H_REG, 60 HCLGE_CMDQ_RX_DEPTH_REG, 61 HCLGE_CMDQ_RX_TAIL_REG, 62 HCLGE_CMDQ_RX_HEAD_REG, 63 HCLGE_VECTOR0_CMDQ_SRC_REG, 64 HCLGE_CMDQ_INTR_STS_REG, 65 HCLGE_CMDQ_INTR_EN_REG, 66 HCLGE_CMDQ_INTR_GEN_REG}; 67 68 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, 69 HCLGE_VECTOR0_OTER_EN_REG, 70 HCLGE_MISC_RESET_STS_REG, 71 HCLGE_MISC_VECTOR_INT_STS, 72 HCLGE_GLOBAL_RESET_REG, 73 HCLGE_FUN_RST_ING, 74 HCLGE_GRO_EN_REG}; 75 76 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, 77 HCLGE_RING_RX_ADDR_H_REG, 78 HCLGE_RING_RX_BD_NUM_REG, 79 HCLGE_RING_RX_BD_LENGTH_REG, 80 HCLGE_RING_RX_MERGE_EN_REG, 81 HCLGE_RING_RX_TAIL_REG, 82 HCLGE_RING_RX_HEAD_REG, 83 HCLGE_RING_RX_FBD_NUM_REG, 84 HCLGE_RING_RX_OFFSET_REG, 85 HCLGE_RING_RX_FBD_OFFSET_REG, 86 HCLGE_RING_RX_STASH_REG, 87 HCLGE_RING_RX_BD_ERR_REG, 88 HCLGE_RING_TX_ADDR_L_REG, 89 HCLGE_RING_TX_ADDR_H_REG, 90 HCLGE_RING_TX_BD_NUM_REG, 91 HCLGE_RING_TX_PRIORITY_REG, 92 HCLGE_RING_TX_TC_REG, 93 HCLGE_RING_TX_MERGE_EN_REG, 94 HCLGE_RING_TX_TAIL_REG, 95 HCLGE_RING_TX_HEAD_REG, 96 HCLGE_RING_TX_FBD_NUM_REG, 97 HCLGE_RING_TX_OFFSET_REG, 98 HCLGE_RING_TX_EBD_NUM_REG, 99 HCLGE_RING_TX_EBD_OFFSET_REG, 100 HCLGE_RING_TX_BD_ERR_REG, 101 HCLGE_RING_EN_REG}; 102 103 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, 104 HCLGE_TQP_INTR_GL0_REG, 105 HCLGE_TQP_INTR_GL1_REG, 106 HCLGE_TQP_INTR_GL2_REG, 107 HCLGE_TQP_INTR_RL_REG}; 108 109 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { 110 "App Loopback test", 111 "Serdes serial Loopback test", 112 "Serdes parallel Loopback test", 113 "Phy Loopback test" 114 }; 115 116 static const struct hclge_comm_stats_str g_mac_stats_string[] = { 117 {"mac_tx_mac_pause_num", 118 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, 119 {"mac_rx_mac_pause_num", 120 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, 121 {"mac_tx_pfc_pri0_pkt_num", 122 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 123 {"mac_tx_pfc_pri1_pkt_num", 124 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 125 {"mac_tx_pfc_pri2_pkt_num", 126 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 127 {"mac_tx_pfc_pri3_pkt_num", 128 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 129 {"mac_tx_pfc_pri4_pkt_num", 130 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 131 {"mac_tx_pfc_pri5_pkt_num", 132 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 133 {"mac_tx_pfc_pri6_pkt_num", 134 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 135 {"mac_tx_pfc_pri7_pkt_num", 136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 137 {"mac_rx_pfc_pri0_pkt_num", 138 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 139 {"mac_rx_pfc_pri1_pkt_num", 140 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 141 {"mac_rx_pfc_pri2_pkt_num", 142 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 143 {"mac_rx_pfc_pri3_pkt_num", 144 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 145 {"mac_rx_pfc_pri4_pkt_num", 146 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 147 {"mac_rx_pfc_pri5_pkt_num", 148 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 149 {"mac_rx_pfc_pri6_pkt_num", 150 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 151 {"mac_rx_pfc_pri7_pkt_num", 152 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 153 {"mac_tx_total_pkt_num", 154 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 155 {"mac_tx_total_oct_num", 156 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 157 {"mac_tx_good_pkt_num", 158 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 159 {"mac_tx_bad_pkt_num", 160 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 161 {"mac_tx_good_oct_num", 162 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 163 {"mac_tx_bad_oct_num", 164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 165 {"mac_tx_uni_pkt_num", 166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 167 {"mac_tx_multi_pkt_num", 168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 169 {"mac_tx_broad_pkt_num", 170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 171 {"mac_tx_undersize_pkt_num", 172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 173 {"mac_tx_oversize_pkt_num", 174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, 175 {"mac_tx_64_oct_pkt_num", 176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 177 {"mac_tx_65_127_oct_pkt_num", 178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 179 {"mac_tx_128_255_oct_pkt_num", 180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 181 {"mac_tx_256_511_oct_pkt_num", 182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 183 {"mac_tx_512_1023_oct_pkt_num", 184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 185 {"mac_tx_1024_1518_oct_pkt_num", 186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 187 {"mac_tx_1519_2047_oct_pkt_num", 188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, 189 {"mac_tx_2048_4095_oct_pkt_num", 190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 191 {"mac_tx_4096_8191_oct_pkt_num", 192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 193 {"mac_tx_8192_9216_oct_pkt_num", 194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 195 {"mac_tx_9217_12287_oct_pkt_num", 196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, 197 {"mac_tx_12288_16383_oct_pkt_num", 198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, 199 {"mac_tx_1519_max_good_pkt_num", 200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, 201 {"mac_tx_1519_max_bad_pkt_num", 202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, 203 {"mac_rx_total_pkt_num", 204 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 205 {"mac_rx_total_oct_num", 206 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 207 {"mac_rx_good_pkt_num", 208 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 209 {"mac_rx_bad_pkt_num", 210 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 211 {"mac_rx_good_oct_num", 212 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 213 {"mac_rx_bad_oct_num", 214 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 215 {"mac_rx_uni_pkt_num", 216 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 217 {"mac_rx_multi_pkt_num", 218 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 219 {"mac_rx_broad_pkt_num", 220 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 221 {"mac_rx_undersize_pkt_num", 222 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 223 {"mac_rx_oversize_pkt_num", 224 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, 225 {"mac_rx_64_oct_pkt_num", 226 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 227 {"mac_rx_65_127_oct_pkt_num", 228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 229 {"mac_rx_128_255_oct_pkt_num", 230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 231 {"mac_rx_256_511_oct_pkt_num", 232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 233 {"mac_rx_512_1023_oct_pkt_num", 234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 235 {"mac_rx_1024_1518_oct_pkt_num", 236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 237 {"mac_rx_1519_2047_oct_pkt_num", 238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 239 {"mac_rx_2048_4095_oct_pkt_num", 240 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 241 {"mac_rx_4096_8191_oct_pkt_num", 242 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 243 {"mac_rx_8192_9216_oct_pkt_num", 244 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 245 {"mac_rx_9217_12287_oct_pkt_num", 246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 247 {"mac_rx_12288_16383_oct_pkt_num", 248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 249 {"mac_rx_1519_max_good_pkt_num", 250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 251 {"mac_rx_1519_max_bad_pkt_num", 252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 253 254 {"mac_tx_fragment_pkt_num", 255 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 256 {"mac_tx_undermin_pkt_num", 257 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 258 {"mac_tx_jabber_pkt_num", 259 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 260 {"mac_tx_err_all_pkt_num", 261 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 262 {"mac_tx_from_app_good_pkt_num", 263 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 264 {"mac_tx_from_app_bad_pkt_num", 265 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 266 {"mac_rx_fragment_pkt_num", 267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 268 {"mac_rx_undermin_pkt_num", 269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 270 {"mac_rx_jabber_pkt_num", 271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 272 {"mac_rx_fcs_err_pkt_num", 273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 274 {"mac_rx_send_app_good_pkt_num", 275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 276 {"mac_rx_send_app_bad_pkt_num", 277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 278 }; 279 280 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 281 { 282 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 283 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP), 284 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), 285 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), 286 .i_port_bitmap = 0x1, 287 }, 288 }; 289 290 static int hclge_mac_update_stats(struct hclge_dev *hdev) 291 { 292 #define HCLGE_MAC_CMD_NUM 21 293 #define HCLGE_RTN_DATA_NUM 4 294 295 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); 296 struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; 297 __le64 *desc_data; 298 int i, k, n; 299 int ret; 300 301 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); 302 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); 303 if (ret) { 304 dev_err(&hdev->pdev->dev, 305 "Get MAC pkt stats fail, status = %d.\n", ret); 306 307 return ret; 308 } 309 310 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { 311 if (unlikely(i == 0)) { 312 desc_data = (__le64 *)(&desc[i].data[0]); 313 n = HCLGE_RTN_DATA_NUM - 2; 314 } else { 315 desc_data = (__le64 *)(&desc[i]); 316 n = HCLGE_RTN_DATA_NUM; 317 } 318 for (k = 0; k < n; k++) { 319 *data++ += le64_to_cpu(*desc_data); 320 desc_data++; 321 } 322 } 323 324 return 0; 325 } 326 327 static int hclge_tqps_update_stats(struct hnae3_handle *handle) 328 { 329 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 330 struct hclge_vport *vport = hclge_get_vport(handle); 331 struct hclge_dev *hdev = vport->back; 332 struct hnae3_queue *queue; 333 struct hclge_desc desc[1]; 334 struct hclge_tqp *tqp; 335 int ret, i; 336 337 for (i = 0; i < kinfo->num_tqps; i++) { 338 queue = handle->kinfo.tqp[i]; 339 tqp = container_of(queue, struct hclge_tqp, q); 340 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 341 hclge_cmd_setup_basic_desc(&desc[0], 342 HCLGE_OPC_QUERY_RX_STATUS, 343 true); 344 345 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 346 ret = hclge_cmd_send(&hdev->hw, desc, 1); 347 if (ret) { 348 dev_err(&hdev->pdev->dev, 349 "Query tqp stat fail, status = %d,queue = %d\n", 350 ret, i); 351 return ret; 352 } 353 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 354 le32_to_cpu(desc[0].data[1]); 355 } 356 357 for (i = 0; i < kinfo->num_tqps; i++) { 358 queue = handle->kinfo.tqp[i]; 359 tqp = container_of(queue, struct hclge_tqp, q); 360 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 361 hclge_cmd_setup_basic_desc(&desc[0], 362 HCLGE_OPC_QUERY_TX_STATUS, 363 true); 364 365 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 366 ret = hclge_cmd_send(&hdev->hw, desc, 1); 367 if (ret) { 368 dev_err(&hdev->pdev->dev, 369 "Query tqp stat fail, status = %d,queue = %d\n", 370 ret, i); 371 return ret; 372 } 373 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 374 le32_to_cpu(desc[0].data[1]); 375 } 376 377 return 0; 378 } 379 380 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 381 { 382 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 383 struct hclge_tqp *tqp; 384 u64 *buff = data; 385 int i; 386 387 for (i = 0; i < kinfo->num_tqps; i++) { 388 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 389 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 390 } 391 392 for (i = 0; i < kinfo->num_tqps; i++) { 393 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 394 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 395 } 396 397 return buff; 398 } 399 400 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) 401 { 402 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 403 404 return kinfo->num_tqps * (2); 405 } 406 407 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 408 { 409 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 410 u8 *buff = data; 411 int i = 0; 412 413 for (i = 0; i < kinfo->num_tqps; i++) { 414 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], 415 struct hclge_tqp, q); 416 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd", 417 tqp->index); 418 buff = buff + ETH_GSTRING_LEN; 419 } 420 421 for (i = 0; i < kinfo->num_tqps; i++) { 422 struct hclge_tqp *tqp = container_of(kinfo->tqp[i], 423 struct hclge_tqp, q); 424 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd", 425 tqp->index); 426 buff = buff + ETH_GSTRING_LEN; 427 } 428 429 return buff; 430 } 431 432 static u64 *hclge_comm_get_stats(void *comm_stats, 433 const struct hclge_comm_stats_str strs[], 434 int size, u64 *data) 435 { 436 u64 *buf = data; 437 u32 i; 438 439 for (i = 0; i < size; i++) 440 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); 441 442 return buf + size; 443 } 444 445 static u8 *hclge_comm_get_strings(u32 stringset, 446 const struct hclge_comm_stats_str strs[], 447 int size, u8 *data) 448 { 449 char *buff = (char *)data; 450 u32 i; 451 452 if (stringset != ETH_SS_STATS) 453 return buff; 454 455 for (i = 0; i < size; i++) { 456 snprintf(buff, ETH_GSTRING_LEN, 457 strs[i].desc); 458 buff = buff + ETH_GSTRING_LEN; 459 } 460 461 return (u8 *)buff; 462 } 463 464 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, 465 struct net_device_stats *net_stats) 466 { 467 net_stats->tx_dropped = 0; 468 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; 469 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; 470 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; 471 472 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; 473 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; 474 475 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; 476 net_stats->rx_length_errors = 477 hw_stats->mac_stats.mac_rx_undersize_pkt_num; 478 net_stats->rx_length_errors += 479 hw_stats->mac_stats.mac_rx_oversize_pkt_num; 480 net_stats->rx_over_errors = 481 hw_stats->mac_stats.mac_rx_oversize_pkt_num; 482 } 483 484 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 485 { 486 struct hnae3_handle *handle; 487 int status; 488 489 handle = &hdev->vport[0].nic; 490 if (handle->client) { 491 status = hclge_tqps_update_stats(handle); 492 if (status) { 493 dev_err(&hdev->pdev->dev, 494 "Update TQPS stats fail, status = %d.\n", 495 status); 496 } 497 } 498 499 status = hclge_mac_update_stats(hdev); 500 if (status) 501 dev_err(&hdev->pdev->dev, 502 "Update MAC stats fail, status = %d.\n", status); 503 504 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); 505 } 506 507 static void hclge_update_stats(struct hnae3_handle *handle, 508 struct net_device_stats *net_stats) 509 { 510 struct hclge_vport *vport = hclge_get_vport(handle); 511 struct hclge_dev *hdev = vport->back; 512 struct hclge_hw_stats *hw_stats = &hdev->hw_stats; 513 int status; 514 515 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 516 return; 517 518 status = hclge_mac_update_stats(hdev); 519 if (status) 520 dev_err(&hdev->pdev->dev, 521 "Update MAC stats fail, status = %d.\n", 522 status); 523 524 status = hclge_tqps_update_stats(handle); 525 if (status) 526 dev_err(&hdev->pdev->dev, 527 "Update TQPS stats fail, status = %d.\n", 528 status); 529 530 hclge_update_netstat(hw_stats, net_stats); 531 532 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); 533 } 534 535 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 536 { 537 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ 538 HNAE3_SUPPORT_PHY_LOOPBACK |\ 539 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ 540 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) 541 542 struct hclge_vport *vport = hclge_get_vport(handle); 543 struct hclge_dev *hdev = vport->back; 544 int count = 0; 545 546 /* Loopback test support rules: 547 * mac: only GE mode support 548 * serdes: all mac mode will support include GE/XGE/LGE/CGE 549 * phy: only support when phy device exist on board 550 */ 551 if (stringset == ETH_SS_TEST) { 552 /* clear loopback bit flags at first */ 553 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 554 if (hdev->pdev->revision >= 0x21 || 555 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 556 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 557 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 558 count += 1; 559 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; 560 } 561 562 count += 2; 563 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; 564 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; 565 } else if (stringset == ETH_SS_STATS) { 566 count = ARRAY_SIZE(g_mac_stats_string) + 567 hclge_tqps_get_sset_count(handle, stringset); 568 } 569 570 return count; 571 } 572 573 static void hclge_get_strings(struct hnae3_handle *handle, 574 u32 stringset, 575 u8 *data) 576 { 577 u8 *p = (char *)data; 578 int size; 579 580 if (stringset == ETH_SS_STATS) { 581 size = ARRAY_SIZE(g_mac_stats_string); 582 p = hclge_comm_get_strings(stringset, 583 g_mac_stats_string, 584 size, 585 p); 586 p = hclge_tqps_get_strings(handle, p); 587 } else if (stringset == ETH_SS_TEST) { 588 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { 589 memcpy(p, 590 hns3_nic_test_strs[HNAE3_LOOP_APP], 591 ETH_GSTRING_LEN); 592 p += ETH_GSTRING_LEN; 593 } 594 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { 595 memcpy(p, 596 hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], 597 ETH_GSTRING_LEN); 598 p += ETH_GSTRING_LEN; 599 } 600 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { 601 memcpy(p, 602 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], 603 ETH_GSTRING_LEN); 604 p += ETH_GSTRING_LEN; 605 } 606 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 607 memcpy(p, 608 hns3_nic_test_strs[HNAE3_LOOP_PHY], 609 ETH_GSTRING_LEN); 610 p += ETH_GSTRING_LEN; 611 } 612 } 613 } 614 615 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) 616 { 617 struct hclge_vport *vport = hclge_get_vport(handle); 618 struct hclge_dev *hdev = vport->back; 619 u64 *p; 620 621 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, 622 g_mac_stats_string, 623 ARRAY_SIZE(g_mac_stats_string), 624 data); 625 p = hclge_tqps_get_stats(handle, p); 626 } 627 628 static int hclge_parse_func_status(struct hclge_dev *hdev, 629 struct hclge_func_status_cmd *status) 630 { 631 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 632 return -EINVAL; 633 634 /* Set the pf to main pf */ 635 if (status->pf_state & HCLGE_PF_STATE_MAIN) 636 hdev->flag |= HCLGE_FLAG_MAIN; 637 else 638 hdev->flag &= ~HCLGE_FLAG_MAIN; 639 640 return 0; 641 } 642 643 static int hclge_query_function_status(struct hclge_dev *hdev) 644 { 645 struct hclge_func_status_cmd *req; 646 struct hclge_desc desc; 647 int timeout = 0; 648 int ret; 649 650 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 651 req = (struct hclge_func_status_cmd *)desc.data; 652 653 do { 654 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 655 if (ret) { 656 dev_err(&hdev->pdev->dev, 657 "query function status failed %d.\n", 658 ret); 659 660 return ret; 661 } 662 663 /* Check pf reset is done */ 664 if (req->pf_state) 665 break; 666 usleep_range(1000, 2000); 667 } while (timeout++ < 5); 668 669 ret = hclge_parse_func_status(hdev, req); 670 671 return ret; 672 } 673 674 static int hclge_query_pf_resource(struct hclge_dev *hdev) 675 { 676 struct hclge_pf_res_cmd *req; 677 struct hclge_desc desc; 678 int ret; 679 680 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 681 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 682 if (ret) { 683 dev_err(&hdev->pdev->dev, 684 "query pf resource failed %d.\n", ret); 685 return ret; 686 } 687 688 req = (struct hclge_pf_res_cmd *)desc.data; 689 hdev->num_tqps = __le16_to_cpu(req->tqp_num); 690 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 691 692 if (req->tx_buf_size) 693 hdev->tx_buf_size = 694 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; 695 else 696 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; 697 698 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); 699 700 if (req->dv_buf_size) 701 hdev->dv_buf_size = 702 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; 703 else 704 hdev->dv_buf_size = HCLGE_DEFAULT_DV; 705 706 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); 707 708 if (hnae3_dev_roce_supported(hdev)) { 709 hdev->roce_base_msix_offset = 710 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), 711 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S); 712 hdev->num_roce_msi = 713 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), 714 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 715 716 /* PF should have NIC vectors and Roce vectors, 717 * NIC vectors are queued before Roce vectors. 718 */ 719 hdev->num_msi = hdev->num_roce_msi + 720 hdev->roce_base_msix_offset; 721 } else { 722 hdev->num_msi = 723 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), 724 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 725 } 726 727 return 0; 728 } 729 730 static int hclge_parse_speed(int speed_cmd, int *speed) 731 { 732 switch (speed_cmd) { 733 case 6: 734 *speed = HCLGE_MAC_SPEED_10M; 735 break; 736 case 7: 737 *speed = HCLGE_MAC_SPEED_100M; 738 break; 739 case 0: 740 *speed = HCLGE_MAC_SPEED_1G; 741 break; 742 case 1: 743 *speed = HCLGE_MAC_SPEED_10G; 744 break; 745 case 2: 746 *speed = HCLGE_MAC_SPEED_25G; 747 break; 748 case 3: 749 *speed = HCLGE_MAC_SPEED_40G; 750 break; 751 case 4: 752 *speed = HCLGE_MAC_SPEED_50G; 753 break; 754 case 5: 755 *speed = HCLGE_MAC_SPEED_100G; 756 break; 757 default: 758 return -EINVAL; 759 } 760 761 return 0; 762 } 763 764 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, 765 u8 speed_ability) 766 { 767 unsigned long *supported = hdev->hw.mac.supported; 768 769 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 770 set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 771 supported); 772 773 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 774 set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 775 supported); 776 777 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 778 set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 779 supported); 780 781 if (speed_ability & HCLGE_SUPPORT_50G_BIT) 782 set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 783 supported); 784 785 if (speed_ability & HCLGE_SUPPORT_100G_BIT) 786 set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 787 supported); 788 789 set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); 790 set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); 791 } 792 793 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) 794 { 795 u8 media_type = hdev->hw.mac.media_type; 796 797 if (media_type != HNAE3_MEDIA_TYPE_FIBER) 798 return; 799 800 hclge_parse_fiber_link_mode(hdev, speed_ability); 801 } 802 803 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) 804 { 805 struct hclge_cfg_param_cmd *req; 806 u64 mac_addr_tmp_high; 807 u64 mac_addr_tmp; 808 int i; 809 810 req = (struct hclge_cfg_param_cmd *)desc[0].data; 811 812 /* get the configuration */ 813 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 814 HCLGE_CFG_VMDQ_M, 815 HCLGE_CFG_VMDQ_S); 816 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 817 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); 818 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 819 HCLGE_CFG_TQP_DESC_N_M, 820 HCLGE_CFG_TQP_DESC_N_S); 821 822 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), 823 HCLGE_CFG_PHY_ADDR_M, 824 HCLGE_CFG_PHY_ADDR_S); 825 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), 826 HCLGE_CFG_MEDIA_TP_M, 827 HCLGE_CFG_MEDIA_TP_S); 828 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), 829 HCLGE_CFG_RX_BUF_LEN_M, 830 HCLGE_CFG_RX_BUF_LEN_S); 831 /* get mac_address */ 832 mac_addr_tmp = __le32_to_cpu(req->param[2]); 833 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), 834 HCLGE_CFG_MAC_ADDR_H_M, 835 HCLGE_CFG_MAC_ADDR_H_S); 836 837 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 838 839 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), 840 HCLGE_CFG_DEFAULT_SPEED_M, 841 HCLGE_CFG_DEFAULT_SPEED_S); 842 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), 843 HCLGE_CFG_RSS_SIZE_M, 844 HCLGE_CFG_RSS_SIZE_S); 845 846 for (i = 0; i < ETH_ALEN; i++) 847 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 848 849 req = (struct hclge_cfg_param_cmd *)desc[1].data; 850 cfg->numa_node_map = __le32_to_cpu(req->param[0]); 851 852 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), 853 HCLGE_CFG_SPEED_ABILITY_M, 854 HCLGE_CFG_SPEED_ABILITY_S); 855 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), 856 HCLGE_CFG_UMV_TBL_SPACE_M, 857 HCLGE_CFG_UMV_TBL_SPACE_S); 858 if (!cfg->umv_space) 859 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; 860 } 861 862 /* hclge_get_cfg: query the static parameter from flash 863 * @hdev: pointer to struct hclge_dev 864 * @hcfg: the config structure to be getted 865 */ 866 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) 867 { 868 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; 869 struct hclge_cfg_param_cmd *req; 870 int i, ret; 871 872 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { 873 u32 offset = 0; 874 875 req = (struct hclge_cfg_param_cmd *)desc[i].data; 876 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, 877 true); 878 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, 879 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); 880 /* Len should be united by 4 bytes when send to hardware */ 881 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, 882 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); 883 req->offset = cpu_to_le32(offset); 884 } 885 886 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); 887 if (ret) { 888 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); 889 return ret; 890 } 891 892 hclge_parse_cfg(hcfg, desc); 893 894 return 0; 895 } 896 897 static int hclge_get_cap(struct hclge_dev *hdev) 898 { 899 int ret; 900 901 ret = hclge_query_function_status(hdev); 902 if (ret) { 903 dev_err(&hdev->pdev->dev, 904 "query function status error %d.\n", ret); 905 return ret; 906 } 907 908 /* get pf resource */ 909 ret = hclge_query_pf_resource(hdev); 910 if (ret) 911 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret); 912 913 return ret; 914 } 915 916 static int hclge_configure(struct hclge_dev *hdev) 917 { 918 struct hclge_cfg cfg; 919 int ret, i; 920 921 ret = hclge_get_cfg(hdev, &cfg); 922 if (ret) { 923 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); 924 return ret; 925 } 926 927 hdev->num_vmdq_vport = cfg.vmdq_vport_num; 928 hdev->base_tqp_pid = 0; 929 hdev->rss_size_max = cfg.rss_size_max; 930 hdev->rx_buf_len = cfg.rx_buf_len; 931 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); 932 hdev->hw.mac.media_type = cfg.media_type; 933 hdev->hw.mac.phy_addr = cfg.phy_addr; 934 hdev->num_desc = cfg.tqp_desc_num; 935 hdev->tm_info.num_pg = 1; 936 hdev->tc_max = cfg.tc_num; 937 hdev->tm_info.hw_pfc_map = 0; 938 hdev->wanted_umv_size = cfg.umv_space; 939 940 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); 941 if (ret) { 942 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); 943 return ret; 944 } 945 946 hclge_parse_link_mode(hdev, cfg.speed_ability); 947 948 if ((hdev->tc_max > HNAE3_MAX_TC) || 949 (hdev->tc_max < 1)) { 950 dev_warn(&hdev->pdev->dev, "TC num = %d.\n", 951 hdev->tc_max); 952 hdev->tc_max = 1; 953 } 954 955 /* Dev does not support DCB */ 956 if (!hnae3_dev_dcb_supported(hdev)) { 957 hdev->tc_max = 1; 958 hdev->pfc_max = 0; 959 } else { 960 hdev->pfc_max = hdev->tc_max; 961 } 962 963 hdev->tm_info.num_tc = 1; 964 965 /* Currently not support uncontiuous tc */ 966 for (i = 0; i < hdev->tm_info.num_tc; i++) 967 hnae3_set_bit(hdev->hw_tc_map, i, 1); 968 969 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; 970 971 return ret; 972 } 973 974 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, 975 int tso_mss_max) 976 { 977 struct hclge_cfg_tso_status_cmd *req; 978 struct hclge_desc desc; 979 u16 tso_mss; 980 981 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); 982 983 req = (struct hclge_cfg_tso_status_cmd *)desc.data; 984 985 tso_mss = 0; 986 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 987 HCLGE_TSO_MSS_MIN_S, tso_mss_min); 988 req->tso_mss_min = cpu_to_le16(tso_mss); 989 990 tso_mss = 0; 991 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 992 HCLGE_TSO_MSS_MIN_S, tso_mss_max); 993 req->tso_mss_max = cpu_to_le16(tso_mss); 994 995 return hclge_cmd_send(&hdev->hw, &desc, 1); 996 } 997 998 static int hclge_config_gro(struct hclge_dev *hdev, bool en) 999 { 1000 struct hclge_cfg_gro_status_cmd *req; 1001 struct hclge_desc desc; 1002 int ret; 1003 1004 if (!hnae3_dev_gro_supported(hdev)) 1005 return 0; 1006 1007 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); 1008 req = (struct hclge_cfg_gro_status_cmd *)desc.data; 1009 1010 req->gro_en = cpu_to_le16(en ? 1 : 0); 1011 1012 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1013 if (ret) 1014 dev_err(&hdev->pdev->dev, 1015 "GRO hardware config cmd failed, ret = %d\n", ret); 1016 1017 return ret; 1018 } 1019 1020 static int hclge_alloc_tqps(struct hclge_dev *hdev) 1021 { 1022 struct hclge_tqp *tqp; 1023 int i; 1024 1025 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 1026 sizeof(struct hclge_tqp), GFP_KERNEL); 1027 if (!hdev->htqp) 1028 return -ENOMEM; 1029 1030 tqp = hdev->htqp; 1031 1032 for (i = 0; i < hdev->num_tqps; i++) { 1033 tqp->dev = &hdev->pdev->dev; 1034 tqp->index = i; 1035 1036 tqp->q.ae_algo = &ae_algo; 1037 tqp->q.buf_size = hdev->rx_buf_len; 1038 tqp->q.desc_num = hdev->num_desc; 1039 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + 1040 i * HCLGE_TQP_REG_SIZE; 1041 1042 tqp++; 1043 } 1044 1045 return 0; 1046 } 1047 1048 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, 1049 u16 tqp_pid, u16 tqp_vid, bool is_pf) 1050 { 1051 struct hclge_tqp_map_cmd *req; 1052 struct hclge_desc desc; 1053 int ret; 1054 1055 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); 1056 1057 req = (struct hclge_tqp_map_cmd *)desc.data; 1058 req->tqp_id = cpu_to_le16(tqp_pid); 1059 req->tqp_vf = func_id; 1060 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | 1061 1 << HCLGE_TQP_MAP_EN_B; 1062 req->tqp_vid = cpu_to_le16(tqp_vid); 1063 1064 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1065 if (ret) 1066 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); 1067 1068 return ret; 1069 } 1070 1071 static int hclge_assign_tqp(struct hclge_vport *vport) 1072 { 1073 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 1074 struct hclge_dev *hdev = vport->back; 1075 int i, alloced; 1076 1077 for (i = 0, alloced = 0; i < hdev->num_tqps && 1078 alloced < kinfo->num_tqps; i++) { 1079 if (!hdev->htqp[i].alloced) { 1080 hdev->htqp[i].q.handle = &vport->nic; 1081 hdev->htqp[i].q.tqp_index = alloced; 1082 hdev->htqp[i].q.desc_num = kinfo->num_desc; 1083 kinfo->tqp[alloced] = &hdev->htqp[i].q; 1084 hdev->htqp[i].alloced = true; 1085 alloced++; 1086 } 1087 } 1088 vport->alloc_tqps = kinfo->num_tqps; 1089 1090 return 0; 1091 } 1092 1093 static int hclge_knic_setup(struct hclge_vport *vport, 1094 u16 num_tqps, u16 num_desc) 1095 { 1096 struct hnae3_handle *nic = &vport->nic; 1097 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1098 struct hclge_dev *hdev = vport->back; 1099 int i, ret; 1100 1101 kinfo->num_desc = num_desc; 1102 kinfo->rx_buf_len = hdev->rx_buf_len; 1103 kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); 1104 kinfo->rss_size 1105 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); 1106 kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; 1107 1108 for (i = 0; i < HNAE3_MAX_TC; i++) { 1109 if (hdev->hw_tc_map & BIT(i)) { 1110 kinfo->tc_info[i].enable = true; 1111 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; 1112 kinfo->tc_info[i].tqp_count = kinfo->rss_size; 1113 kinfo->tc_info[i].tc = i; 1114 } else { 1115 /* Set to default queue if TC is disable */ 1116 kinfo->tc_info[i].enable = false; 1117 kinfo->tc_info[i].tqp_offset = 0; 1118 kinfo->tc_info[i].tqp_count = 1; 1119 kinfo->tc_info[i].tc = 0; 1120 } 1121 } 1122 1123 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 1124 sizeof(struct hnae3_queue *), GFP_KERNEL); 1125 if (!kinfo->tqp) 1126 return -ENOMEM; 1127 1128 ret = hclge_assign_tqp(vport); 1129 if (ret) 1130 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1131 1132 return ret; 1133 } 1134 1135 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, 1136 struct hclge_vport *vport) 1137 { 1138 struct hnae3_handle *nic = &vport->nic; 1139 struct hnae3_knic_private_info *kinfo; 1140 u16 i; 1141 1142 kinfo = &nic->kinfo; 1143 for (i = 0; i < kinfo->num_tqps; i++) { 1144 struct hclge_tqp *q = 1145 container_of(kinfo->tqp[i], struct hclge_tqp, q); 1146 bool is_pf; 1147 int ret; 1148 1149 is_pf = !(vport->vport_id); 1150 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, 1151 i, is_pf); 1152 if (ret) 1153 return ret; 1154 } 1155 1156 return 0; 1157 } 1158 1159 static int hclge_map_tqp(struct hclge_dev *hdev) 1160 { 1161 struct hclge_vport *vport = hdev->vport; 1162 u16 i, num_vport; 1163 1164 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1165 for (i = 0; i < num_vport; i++) { 1166 int ret; 1167 1168 ret = hclge_map_tqp_to_vport(hdev, vport); 1169 if (ret) 1170 return ret; 1171 1172 vport++; 1173 } 1174 1175 return 0; 1176 } 1177 1178 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) 1179 { 1180 /* this would be initialized later */ 1181 } 1182 1183 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1184 { 1185 struct hnae3_handle *nic = &vport->nic; 1186 struct hclge_dev *hdev = vport->back; 1187 int ret; 1188 1189 nic->pdev = hdev->pdev; 1190 nic->ae_algo = &ae_algo; 1191 nic->numa_node_mask = hdev->numa_node_mask; 1192 1193 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { 1194 ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc); 1195 if (ret) { 1196 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", 1197 ret); 1198 return ret; 1199 } 1200 } else { 1201 hclge_unic_setup(vport, num_tqps); 1202 } 1203 1204 return 0; 1205 } 1206 1207 static int hclge_alloc_vport(struct hclge_dev *hdev) 1208 { 1209 struct pci_dev *pdev = hdev->pdev; 1210 struct hclge_vport *vport; 1211 u32 tqp_main_vport; 1212 u32 tqp_per_vport; 1213 int num_vport, i; 1214 int ret; 1215 1216 /* We need to alloc a vport for main NIC of PF */ 1217 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1218 1219 if (hdev->num_tqps < num_vport) { 1220 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", 1221 hdev->num_tqps, num_vport); 1222 return -EINVAL; 1223 } 1224 1225 /* Alloc the same number of TQPs for every vport */ 1226 tqp_per_vport = hdev->num_tqps / num_vport; 1227 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1228 1229 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1230 GFP_KERNEL); 1231 if (!vport) 1232 return -ENOMEM; 1233 1234 hdev->vport = vport; 1235 hdev->num_alloc_vport = num_vport; 1236 1237 if (IS_ENABLED(CONFIG_PCI_IOV)) 1238 hdev->num_alloc_vfs = hdev->num_req_vfs; 1239 1240 for (i = 0; i < num_vport; i++) { 1241 vport->back = hdev; 1242 vport->vport_id = i; 1243 vport->mps = HCLGE_MAC_DEFAULT_FRAME; 1244 1245 if (i == 0) 1246 ret = hclge_vport_setup(vport, tqp_main_vport); 1247 else 1248 ret = hclge_vport_setup(vport, tqp_per_vport); 1249 if (ret) { 1250 dev_err(&pdev->dev, 1251 "vport setup failed for vport %d, %d\n", 1252 i, ret); 1253 return ret; 1254 } 1255 1256 vport++; 1257 } 1258 1259 return 0; 1260 } 1261 1262 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1263 struct hclge_pkt_buf_alloc *buf_alloc) 1264 { 1265 /* TX buffer size is unit by 128 byte */ 1266 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1267 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1268 struct hclge_tx_buff_alloc_cmd *req; 1269 struct hclge_desc desc; 1270 int ret; 1271 u8 i; 1272 1273 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1274 1275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1276 for (i = 0; i < HCLGE_TC_NUM; i++) { 1277 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1278 1279 req->tx_pkt_buff[i] = 1280 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1281 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1282 } 1283 1284 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1285 if (ret) 1286 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1287 ret); 1288 1289 return ret; 1290 } 1291 1292 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1293 struct hclge_pkt_buf_alloc *buf_alloc) 1294 { 1295 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1296 1297 if (ret) 1298 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); 1299 1300 return ret; 1301 } 1302 1303 static int hclge_get_tc_num(struct hclge_dev *hdev) 1304 { 1305 int i, cnt = 0; 1306 1307 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1308 if (hdev->hw_tc_map & BIT(i)) 1309 cnt++; 1310 return cnt; 1311 } 1312 1313 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) 1314 { 1315 int i, cnt = 0; 1316 1317 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1318 if (hdev->hw_tc_map & BIT(i) && 1319 hdev->tm_info.hw_pfc_map & BIT(i)) 1320 cnt++; 1321 return cnt; 1322 } 1323 1324 /* Get the number of pfc enabled TCs, which have private buffer */ 1325 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1326 struct hclge_pkt_buf_alloc *buf_alloc) 1327 { 1328 struct hclge_priv_buf *priv; 1329 int i, cnt = 0; 1330 1331 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1332 priv = &buf_alloc->priv_buf[i]; 1333 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1334 priv->enable) 1335 cnt++; 1336 } 1337 1338 return cnt; 1339 } 1340 1341 /* Get the number of pfc disabled TCs, which have private buffer */ 1342 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1343 struct hclge_pkt_buf_alloc *buf_alloc) 1344 { 1345 struct hclge_priv_buf *priv; 1346 int i, cnt = 0; 1347 1348 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1349 priv = &buf_alloc->priv_buf[i]; 1350 if (hdev->hw_tc_map & BIT(i) && 1351 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1352 priv->enable) 1353 cnt++; 1354 } 1355 1356 return cnt; 1357 } 1358 1359 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1360 { 1361 struct hclge_priv_buf *priv; 1362 u32 rx_priv = 0; 1363 int i; 1364 1365 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1366 priv = &buf_alloc->priv_buf[i]; 1367 if (priv->enable) 1368 rx_priv += priv->buf_size; 1369 } 1370 return rx_priv; 1371 } 1372 1373 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1374 { 1375 u32 i, total_tx_size = 0; 1376 1377 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1378 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 1379 1380 return total_tx_size; 1381 } 1382 1383 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, 1384 struct hclge_pkt_buf_alloc *buf_alloc, 1385 u32 rx_all) 1386 { 1387 u32 shared_buf_min, shared_buf_tc, shared_std; 1388 int tc_num, pfc_enable_num; 1389 u32 shared_buf, aligned_mps; 1390 u32 rx_priv; 1391 int i; 1392 1393 tc_num = hclge_get_tc_num(hdev); 1394 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); 1395 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); 1396 1397 if (hnae3_dev_dcb_supported(hdev)) 1398 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size; 1399 else 1400 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF 1401 + hdev->dv_buf_size; 1402 1403 shared_buf_tc = pfc_enable_num * aligned_mps + 1404 (tc_num - pfc_enable_num) * aligned_mps / 2 + 1405 aligned_mps; 1406 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), 1407 HCLGE_BUF_SIZE_UNIT); 1408 1409 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); 1410 if (rx_all < rx_priv + shared_std) 1411 return false; 1412 1413 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); 1414 buf_alloc->s_buf.buf_size = shared_buf; 1415 if (hnae3_dev_dcb_supported(hdev)) { 1416 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; 1417 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 1418 - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); 1419 } else { 1420 buf_alloc->s_buf.self.high = aligned_mps + 1421 HCLGE_NON_DCB_ADDITIONAL_BUF; 1422 buf_alloc->s_buf.self.low = 1423 roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); 1424 } 1425 1426 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1427 if ((hdev->hw_tc_map & BIT(i)) && 1428 (hdev->tm_info.hw_pfc_map & BIT(i))) { 1429 buf_alloc->s_buf.tc_thrd[i].low = aligned_mps; 1430 buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps; 1431 } else { 1432 buf_alloc->s_buf.tc_thrd[i].low = 0; 1433 buf_alloc->s_buf.tc_thrd[i].high = aligned_mps; 1434 } 1435 } 1436 1437 return true; 1438 } 1439 1440 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, 1441 struct hclge_pkt_buf_alloc *buf_alloc) 1442 { 1443 u32 i, total_size; 1444 1445 total_size = hdev->pkt_buf_size; 1446 1447 /* alloc tx buffer for all enabled tc */ 1448 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1449 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1450 1451 if (total_size < hdev->tx_buf_size) 1452 return -ENOMEM; 1453 1454 if (hdev->hw_tc_map & BIT(i)) 1455 priv->tx_buf_size = hdev->tx_buf_size; 1456 else 1457 priv->tx_buf_size = 0; 1458 1459 total_size -= priv->tx_buf_size; 1460 } 1461 1462 return 0; 1463 } 1464 1465 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs 1466 * @hdev: pointer to struct hclge_dev 1467 * @buf_alloc: pointer to buffer calculation data 1468 * @return: 0: calculate sucessful, negative: fail 1469 */ 1470 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, 1471 struct hclge_pkt_buf_alloc *buf_alloc) 1472 { 1473 u32 rx_all = hdev->pkt_buf_size, aligned_mps; 1474 int no_pfc_priv_num, pfc_priv_num; 1475 struct hclge_priv_buf *priv; 1476 int i; 1477 1478 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); 1479 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); 1480 1481 /* When DCB is not supported, rx private 1482 * buffer is not allocated. 1483 */ 1484 if (!hnae3_dev_dcb_supported(hdev)) { 1485 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1486 return -ENOMEM; 1487 1488 return 0; 1489 } 1490 1491 /* step 1, try to alloc private buffer for all enabled tc */ 1492 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1493 priv = &buf_alloc->priv_buf[i]; 1494 if (hdev->hw_tc_map & BIT(i)) { 1495 priv->enable = 1; 1496 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1497 priv->wl.low = aligned_mps; 1498 priv->wl.high = 1499 roundup(priv->wl.low + aligned_mps, 1500 HCLGE_BUF_SIZE_UNIT); 1501 priv->buf_size = priv->wl.high + 1502 hdev->dv_buf_size; 1503 } else { 1504 priv->wl.low = 0; 1505 priv->wl.high = 2 * aligned_mps; 1506 priv->buf_size = priv->wl.high + 1507 hdev->dv_buf_size; 1508 } 1509 } else { 1510 priv->enable = 0; 1511 priv->wl.low = 0; 1512 priv->wl.high = 0; 1513 priv->buf_size = 0; 1514 } 1515 } 1516 1517 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1518 return 0; 1519 1520 /* step 2, try to decrease the buffer size of 1521 * no pfc TC's private buffer 1522 */ 1523 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1524 priv = &buf_alloc->priv_buf[i]; 1525 1526 priv->enable = 0; 1527 priv->wl.low = 0; 1528 priv->wl.high = 0; 1529 priv->buf_size = 0; 1530 1531 if (!(hdev->hw_tc_map & BIT(i))) 1532 continue; 1533 1534 priv->enable = 1; 1535 1536 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1537 priv->wl.low = 256; 1538 priv->wl.high = priv->wl.low + aligned_mps; 1539 priv->buf_size = priv->wl.high + hdev->dv_buf_size; 1540 } else { 1541 priv->wl.low = 0; 1542 priv->wl.high = aligned_mps; 1543 priv->buf_size = priv->wl.high + hdev->dv_buf_size; 1544 } 1545 } 1546 1547 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1548 return 0; 1549 1550 /* step 3, try to reduce the number of pfc disabled TCs, 1551 * which have private buffer 1552 */ 1553 /* get the total no pfc enable TC number, which have private buffer */ 1554 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 1555 1556 /* let the last to be cleared first */ 1557 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1558 priv = &buf_alloc->priv_buf[i]; 1559 1560 if (hdev->hw_tc_map & BIT(i) && 1561 !(hdev->tm_info.hw_pfc_map & BIT(i))) { 1562 /* Clear the no pfc TC private buffer */ 1563 priv->wl.low = 0; 1564 priv->wl.high = 0; 1565 priv->buf_size = 0; 1566 priv->enable = 0; 1567 no_pfc_priv_num--; 1568 } 1569 1570 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1571 no_pfc_priv_num == 0) 1572 break; 1573 } 1574 1575 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1576 return 0; 1577 1578 /* step 4, try to reduce the number of pfc enabled TCs 1579 * which have private buffer. 1580 */ 1581 pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 1582 1583 /* let the last to be cleared first */ 1584 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1585 priv = &buf_alloc->priv_buf[i]; 1586 1587 if (hdev->hw_tc_map & BIT(i) && 1588 hdev->tm_info.hw_pfc_map & BIT(i)) { 1589 /* Reduce the number of pfc TC with private buffer */ 1590 priv->wl.low = 0; 1591 priv->enable = 0; 1592 priv->wl.high = 0; 1593 priv->buf_size = 0; 1594 pfc_priv_num--; 1595 } 1596 1597 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1598 pfc_priv_num == 0) 1599 break; 1600 } 1601 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1602 return 0; 1603 1604 return -ENOMEM; 1605 } 1606 1607 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 1608 struct hclge_pkt_buf_alloc *buf_alloc) 1609 { 1610 struct hclge_rx_priv_buff_cmd *req; 1611 struct hclge_desc desc; 1612 int ret; 1613 int i; 1614 1615 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 1616 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 1617 1618 /* Alloc private buffer TCs */ 1619 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1620 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1621 1622 req->buf_num[i] = 1623 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 1624 req->buf_num[i] |= 1625 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 1626 } 1627 1628 req->shared_buf = 1629 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 1630 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 1631 1632 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1633 if (ret) 1634 dev_err(&hdev->pdev->dev, 1635 "rx private buffer alloc cmd failed %d\n", ret); 1636 1637 return ret; 1638 } 1639 1640 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 1641 struct hclge_pkt_buf_alloc *buf_alloc) 1642 { 1643 struct hclge_rx_priv_wl_buf *req; 1644 struct hclge_priv_buf *priv; 1645 struct hclge_desc desc[2]; 1646 int i, j; 1647 int ret; 1648 1649 for (i = 0; i < 2; i++) { 1650 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 1651 false); 1652 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 1653 1654 /* The first descriptor set the NEXT bit to 1 */ 1655 if (i == 0) 1656 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1657 else 1658 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1659 1660 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1661 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 1662 1663 priv = &buf_alloc->priv_buf[idx]; 1664 req->tc_wl[j].high = 1665 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 1666 req->tc_wl[j].high |= 1667 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 1668 req->tc_wl[j].low = 1669 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 1670 req->tc_wl[j].low |= 1671 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 1672 } 1673 } 1674 1675 /* Send 2 descriptor at one time */ 1676 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1677 if (ret) 1678 dev_err(&hdev->pdev->dev, 1679 "rx private waterline config cmd failed %d\n", 1680 ret); 1681 return ret; 1682 } 1683 1684 static int hclge_common_thrd_config(struct hclge_dev *hdev, 1685 struct hclge_pkt_buf_alloc *buf_alloc) 1686 { 1687 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 1688 struct hclge_rx_com_thrd *req; 1689 struct hclge_desc desc[2]; 1690 struct hclge_tc_thrd *tc; 1691 int i, j; 1692 int ret; 1693 1694 for (i = 0; i < 2; i++) { 1695 hclge_cmd_setup_basic_desc(&desc[i], 1696 HCLGE_OPC_RX_COM_THRD_ALLOC, false); 1697 req = (struct hclge_rx_com_thrd *)&desc[i].data; 1698 1699 /* The first descriptor set the NEXT bit to 1 */ 1700 if (i == 0) 1701 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1702 else 1703 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1704 1705 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1706 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 1707 1708 req->com_thrd[j].high = 1709 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 1710 req->com_thrd[j].high |= 1711 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 1712 req->com_thrd[j].low = 1713 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 1714 req->com_thrd[j].low |= 1715 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 1716 } 1717 } 1718 1719 /* Send 2 descriptors at one time */ 1720 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1721 if (ret) 1722 dev_err(&hdev->pdev->dev, 1723 "common threshold config cmd failed %d\n", ret); 1724 return ret; 1725 } 1726 1727 static int hclge_common_wl_config(struct hclge_dev *hdev, 1728 struct hclge_pkt_buf_alloc *buf_alloc) 1729 { 1730 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 1731 struct hclge_rx_com_wl *req; 1732 struct hclge_desc desc; 1733 int ret; 1734 1735 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 1736 1737 req = (struct hclge_rx_com_wl *)desc.data; 1738 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 1739 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 1740 1741 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 1742 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 1743 1744 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1745 if (ret) 1746 dev_err(&hdev->pdev->dev, 1747 "common waterline config cmd failed %d\n", ret); 1748 1749 return ret; 1750 } 1751 1752 int hclge_buffer_alloc(struct hclge_dev *hdev) 1753 { 1754 struct hclge_pkt_buf_alloc *pkt_buf; 1755 int ret; 1756 1757 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 1758 if (!pkt_buf) 1759 return -ENOMEM; 1760 1761 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 1762 if (ret) { 1763 dev_err(&hdev->pdev->dev, 1764 "could not calc tx buffer size for all TCs %d\n", ret); 1765 goto out; 1766 } 1767 1768 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 1769 if (ret) { 1770 dev_err(&hdev->pdev->dev, 1771 "could not alloc tx buffers %d\n", ret); 1772 goto out; 1773 } 1774 1775 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 1776 if (ret) { 1777 dev_err(&hdev->pdev->dev, 1778 "could not calc rx priv buffer size for all TCs %d\n", 1779 ret); 1780 goto out; 1781 } 1782 1783 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 1784 if (ret) { 1785 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 1786 ret); 1787 goto out; 1788 } 1789 1790 if (hnae3_dev_dcb_supported(hdev)) { 1791 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 1792 if (ret) { 1793 dev_err(&hdev->pdev->dev, 1794 "could not configure rx private waterline %d\n", 1795 ret); 1796 goto out; 1797 } 1798 1799 ret = hclge_common_thrd_config(hdev, pkt_buf); 1800 if (ret) { 1801 dev_err(&hdev->pdev->dev, 1802 "could not configure common threshold %d\n", 1803 ret); 1804 goto out; 1805 } 1806 } 1807 1808 ret = hclge_common_wl_config(hdev, pkt_buf); 1809 if (ret) 1810 dev_err(&hdev->pdev->dev, 1811 "could not configure common waterline %d\n", ret); 1812 1813 out: 1814 kfree(pkt_buf); 1815 return ret; 1816 } 1817 1818 static int hclge_init_roce_base_info(struct hclge_vport *vport) 1819 { 1820 struct hnae3_handle *roce = &vport->roce; 1821 struct hnae3_handle *nic = &vport->nic; 1822 1823 roce->rinfo.num_vectors = vport->back->num_roce_msi; 1824 1825 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || 1826 vport->back->num_msi_left == 0) 1827 return -EINVAL; 1828 1829 roce->rinfo.base_vector = vport->back->roce_base_vector; 1830 1831 roce->rinfo.netdev = nic->kinfo.netdev; 1832 roce->rinfo.roce_io_base = vport->back->hw.io_base; 1833 1834 roce->pdev = nic->pdev; 1835 roce->ae_algo = nic->ae_algo; 1836 roce->numa_node_mask = nic->numa_node_mask; 1837 1838 return 0; 1839 } 1840 1841 static int hclge_init_msi(struct hclge_dev *hdev) 1842 { 1843 struct pci_dev *pdev = hdev->pdev; 1844 int vectors; 1845 int i; 1846 1847 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 1848 PCI_IRQ_MSI | PCI_IRQ_MSIX); 1849 if (vectors < 0) { 1850 dev_err(&pdev->dev, 1851 "failed(%d) to allocate MSI/MSI-X vectors\n", 1852 vectors); 1853 return vectors; 1854 } 1855 if (vectors < hdev->num_msi) 1856 dev_warn(&hdev->pdev->dev, 1857 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 1858 hdev->num_msi, vectors); 1859 1860 hdev->num_msi = vectors; 1861 hdev->num_msi_left = vectors; 1862 hdev->base_msi_vector = pdev->irq; 1863 hdev->roce_base_vector = hdev->base_msi_vector + 1864 hdev->roce_base_msix_offset; 1865 1866 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 1867 sizeof(u16), GFP_KERNEL); 1868 if (!hdev->vector_status) { 1869 pci_free_irq_vectors(pdev); 1870 return -ENOMEM; 1871 } 1872 1873 for (i = 0; i < hdev->num_msi; i++) 1874 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 1875 1876 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 1877 sizeof(int), GFP_KERNEL); 1878 if (!hdev->vector_irq) { 1879 pci_free_irq_vectors(pdev); 1880 return -ENOMEM; 1881 } 1882 1883 return 0; 1884 } 1885 1886 static u8 hclge_check_speed_dup(u8 duplex, int speed) 1887 { 1888 1889 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) 1890 duplex = HCLGE_MAC_FULL; 1891 1892 return duplex; 1893 } 1894 1895 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, 1896 u8 duplex) 1897 { 1898 struct hclge_config_mac_speed_dup_cmd *req; 1899 struct hclge_desc desc; 1900 int ret; 1901 1902 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 1903 1904 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 1905 1906 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); 1907 1908 switch (speed) { 1909 case HCLGE_MAC_SPEED_10M: 1910 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 1911 HCLGE_CFG_SPEED_S, 6); 1912 break; 1913 case HCLGE_MAC_SPEED_100M: 1914 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 1915 HCLGE_CFG_SPEED_S, 7); 1916 break; 1917 case HCLGE_MAC_SPEED_1G: 1918 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 1919 HCLGE_CFG_SPEED_S, 0); 1920 break; 1921 case HCLGE_MAC_SPEED_10G: 1922 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 1923 HCLGE_CFG_SPEED_S, 1); 1924 break; 1925 case HCLGE_MAC_SPEED_25G: 1926 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 1927 HCLGE_CFG_SPEED_S, 2); 1928 break; 1929 case HCLGE_MAC_SPEED_40G: 1930 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 1931 HCLGE_CFG_SPEED_S, 3); 1932 break; 1933 case HCLGE_MAC_SPEED_50G: 1934 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 1935 HCLGE_CFG_SPEED_S, 4); 1936 break; 1937 case HCLGE_MAC_SPEED_100G: 1938 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 1939 HCLGE_CFG_SPEED_S, 5); 1940 break; 1941 default: 1942 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 1943 return -EINVAL; 1944 } 1945 1946 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 1947 1); 1948 1949 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1950 if (ret) { 1951 dev_err(&hdev->pdev->dev, 1952 "mac speed/duplex config cmd failed %d.\n", ret); 1953 return ret; 1954 } 1955 1956 return 0; 1957 } 1958 1959 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) 1960 { 1961 int ret; 1962 1963 duplex = hclge_check_speed_dup(duplex, speed); 1964 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) 1965 return 0; 1966 1967 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); 1968 if (ret) 1969 return ret; 1970 1971 hdev->hw.mac.speed = speed; 1972 hdev->hw.mac.duplex = duplex; 1973 1974 return 0; 1975 } 1976 1977 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 1978 u8 duplex) 1979 { 1980 struct hclge_vport *vport = hclge_get_vport(handle); 1981 struct hclge_dev *hdev = vport->back; 1982 1983 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); 1984 } 1985 1986 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 1987 { 1988 struct hclge_config_auto_neg_cmd *req; 1989 struct hclge_desc desc; 1990 u32 flag = 0; 1991 int ret; 1992 1993 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 1994 1995 req = (struct hclge_config_auto_neg_cmd *)desc.data; 1996 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); 1997 req->cfg_an_cmd_flag = cpu_to_le32(flag); 1998 1999 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2000 if (ret) 2001 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2002 ret); 2003 2004 return ret; 2005 } 2006 2007 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2008 { 2009 struct hclge_vport *vport = hclge_get_vport(handle); 2010 struct hclge_dev *hdev = vport->back; 2011 2012 return hclge_set_autoneg_en(hdev, enable); 2013 } 2014 2015 static int hclge_get_autoneg(struct hnae3_handle *handle) 2016 { 2017 struct hclge_vport *vport = hclge_get_vport(handle); 2018 struct hclge_dev *hdev = vport->back; 2019 struct phy_device *phydev = hdev->hw.mac.phydev; 2020 2021 if (phydev) 2022 return phydev->autoneg; 2023 2024 return hdev->hw.mac.autoneg; 2025 } 2026 2027 static int hclge_mac_init(struct hclge_dev *hdev) 2028 { 2029 struct hclge_mac *mac = &hdev->hw.mac; 2030 int ret; 2031 2032 hdev->support_sfp_query = true; 2033 hdev->hw.mac.duplex = HCLGE_MAC_FULL; 2034 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, 2035 hdev->hw.mac.duplex); 2036 if (ret) { 2037 dev_err(&hdev->pdev->dev, 2038 "Config mac speed dup fail ret=%d\n", ret); 2039 return ret; 2040 } 2041 2042 mac->link = 0; 2043 2044 ret = hclge_set_mac_mtu(hdev, hdev->mps); 2045 if (ret) { 2046 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); 2047 return ret; 2048 } 2049 2050 ret = hclge_buffer_alloc(hdev); 2051 if (ret) 2052 dev_err(&hdev->pdev->dev, 2053 "allocate buffer fail, ret=%d\n", ret); 2054 2055 return ret; 2056 } 2057 2058 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2059 { 2060 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2061 schedule_work(&hdev->mbx_service_task); 2062 } 2063 2064 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 2065 { 2066 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 2067 schedule_work(&hdev->rst_service_task); 2068 } 2069 2070 static void hclge_task_schedule(struct hclge_dev *hdev) 2071 { 2072 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && 2073 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2074 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) 2075 (void)schedule_work(&hdev->service_task); 2076 } 2077 2078 static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2079 { 2080 struct hclge_link_status_cmd *req; 2081 struct hclge_desc desc; 2082 int link_status; 2083 int ret; 2084 2085 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2086 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2087 if (ret) { 2088 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2089 ret); 2090 return ret; 2091 } 2092 2093 req = (struct hclge_link_status_cmd *)desc.data; 2094 link_status = req->status & HCLGE_LINK_STATUS_UP_M; 2095 2096 return !!link_status; 2097 } 2098 2099 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2100 { 2101 int mac_state; 2102 int link_stat; 2103 2104 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) 2105 return 0; 2106 2107 mac_state = hclge_get_mac_link_status(hdev); 2108 2109 if (hdev->hw.mac.phydev) { 2110 if (hdev->hw.mac.phydev->state == PHY_RUNNING) 2111 link_stat = mac_state & 2112 hdev->hw.mac.phydev->link; 2113 else 2114 link_stat = 0; 2115 2116 } else { 2117 link_stat = mac_state; 2118 } 2119 2120 return !!link_stat; 2121 } 2122 2123 static void hclge_update_link_status(struct hclge_dev *hdev) 2124 { 2125 struct hnae3_client *client = hdev->nic_client; 2126 struct hnae3_handle *handle; 2127 int state; 2128 int i; 2129 2130 if (!client) 2131 return; 2132 state = hclge_get_mac_phy_link(hdev); 2133 if (state != hdev->hw.mac.link) { 2134 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2135 handle = &hdev->vport[i].nic; 2136 client->ops->link_status_change(handle, state); 2137 } 2138 hdev->hw.mac.link = state; 2139 } 2140 } 2141 2142 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) 2143 { 2144 struct hclge_sfp_speed_cmd *resp = NULL; 2145 struct hclge_desc desc; 2146 int ret; 2147 2148 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true); 2149 resp = (struct hclge_sfp_speed_cmd *)desc.data; 2150 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2151 if (ret == -EOPNOTSUPP) { 2152 dev_warn(&hdev->pdev->dev, 2153 "IMP do not support get SFP speed %d\n", ret); 2154 return ret; 2155 } else if (ret) { 2156 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); 2157 return ret; 2158 } 2159 2160 *speed = resp->sfp_speed; 2161 2162 return 0; 2163 } 2164 2165 static int hclge_update_speed_duplex(struct hclge_dev *hdev) 2166 { 2167 struct hclge_mac mac = hdev->hw.mac; 2168 int speed; 2169 int ret; 2170 2171 /* get the speed from SFP cmd when phy 2172 * doesn't exit. 2173 */ 2174 if (mac.phydev) 2175 return 0; 2176 2177 /* if IMP does not support get SFP/qSFP speed, return directly */ 2178 if (!hdev->support_sfp_query) 2179 return 0; 2180 2181 ret = hclge_get_sfp_speed(hdev, &speed); 2182 if (ret == -EOPNOTSUPP) { 2183 hdev->support_sfp_query = false; 2184 return ret; 2185 } else if (ret) { 2186 return ret; 2187 } 2188 2189 if (speed == HCLGE_MAC_SPEED_UNKNOWN) 2190 return 0; /* do nothing if no SFP */ 2191 2192 /* must config full duplex for SFP */ 2193 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); 2194 } 2195 2196 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) 2197 { 2198 struct hclge_vport *vport = hclge_get_vport(handle); 2199 struct hclge_dev *hdev = vport->back; 2200 2201 return hclge_update_speed_duplex(hdev); 2202 } 2203 2204 static int hclge_get_status(struct hnae3_handle *handle) 2205 { 2206 struct hclge_vport *vport = hclge_get_vport(handle); 2207 struct hclge_dev *hdev = vport->back; 2208 2209 hclge_update_link_status(hdev); 2210 2211 return hdev->hw.mac.link; 2212 } 2213 2214 static void hclge_service_timer(struct timer_list *t) 2215 { 2216 struct hclge_dev *hdev = from_timer(hdev, t, service_timer); 2217 2218 mod_timer(&hdev->service_timer, jiffies + HZ); 2219 hdev->hw_stats.stats_timer++; 2220 hclge_task_schedule(hdev); 2221 } 2222 2223 static void hclge_service_complete(struct hclge_dev *hdev) 2224 { 2225 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); 2226 2227 /* Flush memory before next watchdog */ 2228 smp_mb__before_atomic(); 2229 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 2230 } 2231 2232 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 2233 { 2234 u32 rst_src_reg, cmdq_src_reg, msix_src_reg; 2235 2236 /* fetch the events from their corresponding regs */ 2237 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); 2238 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 2239 msix_src_reg = hclge_read_dev(&hdev->hw, 2240 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG); 2241 2242 /* Assumption: If by any chance reset and mailbox events are reported 2243 * together then we will only process reset event in this go and will 2244 * defer the processing of the mailbox events. Since, we would have not 2245 * cleared RX CMDQ event this time we would receive again another 2246 * interrupt from H/W just for the mailbox. 2247 */ 2248 2249 /* check for vector0 reset event sources */ 2250 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { 2251 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); 2252 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 2253 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 2254 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2255 return HCLGE_VECTOR0_EVENT_RST; 2256 } 2257 2258 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { 2259 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); 2260 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 2261 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 2262 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2263 return HCLGE_VECTOR0_EVENT_RST; 2264 } 2265 2266 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { 2267 dev_info(&hdev->pdev->dev, "core reset interrupt\n"); 2268 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 2269 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); 2270 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2271 return HCLGE_VECTOR0_EVENT_RST; 2272 } 2273 2274 /* check for vector0 msix event source */ 2275 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) 2276 return HCLGE_VECTOR0_EVENT_ERR; 2277 2278 /* check for vector0 mailbox(=CMDQ RX) event source */ 2279 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 2280 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 2281 *clearval = cmdq_src_reg; 2282 return HCLGE_VECTOR0_EVENT_MBX; 2283 } 2284 2285 return HCLGE_VECTOR0_EVENT_OTHER; 2286 } 2287 2288 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 2289 u32 regclr) 2290 { 2291 switch (event_type) { 2292 case HCLGE_VECTOR0_EVENT_RST: 2293 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 2294 break; 2295 case HCLGE_VECTOR0_EVENT_MBX: 2296 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 2297 break; 2298 default: 2299 break; 2300 } 2301 } 2302 2303 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) 2304 { 2305 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, 2306 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | 2307 BIT(HCLGE_VECTOR0_CORERESET_INT_B) | 2308 BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); 2309 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); 2310 } 2311 2312 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 2313 { 2314 writel(enable ? 1 : 0, vector->addr); 2315 } 2316 2317 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 2318 { 2319 struct hclge_dev *hdev = data; 2320 u32 event_cause; 2321 u32 clearval; 2322 2323 hclge_enable_vector(&hdev->misc_vector, false); 2324 event_cause = hclge_check_event_cause(hdev, &clearval); 2325 2326 /* vector 0 interrupt is shared with reset and mailbox source events.*/ 2327 switch (event_cause) { 2328 case HCLGE_VECTOR0_EVENT_ERR: 2329 /* we do not know what type of reset is required now. This could 2330 * only be decided after we fetch the type of errors which 2331 * caused this event. Therefore, we will do below for now: 2332 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we 2333 * have defered type of reset to be used. 2334 * 2. Schedule the reset serivce task. 2335 * 3. When service task receives HNAE3_UNKNOWN_RESET type it 2336 * will fetch the correct type of reset. This would be done 2337 * by first decoding the types of errors. 2338 */ 2339 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); 2340 /* fall through */ 2341 case HCLGE_VECTOR0_EVENT_RST: 2342 hclge_reset_task_schedule(hdev); 2343 break; 2344 case HCLGE_VECTOR0_EVENT_MBX: 2345 /* If we are here then, 2346 * 1. Either we are not handling any mbx task and we are not 2347 * scheduled as well 2348 * OR 2349 * 2. We could be handling a mbx task but nothing more is 2350 * scheduled. 2351 * In both cases, we should schedule mbx task as there are more 2352 * mbx messages reported by this interrupt. 2353 */ 2354 hclge_mbx_task_schedule(hdev); 2355 break; 2356 default: 2357 dev_warn(&hdev->pdev->dev, 2358 "received unknown or unhandled event of vector0\n"); 2359 break; 2360 } 2361 2362 /* clear the source of interrupt if it is not cause by reset */ 2363 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) { 2364 hclge_clear_event_cause(hdev, event_cause, clearval); 2365 hclge_enable_vector(&hdev->misc_vector, true); 2366 } 2367 2368 return IRQ_HANDLED; 2369 } 2370 2371 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 2372 { 2373 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { 2374 dev_warn(&hdev->pdev->dev, 2375 "vector(vector_id %d) has been freed.\n", vector_id); 2376 return; 2377 } 2378 2379 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 2380 hdev->num_msi_left += 1; 2381 hdev->num_msi_used -= 1; 2382 } 2383 2384 static void hclge_get_misc_vector(struct hclge_dev *hdev) 2385 { 2386 struct hclge_misc_vector *vector = &hdev->misc_vector; 2387 2388 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 2389 2390 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 2391 hdev->vector_status[0] = 0; 2392 2393 hdev->num_msi_left -= 1; 2394 hdev->num_msi_used += 1; 2395 } 2396 2397 static int hclge_misc_irq_init(struct hclge_dev *hdev) 2398 { 2399 int ret; 2400 2401 hclge_get_misc_vector(hdev); 2402 2403 /* this would be explicitly freed in the end */ 2404 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 2405 0, "hclge_misc", hdev); 2406 if (ret) { 2407 hclge_free_vector(hdev, 0); 2408 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 2409 hdev->misc_vector.vector_irq); 2410 } 2411 2412 return ret; 2413 } 2414 2415 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 2416 { 2417 free_irq(hdev->misc_vector.vector_irq, hdev); 2418 hclge_free_vector(hdev, 0); 2419 } 2420 2421 static int hclge_notify_client(struct hclge_dev *hdev, 2422 enum hnae3_reset_notify_type type) 2423 { 2424 struct hnae3_client *client = hdev->nic_client; 2425 u16 i; 2426 2427 if (!client->ops->reset_notify) 2428 return -EOPNOTSUPP; 2429 2430 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2431 struct hnae3_handle *handle = &hdev->vport[i].nic; 2432 int ret; 2433 2434 ret = client->ops->reset_notify(handle, type); 2435 if (ret) { 2436 dev_err(&hdev->pdev->dev, 2437 "notify nic client failed %d(%d)\n", type, ret); 2438 return ret; 2439 } 2440 } 2441 2442 return 0; 2443 } 2444 2445 static int hclge_notify_roce_client(struct hclge_dev *hdev, 2446 enum hnae3_reset_notify_type type) 2447 { 2448 struct hnae3_client *client = hdev->roce_client; 2449 int ret = 0; 2450 u16 i; 2451 2452 if (!client) 2453 return 0; 2454 2455 if (!client->ops->reset_notify) 2456 return -EOPNOTSUPP; 2457 2458 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2459 struct hnae3_handle *handle = &hdev->vport[i].roce; 2460 2461 ret = client->ops->reset_notify(handle, type); 2462 if (ret) { 2463 dev_err(&hdev->pdev->dev, 2464 "notify roce client failed %d(%d)", 2465 type, ret); 2466 return ret; 2467 } 2468 } 2469 2470 return ret; 2471 } 2472 2473 static int hclge_reset_wait(struct hclge_dev *hdev) 2474 { 2475 #define HCLGE_RESET_WATI_MS 100 2476 #define HCLGE_RESET_WAIT_CNT 200 2477 u32 val, reg, reg_bit; 2478 u32 cnt = 0; 2479 2480 switch (hdev->reset_type) { 2481 case HNAE3_IMP_RESET: 2482 reg = HCLGE_GLOBAL_RESET_REG; 2483 reg_bit = HCLGE_IMP_RESET_BIT; 2484 break; 2485 case HNAE3_GLOBAL_RESET: 2486 reg = HCLGE_GLOBAL_RESET_REG; 2487 reg_bit = HCLGE_GLOBAL_RESET_BIT; 2488 break; 2489 case HNAE3_CORE_RESET: 2490 reg = HCLGE_GLOBAL_RESET_REG; 2491 reg_bit = HCLGE_CORE_RESET_BIT; 2492 break; 2493 case HNAE3_FUNC_RESET: 2494 reg = HCLGE_FUN_RST_ING; 2495 reg_bit = HCLGE_FUN_RST_ING_B; 2496 break; 2497 case HNAE3_FLR_RESET: 2498 break; 2499 default: 2500 dev_err(&hdev->pdev->dev, 2501 "Wait for unsupported reset type: %d\n", 2502 hdev->reset_type); 2503 return -EINVAL; 2504 } 2505 2506 if (hdev->reset_type == HNAE3_FLR_RESET) { 2507 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && 2508 cnt++ < HCLGE_RESET_WAIT_CNT) 2509 msleep(HCLGE_RESET_WATI_MS); 2510 2511 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { 2512 dev_err(&hdev->pdev->dev, 2513 "flr wait timeout: %d\n", cnt); 2514 return -EBUSY; 2515 } 2516 2517 return 0; 2518 } 2519 2520 val = hclge_read_dev(&hdev->hw, reg); 2521 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 2522 msleep(HCLGE_RESET_WATI_MS); 2523 val = hclge_read_dev(&hdev->hw, reg); 2524 cnt++; 2525 } 2526 2527 if (cnt >= HCLGE_RESET_WAIT_CNT) { 2528 dev_warn(&hdev->pdev->dev, 2529 "Wait for reset timeout: %d\n", hdev->reset_type); 2530 return -EBUSY; 2531 } 2532 2533 return 0; 2534 } 2535 2536 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) 2537 { 2538 struct hclge_vf_rst_cmd *req; 2539 struct hclge_desc desc; 2540 2541 req = (struct hclge_vf_rst_cmd *)desc.data; 2542 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); 2543 req->dest_vfid = func_id; 2544 2545 if (reset) 2546 req->vf_rst = 0x1; 2547 2548 return hclge_cmd_send(&hdev->hw, &desc, 1); 2549 } 2550 2551 int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) 2552 { 2553 int i; 2554 2555 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { 2556 struct hclge_vport *vport = &hdev->vport[i]; 2557 int ret; 2558 2559 /* Send cmd to set/clear VF's FUNC_RST_ING */ 2560 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); 2561 if (ret) { 2562 dev_err(&hdev->pdev->dev, 2563 "set vf(%d) rst failed %d!\n", 2564 vport->vport_id, ret); 2565 return ret; 2566 } 2567 2568 if (!reset) 2569 continue; 2570 2571 /* Inform VF to process the reset. 2572 * hclge_inform_reset_assert_to_vf may fail if VF 2573 * driver is not loaded. 2574 */ 2575 ret = hclge_inform_reset_assert_to_vf(vport); 2576 if (ret) 2577 dev_warn(&hdev->pdev->dev, 2578 "inform reset to vf(%d) failed %d!\n", 2579 vport->vport_id, ret); 2580 } 2581 2582 return 0; 2583 } 2584 2585 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 2586 { 2587 struct hclge_desc desc; 2588 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 2589 int ret; 2590 2591 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 2592 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 2593 req->fun_reset_vfid = func_id; 2594 2595 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2596 if (ret) 2597 dev_err(&hdev->pdev->dev, 2598 "send function reset cmd fail, status =%d\n", ret); 2599 2600 return ret; 2601 } 2602 2603 static void hclge_do_reset(struct hclge_dev *hdev) 2604 { 2605 struct pci_dev *pdev = hdev->pdev; 2606 u32 val; 2607 2608 switch (hdev->reset_type) { 2609 case HNAE3_GLOBAL_RESET: 2610 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2611 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 2612 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2613 dev_info(&pdev->dev, "Global Reset requested\n"); 2614 break; 2615 case HNAE3_CORE_RESET: 2616 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2617 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); 2618 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2619 dev_info(&pdev->dev, "Core Reset requested\n"); 2620 break; 2621 case HNAE3_FUNC_RESET: 2622 dev_info(&pdev->dev, "PF Reset requested\n"); 2623 /* schedule again to check later */ 2624 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 2625 hclge_reset_task_schedule(hdev); 2626 break; 2627 case HNAE3_FLR_RESET: 2628 dev_info(&pdev->dev, "FLR requested\n"); 2629 /* schedule again to check later */ 2630 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending); 2631 hclge_reset_task_schedule(hdev); 2632 break; 2633 default: 2634 dev_warn(&pdev->dev, 2635 "Unsupported reset type: %d\n", hdev->reset_type); 2636 break; 2637 } 2638 } 2639 2640 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, 2641 unsigned long *addr) 2642 { 2643 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2644 2645 /* first, resolve any unknown reset type to the known type(s) */ 2646 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) { 2647 /* we will intentionally ignore any errors from this function 2648 * as we will end up in *some* reset request in any case 2649 */ 2650 hclge_handle_hw_msix_error(hdev, addr); 2651 clear_bit(HNAE3_UNKNOWN_RESET, addr); 2652 /* We defered the clearing of the error event which caused 2653 * interrupt since it was not posssible to do that in 2654 * interrupt context (and this is the reason we introduced 2655 * new UNKNOWN reset type). Now, the errors have been 2656 * handled and cleared in hardware we can safely enable 2657 * interrupts. This is an exception to the norm. 2658 */ 2659 hclge_enable_vector(&hdev->misc_vector, true); 2660 } 2661 2662 /* return the highest priority reset level amongst all */ 2663 if (test_bit(HNAE3_IMP_RESET, addr)) { 2664 rst_level = HNAE3_IMP_RESET; 2665 clear_bit(HNAE3_IMP_RESET, addr); 2666 clear_bit(HNAE3_GLOBAL_RESET, addr); 2667 clear_bit(HNAE3_CORE_RESET, addr); 2668 clear_bit(HNAE3_FUNC_RESET, addr); 2669 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { 2670 rst_level = HNAE3_GLOBAL_RESET; 2671 clear_bit(HNAE3_GLOBAL_RESET, addr); 2672 clear_bit(HNAE3_CORE_RESET, addr); 2673 clear_bit(HNAE3_FUNC_RESET, addr); 2674 } else if (test_bit(HNAE3_CORE_RESET, addr)) { 2675 rst_level = HNAE3_CORE_RESET; 2676 clear_bit(HNAE3_CORE_RESET, addr); 2677 clear_bit(HNAE3_FUNC_RESET, addr); 2678 } else if (test_bit(HNAE3_FUNC_RESET, addr)) { 2679 rst_level = HNAE3_FUNC_RESET; 2680 clear_bit(HNAE3_FUNC_RESET, addr); 2681 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 2682 rst_level = HNAE3_FLR_RESET; 2683 clear_bit(HNAE3_FLR_RESET, addr); 2684 } 2685 2686 return rst_level; 2687 } 2688 2689 static void hclge_clear_reset_cause(struct hclge_dev *hdev) 2690 { 2691 u32 clearval = 0; 2692 2693 switch (hdev->reset_type) { 2694 case HNAE3_IMP_RESET: 2695 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2696 break; 2697 case HNAE3_GLOBAL_RESET: 2698 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2699 break; 2700 case HNAE3_CORE_RESET: 2701 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2702 break; 2703 default: 2704 break; 2705 } 2706 2707 if (!clearval) 2708 return; 2709 2710 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); 2711 hclge_enable_vector(&hdev->misc_vector, true); 2712 } 2713 2714 static int hclge_reset_prepare_down(struct hclge_dev *hdev) 2715 { 2716 int ret = 0; 2717 2718 switch (hdev->reset_type) { 2719 case HNAE3_FUNC_RESET: 2720 /* fall through */ 2721 case HNAE3_FLR_RESET: 2722 ret = hclge_set_all_vf_rst(hdev, true); 2723 break; 2724 default: 2725 break; 2726 } 2727 2728 return ret; 2729 } 2730 2731 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) 2732 { 2733 u32 reg_val; 2734 int ret = 0; 2735 2736 switch (hdev->reset_type) { 2737 case HNAE3_FUNC_RESET: 2738 /* There is no mechanism for PF to know if VF has stopped IO 2739 * for now, just wait 100 ms for VF to stop IO 2740 */ 2741 msleep(100); 2742 ret = hclge_func_reset_cmd(hdev, 0); 2743 if (ret) { 2744 dev_err(&hdev->pdev->dev, 2745 "asserting function reset fail %d!\n", ret); 2746 return ret; 2747 } 2748 2749 /* After performaning pf reset, it is not necessary to do the 2750 * mailbox handling or send any command to firmware, because 2751 * any mailbox handling or command to firmware is only valid 2752 * after hclge_cmd_init is called. 2753 */ 2754 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 2755 break; 2756 case HNAE3_FLR_RESET: 2757 /* There is no mechanism for PF to know if VF has stopped IO 2758 * for now, just wait 100 ms for VF to stop IO 2759 */ 2760 msleep(100); 2761 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 2762 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 2763 break; 2764 case HNAE3_IMP_RESET: 2765 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 2766 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, 2767 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); 2768 break; 2769 default: 2770 break; 2771 } 2772 2773 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); 2774 2775 return ret; 2776 } 2777 2778 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) 2779 { 2780 #define MAX_RESET_FAIL_CNT 5 2781 #define RESET_UPGRADE_DELAY_SEC 10 2782 2783 if (hdev->reset_pending) { 2784 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", 2785 hdev->reset_pending); 2786 return true; 2787 } else if ((hdev->reset_type != HNAE3_IMP_RESET) && 2788 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) & 2789 BIT(HCLGE_IMP_RESET_BIT))) { 2790 dev_info(&hdev->pdev->dev, 2791 "reset failed because IMP Reset is pending\n"); 2792 hclge_clear_reset_cause(hdev); 2793 return false; 2794 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) { 2795 hdev->reset_fail_cnt++; 2796 if (is_timeout) { 2797 set_bit(hdev->reset_type, &hdev->reset_pending); 2798 dev_info(&hdev->pdev->dev, 2799 "re-schedule to wait for hw reset done\n"); 2800 return true; 2801 } 2802 2803 dev_info(&hdev->pdev->dev, "Upgrade reset level\n"); 2804 hclge_clear_reset_cause(hdev); 2805 mod_timer(&hdev->reset_timer, 2806 jiffies + RESET_UPGRADE_DELAY_SEC * HZ); 2807 2808 return false; 2809 } 2810 2811 hclge_clear_reset_cause(hdev); 2812 dev_err(&hdev->pdev->dev, "Reset fail!\n"); 2813 return false; 2814 } 2815 2816 static int hclge_reset_prepare_up(struct hclge_dev *hdev) 2817 { 2818 int ret = 0; 2819 2820 switch (hdev->reset_type) { 2821 case HNAE3_FUNC_RESET: 2822 /* fall through */ 2823 case HNAE3_FLR_RESET: 2824 ret = hclge_set_all_vf_rst(hdev, false); 2825 break; 2826 default: 2827 break; 2828 } 2829 2830 return ret; 2831 } 2832 2833 static void hclge_reset(struct hclge_dev *hdev) 2834 { 2835 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2836 bool is_timeout = false; 2837 int ret; 2838 2839 /* Initialize ae_dev reset status as well, in case enet layer wants to 2840 * know if device is undergoing reset 2841 */ 2842 ae_dev->reset_type = hdev->reset_type; 2843 hdev->reset_count++; 2844 /* perform reset of the stack & ae device for a client */ 2845 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 2846 if (ret) 2847 goto err_reset; 2848 2849 ret = hclge_reset_prepare_down(hdev); 2850 if (ret) 2851 goto err_reset; 2852 2853 rtnl_lock(); 2854 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2855 if (ret) 2856 goto err_reset_lock; 2857 2858 rtnl_unlock(); 2859 2860 ret = hclge_reset_prepare_wait(hdev); 2861 if (ret) 2862 goto err_reset; 2863 2864 if (hclge_reset_wait(hdev)) { 2865 is_timeout = true; 2866 goto err_reset; 2867 } 2868 2869 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 2870 if (ret) 2871 goto err_reset; 2872 2873 rtnl_lock(); 2874 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 2875 if (ret) 2876 goto err_reset_lock; 2877 2878 ret = hclge_reset_ae_dev(hdev->ae_dev); 2879 if (ret) 2880 goto err_reset_lock; 2881 2882 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 2883 if (ret) 2884 goto err_reset_lock; 2885 2886 hclge_clear_reset_cause(hdev); 2887 2888 ret = hclge_reset_prepare_up(hdev); 2889 if (ret) 2890 goto err_reset_lock; 2891 2892 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2893 if (ret) 2894 goto err_reset_lock; 2895 2896 rtnl_unlock(); 2897 2898 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 2899 if (ret) 2900 goto err_reset; 2901 2902 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); 2903 if (ret) 2904 goto err_reset; 2905 2906 hdev->last_reset_time = jiffies; 2907 hdev->reset_fail_cnt = 0; 2908 ae_dev->reset_type = HNAE3_NONE_RESET; 2909 2910 return; 2911 2912 err_reset_lock: 2913 rtnl_unlock(); 2914 err_reset: 2915 if (hclge_reset_err_handle(hdev, is_timeout)) 2916 hclge_reset_task_schedule(hdev); 2917 } 2918 2919 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) 2920 { 2921 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 2922 struct hclge_dev *hdev = ae_dev->priv; 2923 2924 /* We might end up getting called broadly because of 2 below cases: 2925 * 1. Recoverable error was conveyed through APEI and only way to bring 2926 * normalcy is to reset. 2927 * 2. A new reset request from the stack due to timeout 2928 * 2929 * For the first case,error event might not have ae handle available. 2930 * check if this is a new reset request and we are not here just because 2931 * last reset attempt did not succeed and watchdog hit us again. We will 2932 * know this if last reset request did not occur very recently (watchdog 2933 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) 2934 * In case of new request we reset the "reset level" to PF reset. 2935 * And if it is a repeat reset request of the most recent one then we 2936 * want to make sure we throttle the reset request. Therefore, we will 2937 * not allow it again before 3*HZ times. 2938 */ 2939 if (!handle) 2940 handle = &hdev->vport[0].nic; 2941 2942 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ))) 2943 return; 2944 else if (hdev->default_reset_request) 2945 hdev->reset_level = 2946 hclge_get_reset_level(hdev, 2947 &hdev->default_reset_request); 2948 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) 2949 hdev->reset_level = HNAE3_FUNC_RESET; 2950 2951 dev_info(&hdev->pdev->dev, "received reset event , reset type is %d", 2952 hdev->reset_level); 2953 2954 /* request reset & schedule reset task */ 2955 set_bit(hdev->reset_level, &hdev->reset_request); 2956 hclge_reset_task_schedule(hdev); 2957 2958 if (hdev->reset_level < HNAE3_GLOBAL_RESET) 2959 hdev->reset_level++; 2960 } 2961 2962 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 2963 enum hnae3_reset_type rst_type) 2964 { 2965 struct hclge_dev *hdev = ae_dev->priv; 2966 2967 set_bit(rst_type, &hdev->default_reset_request); 2968 } 2969 2970 static void hclge_reset_timer(struct timer_list *t) 2971 { 2972 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); 2973 2974 dev_info(&hdev->pdev->dev, 2975 "triggering global reset in reset timer\n"); 2976 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request); 2977 hclge_reset_event(hdev->pdev, NULL); 2978 } 2979 2980 static void hclge_reset_subtask(struct hclge_dev *hdev) 2981 { 2982 /* check if there is any ongoing reset in the hardware. This status can 2983 * be checked from reset_pending. If there is then, we need to wait for 2984 * hardware to complete reset. 2985 * a. If we are able to figure out in reasonable time that hardware 2986 * has fully resetted then, we can proceed with driver, client 2987 * reset. 2988 * b. else, we can come back later to check this status so re-sched 2989 * now. 2990 */ 2991 hdev->last_reset_time = jiffies; 2992 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); 2993 if (hdev->reset_type != HNAE3_NONE_RESET) 2994 hclge_reset(hdev); 2995 2996 /* check if we got any *new* reset requests to be honored */ 2997 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); 2998 if (hdev->reset_type != HNAE3_NONE_RESET) 2999 hclge_do_reset(hdev); 3000 3001 hdev->reset_type = HNAE3_NONE_RESET; 3002 } 3003 3004 static void hclge_reset_service_task(struct work_struct *work) 3005 { 3006 struct hclge_dev *hdev = 3007 container_of(work, struct hclge_dev, rst_service_task); 3008 3009 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3010 return; 3011 3012 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 3013 3014 hclge_reset_subtask(hdev); 3015 3016 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 3017 } 3018 3019 static void hclge_mailbox_service_task(struct work_struct *work) 3020 { 3021 struct hclge_dev *hdev = 3022 container_of(work, struct hclge_dev, mbx_service_task); 3023 3024 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 3025 return; 3026 3027 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 3028 3029 hclge_mbx_handler(hdev); 3030 3031 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 3032 } 3033 3034 static void hclge_update_vport_alive(struct hclge_dev *hdev) 3035 { 3036 int i; 3037 3038 /* start from vport 1 for PF is always alive */ 3039 for (i = 1; i < hdev->num_alloc_vport; i++) { 3040 struct hclge_vport *vport = &hdev->vport[i]; 3041 3042 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) 3043 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 3044 3045 /* If vf is not alive, set to default value */ 3046 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 3047 vport->mps = HCLGE_MAC_DEFAULT_FRAME; 3048 } 3049 } 3050 3051 static void hclge_service_task(struct work_struct *work) 3052 { 3053 struct hclge_dev *hdev = 3054 container_of(work, struct hclge_dev, service_task); 3055 3056 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { 3057 hclge_update_stats_for_all(hdev); 3058 hdev->hw_stats.stats_timer = 0; 3059 } 3060 3061 hclge_update_speed_duplex(hdev); 3062 hclge_update_link_status(hdev); 3063 hclge_update_vport_alive(hdev); 3064 hclge_service_complete(hdev); 3065 } 3066 3067 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 3068 { 3069 /* VF handle has no client */ 3070 if (!handle->client) 3071 return container_of(handle, struct hclge_vport, nic); 3072 else if (handle->client->type == HNAE3_CLIENT_ROCE) 3073 return container_of(handle, struct hclge_vport, roce); 3074 else 3075 return container_of(handle, struct hclge_vport, nic); 3076 } 3077 3078 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 3079 struct hnae3_vector_info *vector_info) 3080 { 3081 struct hclge_vport *vport = hclge_get_vport(handle); 3082 struct hnae3_vector_info *vector = vector_info; 3083 struct hclge_dev *hdev = vport->back; 3084 int alloc = 0; 3085 int i, j; 3086 3087 vector_num = min(hdev->num_msi_left, vector_num); 3088 3089 for (j = 0; j < vector_num; j++) { 3090 for (i = 1; i < hdev->num_msi; i++) { 3091 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 3092 vector->vector = pci_irq_vector(hdev->pdev, i); 3093 vector->io_addr = hdev->hw.io_base + 3094 HCLGE_VECTOR_REG_BASE + 3095 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 3096 vport->vport_id * 3097 HCLGE_VECTOR_VF_OFFSET; 3098 hdev->vector_status[i] = vport->vport_id; 3099 hdev->vector_irq[i] = vector->vector; 3100 3101 vector++; 3102 alloc++; 3103 3104 break; 3105 } 3106 } 3107 } 3108 hdev->num_msi_left -= alloc; 3109 hdev->num_msi_used += alloc; 3110 3111 return alloc; 3112 } 3113 3114 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 3115 { 3116 int i; 3117 3118 for (i = 0; i < hdev->num_msi; i++) 3119 if (vector == hdev->vector_irq[i]) 3120 return i; 3121 3122 return -EINVAL; 3123 } 3124 3125 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 3126 { 3127 struct hclge_vport *vport = hclge_get_vport(handle); 3128 struct hclge_dev *hdev = vport->back; 3129 int vector_id; 3130 3131 vector_id = hclge_get_vector_index(hdev, vector); 3132 if (vector_id < 0) { 3133 dev_err(&hdev->pdev->dev, 3134 "Get vector index fail. vector_id =%d\n", vector_id); 3135 return vector_id; 3136 } 3137 3138 hclge_free_vector(hdev, vector_id); 3139 3140 return 0; 3141 } 3142 3143 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 3144 { 3145 return HCLGE_RSS_KEY_SIZE; 3146 } 3147 3148 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 3149 { 3150 return HCLGE_RSS_IND_TBL_SIZE; 3151 } 3152 3153 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 3154 const u8 hfunc, const u8 *key) 3155 { 3156 struct hclge_rss_config_cmd *req; 3157 struct hclge_desc desc; 3158 int key_offset; 3159 int key_size; 3160 int ret; 3161 3162 req = (struct hclge_rss_config_cmd *)desc.data; 3163 3164 for (key_offset = 0; key_offset < 3; key_offset++) { 3165 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 3166 false); 3167 3168 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 3169 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 3170 3171 if (key_offset == 2) 3172 key_size = 3173 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; 3174 else 3175 key_size = HCLGE_RSS_HASH_KEY_NUM; 3176 3177 memcpy(req->hash_key, 3178 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 3179 3180 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3181 if (ret) { 3182 dev_err(&hdev->pdev->dev, 3183 "Configure RSS config fail, status = %d\n", 3184 ret); 3185 return ret; 3186 } 3187 } 3188 return 0; 3189 } 3190 3191 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) 3192 { 3193 struct hclge_rss_indirection_table_cmd *req; 3194 struct hclge_desc desc; 3195 int i, j; 3196 int ret; 3197 3198 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 3199 3200 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 3201 hclge_cmd_setup_basic_desc 3202 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 3203 3204 req->start_table_index = 3205 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 3206 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 3207 3208 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 3209 req->rss_result[j] = 3210 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 3211 3212 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3213 if (ret) { 3214 dev_err(&hdev->pdev->dev, 3215 "Configure rss indir table fail,status = %d\n", 3216 ret); 3217 return ret; 3218 } 3219 } 3220 return 0; 3221 } 3222 3223 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 3224 u16 *tc_size, u16 *tc_offset) 3225 { 3226 struct hclge_rss_tc_mode_cmd *req; 3227 struct hclge_desc desc; 3228 int ret; 3229 int i; 3230 3231 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 3232 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 3233 3234 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3235 u16 mode = 0; 3236 3237 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 3238 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, 3239 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 3240 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 3241 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 3242 3243 req->rss_tc_mode[i] = cpu_to_le16(mode); 3244 } 3245 3246 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3247 if (ret) 3248 dev_err(&hdev->pdev->dev, 3249 "Configure rss tc mode fail, status = %d\n", ret); 3250 3251 return ret; 3252 } 3253 3254 static void hclge_get_rss_type(struct hclge_vport *vport) 3255 { 3256 if (vport->rss_tuple_sets.ipv4_tcp_en || 3257 vport->rss_tuple_sets.ipv4_udp_en || 3258 vport->rss_tuple_sets.ipv4_sctp_en || 3259 vport->rss_tuple_sets.ipv6_tcp_en || 3260 vport->rss_tuple_sets.ipv6_udp_en || 3261 vport->rss_tuple_sets.ipv6_sctp_en) 3262 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; 3263 else if (vport->rss_tuple_sets.ipv4_fragment_en || 3264 vport->rss_tuple_sets.ipv6_fragment_en) 3265 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; 3266 else 3267 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; 3268 } 3269 3270 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 3271 { 3272 struct hclge_rss_input_tuple_cmd *req; 3273 struct hclge_desc desc; 3274 int ret; 3275 3276 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3277 3278 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3279 3280 /* Get the tuple cfg from pf */ 3281 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; 3282 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; 3283 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; 3284 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; 3285 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; 3286 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; 3287 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; 3288 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; 3289 hclge_get_rss_type(&hdev->vport[0]); 3290 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3291 if (ret) 3292 dev_err(&hdev->pdev->dev, 3293 "Configure rss input fail, status = %d\n", ret); 3294 return ret; 3295 } 3296 3297 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 3298 u8 *key, u8 *hfunc) 3299 { 3300 struct hclge_vport *vport = hclge_get_vport(handle); 3301 int i; 3302 3303 /* Get hash algorithm */ 3304 if (hfunc) { 3305 switch (vport->rss_algo) { 3306 case HCLGE_RSS_HASH_ALGO_TOEPLITZ: 3307 *hfunc = ETH_RSS_HASH_TOP; 3308 break; 3309 case HCLGE_RSS_HASH_ALGO_SIMPLE: 3310 *hfunc = ETH_RSS_HASH_XOR; 3311 break; 3312 default: 3313 *hfunc = ETH_RSS_HASH_UNKNOWN; 3314 break; 3315 } 3316 } 3317 3318 /* Get the RSS Key required by the user */ 3319 if (key) 3320 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 3321 3322 /* Get indirect table */ 3323 if (indir) 3324 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3325 indir[i] = vport->rss_indirection_tbl[i]; 3326 3327 return 0; 3328 } 3329 3330 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 3331 const u8 *key, const u8 hfunc) 3332 { 3333 struct hclge_vport *vport = hclge_get_vport(handle); 3334 struct hclge_dev *hdev = vport->back; 3335 u8 hash_algo; 3336 int ret, i; 3337 3338 /* Set the RSS Hash Key if specififed by the user */ 3339 if (key) { 3340 switch (hfunc) { 3341 case ETH_RSS_HASH_TOP: 3342 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3343 break; 3344 case ETH_RSS_HASH_XOR: 3345 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; 3346 break; 3347 case ETH_RSS_HASH_NO_CHANGE: 3348 hash_algo = vport->rss_algo; 3349 break; 3350 default: 3351 return -EINVAL; 3352 } 3353 3354 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 3355 if (ret) 3356 return ret; 3357 3358 /* Update the shadow RSS key with user specified qids */ 3359 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 3360 vport->rss_algo = hash_algo; 3361 } 3362 3363 /* Update the shadow RSS table with user specified qids */ 3364 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3365 vport->rss_indirection_tbl[i] = indir[i]; 3366 3367 /* Update the hardware */ 3368 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); 3369 } 3370 3371 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 3372 { 3373 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; 3374 3375 if (nfc->data & RXH_L4_B_2_3) 3376 hash_sets |= HCLGE_D_PORT_BIT; 3377 else 3378 hash_sets &= ~HCLGE_D_PORT_BIT; 3379 3380 if (nfc->data & RXH_IP_SRC) 3381 hash_sets |= HCLGE_S_IP_BIT; 3382 else 3383 hash_sets &= ~HCLGE_S_IP_BIT; 3384 3385 if (nfc->data & RXH_IP_DST) 3386 hash_sets |= HCLGE_D_IP_BIT; 3387 else 3388 hash_sets &= ~HCLGE_D_IP_BIT; 3389 3390 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 3391 hash_sets |= HCLGE_V_TAG_BIT; 3392 3393 return hash_sets; 3394 } 3395 3396 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 3397 struct ethtool_rxnfc *nfc) 3398 { 3399 struct hclge_vport *vport = hclge_get_vport(handle); 3400 struct hclge_dev *hdev = vport->back; 3401 struct hclge_rss_input_tuple_cmd *req; 3402 struct hclge_desc desc; 3403 u8 tuple_sets; 3404 int ret; 3405 3406 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 3407 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 3408 return -EINVAL; 3409 3410 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3411 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3412 3413 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; 3414 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; 3415 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; 3416 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; 3417 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; 3418 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; 3419 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; 3420 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; 3421 3422 tuple_sets = hclge_get_rss_hash_bits(nfc); 3423 switch (nfc->flow_type) { 3424 case TCP_V4_FLOW: 3425 req->ipv4_tcp_en = tuple_sets; 3426 break; 3427 case TCP_V6_FLOW: 3428 req->ipv6_tcp_en = tuple_sets; 3429 break; 3430 case UDP_V4_FLOW: 3431 req->ipv4_udp_en = tuple_sets; 3432 break; 3433 case UDP_V6_FLOW: 3434 req->ipv6_udp_en = tuple_sets; 3435 break; 3436 case SCTP_V4_FLOW: 3437 req->ipv4_sctp_en = tuple_sets; 3438 break; 3439 case SCTP_V6_FLOW: 3440 if ((nfc->data & RXH_L4_B_0_1) || 3441 (nfc->data & RXH_L4_B_2_3)) 3442 return -EINVAL; 3443 3444 req->ipv6_sctp_en = tuple_sets; 3445 break; 3446 case IPV4_FLOW: 3447 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3448 break; 3449 case IPV6_FLOW: 3450 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3451 break; 3452 default: 3453 return -EINVAL; 3454 } 3455 3456 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3457 if (ret) { 3458 dev_err(&hdev->pdev->dev, 3459 "Set rss tuple fail, status = %d\n", ret); 3460 return ret; 3461 } 3462 3463 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 3464 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 3465 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 3466 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 3467 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 3468 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 3469 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 3470 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 3471 hclge_get_rss_type(vport); 3472 return 0; 3473 } 3474 3475 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 3476 struct ethtool_rxnfc *nfc) 3477 { 3478 struct hclge_vport *vport = hclge_get_vport(handle); 3479 u8 tuple_sets; 3480 3481 nfc->data = 0; 3482 3483 switch (nfc->flow_type) { 3484 case TCP_V4_FLOW: 3485 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; 3486 break; 3487 case UDP_V4_FLOW: 3488 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; 3489 break; 3490 case TCP_V6_FLOW: 3491 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; 3492 break; 3493 case UDP_V6_FLOW: 3494 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; 3495 break; 3496 case SCTP_V4_FLOW: 3497 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; 3498 break; 3499 case SCTP_V6_FLOW: 3500 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; 3501 break; 3502 case IPV4_FLOW: 3503 case IPV6_FLOW: 3504 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 3505 break; 3506 default: 3507 return -EINVAL; 3508 } 3509 3510 if (!tuple_sets) 3511 return 0; 3512 3513 if (tuple_sets & HCLGE_D_PORT_BIT) 3514 nfc->data |= RXH_L4_B_2_3; 3515 if (tuple_sets & HCLGE_S_PORT_BIT) 3516 nfc->data |= RXH_L4_B_0_1; 3517 if (tuple_sets & HCLGE_D_IP_BIT) 3518 nfc->data |= RXH_IP_DST; 3519 if (tuple_sets & HCLGE_S_IP_BIT) 3520 nfc->data |= RXH_IP_SRC; 3521 3522 return 0; 3523 } 3524 3525 static int hclge_get_tc_size(struct hnae3_handle *handle) 3526 { 3527 struct hclge_vport *vport = hclge_get_vport(handle); 3528 struct hclge_dev *hdev = vport->back; 3529 3530 return hdev->rss_size_max; 3531 } 3532 3533 int hclge_rss_init_hw(struct hclge_dev *hdev) 3534 { 3535 struct hclge_vport *vport = hdev->vport; 3536 u8 *rss_indir = vport[0].rss_indirection_tbl; 3537 u16 rss_size = vport[0].alloc_rss_size; 3538 u8 *key = vport[0].rss_hash_key; 3539 u8 hfunc = vport[0].rss_algo; 3540 u16 tc_offset[HCLGE_MAX_TC_NUM]; 3541 u16 tc_valid[HCLGE_MAX_TC_NUM]; 3542 u16 tc_size[HCLGE_MAX_TC_NUM]; 3543 u16 roundup_size; 3544 int i, ret; 3545 3546 ret = hclge_set_rss_indir_table(hdev, rss_indir); 3547 if (ret) 3548 return ret; 3549 3550 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 3551 if (ret) 3552 return ret; 3553 3554 ret = hclge_set_rss_input_tuple(hdev); 3555 if (ret) 3556 return ret; 3557 3558 /* Each TC have the same queue size, and tc_size set to hardware is 3559 * the log2 of roundup power of two of rss_size, the acutal queue 3560 * size is limited by indirection table. 3561 */ 3562 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 3563 dev_err(&hdev->pdev->dev, 3564 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 3565 rss_size); 3566 return -EINVAL; 3567 } 3568 3569 roundup_size = roundup_pow_of_two(rss_size); 3570 roundup_size = ilog2(roundup_size); 3571 3572 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3573 tc_valid[i] = 0; 3574 3575 if (!(hdev->hw_tc_map & BIT(i))) 3576 continue; 3577 3578 tc_valid[i] = 1; 3579 tc_size[i] = roundup_size; 3580 tc_offset[i] = rss_size * i; 3581 } 3582 3583 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 3584 } 3585 3586 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) 3587 { 3588 struct hclge_vport *vport = hdev->vport; 3589 int i, j; 3590 3591 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 3592 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3593 vport[j].rss_indirection_tbl[i] = 3594 i % vport[j].alloc_rss_size; 3595 } 3596 } 3597 3598 static void hclge_rss_init_cfg(struct hclge_dev *hdev) 3599 { 3600 struct hclge_vport *vport = hdev->vport; 3601 int i; 3602 3603 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 3604 vport[i].rss_tuple_sets.ipv4_tcp_en = 3605 HCLGE_RSS_INPUT_TUPLE_OTHER; 3606 vport[i].rss_tuple_sets.ipv4_udp_en = 3607 HCLGE_RSS_INPUT_TUPLE_OTHER; 3608 vport[i].rss_tuple_sets.ipv4_sctp_en = 3609 HCLGE_RSS_INPUT_TUPLE_SCTP; 3610 vport[i].rss_tuple_sets.ipv4_fragment_en = 3611 HCLGE_RSS_INPUT_TUPLE_OTHER; 3612 vport[i].rss_tuple_sets.ipv6_tcp_en = 3613 HCLGE_RSS_INPUT_TUPLE_OTHER; 3614 vport[i].rss_tuple_sets.ipv6_udp_en = 3615 HCLGE_RSS_INPUT_TUPLE_OTHER; 3616 vport[i].rss_tuple_sets.ipv6_sctp_en = 3617 HCLGE_RSS_INPUT_TUPLE_SCTP; 3618 vport[i].rss_tuple_sets.ipv6_fragment_en = 3619 HCLGE_RSS_INPUT_TUPLE_OTHER; 3620 3621 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3622 3623 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); 3624 } 3625 3626 hclge_rss_indir_init_cfg(hdev); 3627 } 3628 3629 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 3630 int vector_id, bool en, 3631 struct hnae3_ring_chain_node *ring_chain) 3632 { 3633 struct hclge_dev *hdev = vport->back; 3634 struct hnae3_ring_chain_node *node; 3635 struct hclge_desc desc; 3636 struct hclge_ctrl_vector_chain_cmd *req 3637 = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 3638 enum hclge_cmd_status status; 3639 enum hclge_opcode_type op; 3640 u16 tqp_type_and_id; 3641 int i; 3642 3643 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 3644 hclge_cmd_setup_basic_desc(&desc, op, false); 3645 req->int_vector_id = vector_id; 3646 3647 i = 0; 3648 for (node = ring_chain; node; node = node->next) { 3649 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 3650 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 3651 HCLGE_INT_TYPE_S, 3652 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3653 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 3654 HCLGE_TQP_ID_S, node->tqp_index); 3655 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 3656 HCLGE_INT_GL_IDX_S, 3657 hnae3_get_field(node->int_gl_idx, 3658 HNAE3_RING_GL_IDX_M, 3659 HNAE3_RING_GL_IDX_S)); 3660 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 3661 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 3662 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 3663 req->vfid = vport->vport_id; 3664 3665 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3666 if (status) { 3667 dev_err(&hdev->pdev->dev, 3668 "Map TQP fail, status is %d.\n", 3669 status); 3670 return -EIO; 3671 } 3672 i = 0; 3673 3674 hclge_cmd_setup_basic_desc(&desc, 3675 op, 3676 false); 3677 req->int_vector_id = vector_id; 3678 } 3679 } 3680 3681 if (i > 0) { 3682 req->int_cause_num = i; 3683 req->vfid = vport->vport_id; 3684 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3685 if (status) { 3686 dev_err(&hdev->pdev->dev, 3687 "Map TQP fail, status is %d.\n", status); 3688 return -EIO; 3689 } 3690 } 3691 3692 return 0; 3693 } 3694 3695 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, 3696 int vector, 3697 struct hnae3_ring_chain_node *ring_chain) 3698 { 3699 struct hclge_vport *vport = hclge_get_vport(handle); 3700 struct hclge_dev *hdev = vport->back; 3701 int vector_id; 3702 3703 vector_id = hclge_get_vector_index(hdev, vector); 3704 if (vector_id < 0) { 3705 dev_err(&hdev->pdev->dev, 3706 "Get vector index fail. vector_id =%d\n", vector_id); 3707 return vector_id; 3708 } 3709 3710 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 3711 } 3712 3713 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, 3714 int vector, 3715 struct hnae3_ring_chain_node *ring_chain) 3716 { 3717 struct hclge_vport *vport = hclge_get_vport(handle); 3718 struct hclge_dev *hdev = vport->back; 3719 int vector_id, ret; 3720 3721 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3722 return 0; 3723 3724 vector_id = hclge_get_vector_index(hdev, vector); 3725 if (vector_id < 0) { 3726 dev_err(&handle->pdev->dev, 3727 "Get vector index fail. ret =%d\n", vector_id); 3728 return vector_id; 3729 } 3730 3731 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 3732 if (ret) 3733 dev_err(&handle->pdev->dev, 3734 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 3735 vector_id, 3736 ret); 3737 3738 return ret; 3739 } 3740 3741 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 3742 struct hclge_promisc_param *param) 3743 { 3744 struct hclge_promisc_cfg_cmd *req; 3745 struct hclge_desc desc; 3746 int ret; 3747 3748 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 3749 3750 req = (struct hclge_promisc_cfg_cmd *)desc.data; 3751 req->vf_id = param->vf_id; 3752 3753 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on 3754 * pdev revision(0x20), new revision support them. The 3755 * value of this two fields will not return error when driver 3756 * send command to fireware in revision(0x20). 3757 */ 3758 req->flag = (param->enable << HCLGE_PROMISC_EN_B) | 3759 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; 3760 3761 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3762 if (ret) 3763 dev_err(&hdev->pdev->dev, 3764 "Set promisc mode fail, status is %d.\n", ret); 3765 3766 return ret; 3767 } 3768 3769 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, 3770 bool en_mc, bool en_bc, int vport_id) 3771 { 3772 if (!param) 3773 return; 3774 3775 memset(param, 0, sizeof(struct hclge_promisc_param)); 3776 if (en_uc) 3777 param->enable = HCLGE_PROMISC_EN_UC; 3778 if (en_mc) 3779 param->enable |= HCLGE_PROMISC_EN_MC; 3780 if (en_bc) 3781 param->enable |= HCLGE_PROMISC_EN_BC; 3782 param->vf_id = vport_id; 3783 } 3784 3785 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 3786 bool en_mc_pmc) 3787 { 3788 struct hclge_vport *vport = hclge_get_vport(handle); 3789 struct hclge_dev *hdev = vport->back; 3790 struct hclge_promisc_param param; 3791 3792 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, 3793 vport->vport_id); 3794 return hclge_cmd_set_promisc_mode(hdev, ¶m); 3795 } 3796 3797 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) 3798 { 3799 struct hclge_get_fd_mode_cmd *req; 3800 struct hclge_desc desc; 3801 int ret; 3802 3803 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); 3804 3805 req = (struct hclge_get_fd_mode_cmd *)desc.data; 3806 3807 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3808 if (ret) { 3809 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); 3810 return ret; 3811 } 3812 3813 *fd_mode = req->mode; 3814 3815 return ret; 3816 } 3817 3818 static int hclge_get_fd_allocation(struct hclge_dev *hdev, 3819 u32 *stage1_entry_num, 3820 u32 *stage2_entry_num, 3821 u16 *stage1_counter_num, 3822 u16 *stage2_counter_num) 3823 { 3824 struct hclge_get_fd_allocation_cmd *req; 3825 struct hclge_desc desc; 3826 int ret; 3827 3828 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); 3829 3830 req = (struct hclge_get_fd_allocation_cmd *)desc.data; 3831 3832 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3833 if (ret) { 3834 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", 3835 ret); 3836 return ret; 3837 } 3838 3839 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); 3840 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); 3841 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); 3842 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); 3843 3844 return ret; 3845 } 3846 3847 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) 3848 { 3849 struct hclge_set_fd_key_config_cmd *req; 3850 struct hclge_fd_key_cfg *stage; 3851 struct hclge_desc desc; 3852 int ret; 3853 3854 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); 3855 3856 req = (struct hclge_set_fd_key_config_cmd *)desc.data; 3857 stage = &hdev->fd_cfg.key_cfg[stage_num]; 3858 req->stage = stage_num; 3859 req->key_select = stage->key_sel; 3860 req->inner_sipv6_word_en = stage->inner_sipv6_word_en; 3861 req->inner_dipv6_word_en = stage->inner_dipv6_word_en; 3862 req->outer_sipv6_word_en = stage->outer_sipv6_word_en; 3863 req->outer_dipv6_word_en = stage->outer_dipv6_word_en; 3864 req->tuple_mask = cpu_to_le32(~stage->tuple_active); 3865 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); 3866 3867 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3868 if (ret) 3869 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); 3870 3871 return ret; 3872 } 3873 3874 static int hclge_init_fd_config(struct hclge_dev *hdev) 3875 { 3876 #define LOW_2_WORDS 0x03 3877 struct hclge_fd_key_cfg *key_cfg; 3878 int ret; 3879 3880 if (!hnae3_dev_fd_supported(hdev)) 3881 return 0; 3882 3883 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); 3884 if (ret) 3885 return ret; 3886 3887 switch (hdev->fd_cfg.fd_mode) { 3888 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: 3889 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; 3890 break; 3891 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: 3892 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; 3893 break; 3894 default: 3895 dev_err(&hdev->pdev->dev, 3896 "Unsupported flow director mode %d\n", 3897 hdev->fd_cfg.fd_mode); 3898 return -EOPNOTSUPP; 3899 } 3900 3901 hdev->fd_cfg.fd_en = true; 3902 hdev->fd_cfg.proto_support = 3903 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | 3904 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; 3905 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; 3906 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, 3907 key_cfg->inner_sipv6_word_en = LOW_2_WORDS; 3908 key_cfg->inner_dipv6_word_en = LOW_2_WORDS; 3909 key_cfg->outer_sipv6_word_en = 0; 3910 key_cfg->outer_dipv6_word_en = 0; 3911 3912 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | 3913 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | 3914 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 3915 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 3916 3917 /* If use max 400bit key, we can support tuples for ether type */ 3918 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { 3919 hdev->fd_cfg.proto_support |= ETHER_FLOW; 3920 key_cfg->tuple_active |= 3921 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); 3922 } 3923 3924 /* roce_type is used to filter roce frames 3925 * dst_vport is used to specify the rule 3926 */ 3927 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); 3928 3929 ret = hclge_get_fd_allocation(hdev, 3930 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 3931 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], 3932 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], 3933 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); 3934 if (ret) 3935 return ret; 3936 3937 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); 3938 } 3939 3940 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, 3941 int loc, u8 *key, bool is_add) 3942 { 3943 struct hclge_fd_tcam_config_1_cmd *req1; 3944 struct hclge_fd_tcam_config_2_cmd *req2; 3945 struct hclge_fd_tcam_config_3_cmd *req3; 3946 struct hclge_desc desc[3]; 3947 int ret; 3948 3949 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); 3950 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3951 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); 3952 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3953 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); 3954 3955 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 3956 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 3957 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 3958 3959 req1->stage = stage; 3960 req1->xy_sel = sel_x ? 1 : 0; 3961 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); 3962 req1->index = cpu_to_le32(loc); 3963 req1->entry_vld = sel_x ? is_add : 0; 3964 3965 if (key) { 3966 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); 3967 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], 3968 sizeof(req2->tcam_data)); 3969 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + 3970 sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); 3971 } 3972 3973 ret = hclge_cmd_send(&hdev->hw, desc, 3); 3974 if (ret) 3975 dev_err(&hdev->pdev->dev, 3976 "config tcam key fail, ret=%d\n", 3977 ret); 3978 3979 return ret; 3980 } 3981 3982 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, 3983 struct hclge_fd_ad_data *action) 3984 { 3985 struct hclge_fd_ad_config_cmd *req; 3986 struct hclge_desc desc; 3987 u64 ad_data = 0; 3988 int ret; 3989 3990 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); 3991 3992 req = (struct hclge_fd_ad_config_cmd *)desc.data; 3993 req->index = cpu_to_le32(loc); 3994 req->stage = stage; 3995 3996 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, 3997 action->write_rule_id_to_bd); 3998 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, 3999 action->rule_id); 4000 ad_data <<= 32; 4001 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); 4002 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, 4003 action->forward_to_direct_queue); 4004 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, 4005 action->queue_id); 4006 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); 4007 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, 4008 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); 4009 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); 4010 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, 4011 action->counter_id); 4012 4013 req->ad_data = cpu_to_le64(ad_data); 4014 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4015 if (ret) 4016 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); 4017 4018 return ret; 4019 } 4020 4021 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, 4022 struct hclge_fd_rule *rule) 4023 { 4024 u16 tmp_x_s, tmp_y_s; 4025 u32 tmp_x_l, tmp_y_l; 4026 int i; 4027 4028 if (rule->unused_tuple & tuple_bit) 4029 return true; 4030 4031 switch (tuple_bit) { 4032 case 0: 4033 return false; 4034 case BIT(INNER_DST_MAC): 4035 for (i = 0; i < 6; i++) { 4036 calc_x(key_x[5 - i], rule->tuples.dst_mac[i], 4037 rule->tuples_mask.dst_mac[i]); 4038 calc_y(key_y[5 - i], rule->tuples.dst_mac[i], 4039 rule->tuples_mask.dst_mac[i]); 4040 } 4041 4042 return true; 4043 case BIT(INNER_SRC_MAC): 4044 for (i = 0; i < 6; i++) { 4045 calc_x(key_x[5 - i], rule->tuples.src_mac[i], 4046 rule->tuples.src_mac[i]); 4047 calc_y(key_y[5 - i], rule->tuples.src_mac[i], 4048 rule->tuples.src_mac[i]); 4049 } 4050 4051 return true; 4052 case BIT(INNER_VLAN_TAG_FST): 4053 calc_x(tmp_x_s, rule->tuples.vlan_tag1, 4054 rule->tuples_mask.vlan_tag1); 4055 calc_y(tmp_y_s, rule->tuples.vlan_tag1, 4056 rule->tuples_mask.vlan_tag1); 4057 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 4058 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 4059 4060 return true; 4061 case BIT(INNER_ETH_TYPE): 4062 calc_x(tmp_x_s, rule->tuples.ether_proto, 4063 rule->tuples_mask.ether_proto); 4064 calc_y(tmp_y_s, rule->tuples.ether_proto, 4065 rule->tuples_mask.ether_proto); 4066 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 4067 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 4068 4069 return true; 4070 case BIT(INNER_IP_TOS): 4071 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); 4072 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); 4073 4074 return true; 4075 case BIT(INNER_IP_PROTO): 4076 calc_x(*key_x, rule->tuples.ip_proto, 4077 rule->tuples_mask.ip_proto); 4078 calc_y(*key_y, rule->tuples.ip_proto, 4079 rule->tuples_mask.ip_proto); 4080 4081 return true; 4082 case BIT(INNER_SRC_IP): 4083 calc_x(tmp_x_l, rule->tuples.src_ip[3], 4084 rule->tuples_mask.src_ip[3]); 4085 calc_y(tmp_y_l, rule->tuples.src_ip[3], 4086 rule->tuples_mask.src_ip[3]); 4087 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 4088 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 4089 4090 return true; 4091 case BIT(INNER_DST_IP): 4092 calc_x(tmp_x_l, rule->tuples.dst_ip[3], 4093 rule->tuples_mask.dst_ip[3]); 4094 calc_y(tmp_y_l, rule->tuples.dst_ip[3], 4095 rule->tuples_mask.dst_ip[3]); 4096 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 4097 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 4098 4099 return true; 4100 case BIT(INNER_SRC_PORT): 4101 calc_x(tmp_x_s, rule->tuples.src_port, 4102 rule->tuples_mask.src_port); 4103 calc_y(tmp_y_s, rule->tuples.src_port, 4104 rule->tuples_mask.src_port); 4105 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 4106 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 4107 4108 return true; 4109 case BIT(INNER_DST_PORT): 4110 calc_x(tmp_x_s, rule->tuples.dst_port, 4111 rule->tuples_mask.dst_port); 4112 calc_y(tmp_y_s, rule->tuples.dst_port, 4113 rule->tuples_mask.dst_port); 4114 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 4115 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 4116 4117 return true; 4118 default: 4119 return false; 4120 } 4121 } 4122 4123 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, 4124 u8 vf_id, u8 network_port_id) 4125 { 4126 u32 port_number = 0; 4127 4128 if (port_type == HOST_PORT) { 4129 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, 4130 pf_id); 4131 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, 4132 vf_id); 4133 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); 4134 } else { 4135 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, 4136 HCLGE_NETWORK_PORT_ID_S, network_port_id); 4137 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); 4138 } 4139 4140 return port_number; 4141 } 4142 4143 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, 4144 __le32 *key_x, __le32 *key_y, 4145 struct hclge_fd_rule *rule) 4146 { 4147 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; 4148 u8 cur_pos = 0, tuple_size, shift_bits; 4149 int i; 4150 4151 for (i = 0; i < MAX_META_DATA; i++) { 4152 tuple_size = meta_data_key_info[i].key_length; 4153 tuple_bit = key_cfg->meta_data_active & BIT(i); 4154 4155 switch (tuple_bit) { 4156 case BIT(ROCE_TYPE): 4157 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); 4158 cur_pos += tuple_size; 4159 break; 4160 case BIT(DST_VPORT): 4161 port_number = hclge_get_port_number(HOST_PORT, 0, 4162 rule->vf_id, 0); 4163 hnae3_set_field(meta_data, 4164 GENMASK(cur_pos + tuple_size, cur_pos), 4165 cur_pos, port_number); 4166 cur_pos += tuple_size; 4167 break; 4168 default: 4169 break; 4170 } 4171 } 4172 4173 calc_x(tmp_x, meta_data, 0xFFFFFFFF); 4174 calc_y(tmp_y, meta_data, 0xFFFFFFFF); 4175 shift_bits = sizeof(meta_data) * 8 - cur_pos; 4176 4177 *key_x = cpu_to_le32(tmp_x << shift_bits); 4178 *key_y = cpu_to_le32(tmp_y << shift_bits); 4179 } 4180 4181 /* A complete key is combined with meta data key and tuple key. 4182 * Meta data key is stored at the MSB region, and tuple key is stored at 4183 * the LSB region, unused bits will be filled 0. 4184 */ 4185 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, 4186 struct hclge_fd_rule *rule) 4187 { 4188 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; 4189 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; 4190 u8 *cur_key_x, *cur_key_y; 4191 int i, ret, tuple_size; 4192 u8 meta_data_region; 4193 4194 memset(key_x, 0, sizeof(key_x)); 4195 memset(key_y, 0, sizeof(key_y)); 4196 cur_key_x = key_x; 4197 cur_key_y = key_y; 4198 4199 for (i = 0 ; i < MAX_TUPLE; i++) { 4200 bool tuple_valid; 4201 u32 check_tuple; 4202 4203 tuple_size = tuple_key_info[i].key_length / 8; 4204 check_tuple = key_cfg->tuple_active & BIT(i); 4205 4206 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, 4207 cur_key_y, rule); 4208 if (tuple_valid) { 4209 cur_key_x += tuple_size; 4210 cur_key_y += tuple_size; 4211 } 4212 } 4213 4214 meta_data_region = hdev->fd_cfg.max_key_length / 8 - 4215 MAX_META_DATA_LENGTH / 8; 4216 4217 hclge_fd_convert_meta_data(key_cfg, 4218 (__le32 *)(key_x + meta_data_region), 4219 (__le32 *)(key_y + meta_data_region), 4220 rule); 4221 4222 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, 4223 true); 4224 if (ret) { 4225 dev_err(&hdev->pdev->dev, 4226 "fd key_y config fail, loc=%d, ret=%d\n", 4227 rule->queue_id, ret); 4228 return ret; 4229 } 4230 4231 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, 4232 true); 4233 if (ret) 4234 dev_err(&hdev->pdev->dev, 4235 "fd key_x config fail, loc=%d, ret=%d\n", 4236 rule->queue_id, ret); 4237 return ret; 4238 } 4239 4240 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, 4241 struct hclge_fd_rule *rule) 4242 { 4243 struct hclge_fd_ad_data ad_data; 4244 4245 ad_data.ad_id = rule->location; 4246 4247 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 4248 ad_data.drop_packet = true; 4249 ad_data.forward_to_direct_queue = false; 4250 ad_data.queue_id = 0; 4251 } else { 4252 ad_data.drop_packet = false; 4253 ad_data.forward_to_direct_queue = true; 4254 ad_data.queue_id = rule->queue_id; 4255 } 4256 4257 ad_data.use_counter = false; 4258 ad_data.counter_id = 0; 4259 4260 ad_data.use_next_stage = false; 4261 ad_data.next_input_key = 0; 4262 4263 ad_data.write_rule_id_to_bd = true; 4264 ad_data.rule_id = rule->location; 4265 4266 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); 4267 } 4268 4269 static int hclge_fd_check_spec(struct hclge_dev *hdev, 4270 struct ethtool_rx_flow_spec *fs, u32 *unused) 4271 { 4272 struct ethtool_tcpip4_spec *tcp_ip4_spec; 4273 struct ethtool_usrip4_spec *usr_ip4_spec; 4274 struct ethtool_tcpip6_spec *tcp_ip6_spec; 4275 struct ethtool_usrip6_spec *usr_ip6_spec; 4276 struct ethhdr *ether_spec; 4277 4278 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 4279 return -EINVAL; 4280 4281 if (!(fs->flow_type & hdev->fd_cfg.proto_support)) 4282 return -EOPNOTSUPP; 4283 4284 if ((fs->flow_type & FLOW_EXT) && 4285 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { 4286 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); 4287 return -EOPNOTSUPP; 4288 } 4289 4290 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 4291 case SCTP_V4_FLOW: 4292 case TCP_V4_FLOW: 4293 case UDP_V4_FLOW: 4294 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; 4295 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 4296 4297 if (!tcp_ip4_spec->ip4src) 4298 *unused |= BIT(INNER_SRC_IP); 4299 4300 if (!tcp_ip4_spec->ip4dst) 4301 *unused |= BIT(INNER_DST_IP); 4302 4303 if (!tcp_ip4_spec->psrc) 4304 *unused |= BIT(INNER_SRC_PORT); 4305 4306 if (!tcp_ip4_spec->pdst) 4307 *unused |= BIT(INNER_DST_PORT); 4308 4309 if (!tcp_ip4_spec->tos) 4310 *unused |= BIT(INNER_IP_TOS); 4311 4312 break; 4313 case IP_USER_FLOW: 4314 usr_ip4_spec = &fs->h_u.usr_ip4_spec; 4315 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 4316 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 4317 4318 if (!usr_ip4_spec->ip4src) 4319 *unused |= BIT(INNER_SRC_IP); 4320 4321 if (!usr_ip4_spec->ip4dst) 4322 *unused |= BIT(INNER_DST_IP); 4323 4324 if (!usr_ip4_spec->tos) 4325 *unused |= BIT(INNER_IP_TOS); 4326 4327 if (!usr_ip4_spec->proto) 4328 *unused |= BIT(INNER_IP_PROTO); 4329 4330 if (usr_ip4_spec->l4_4_bytes) 4331 return -EOPNOTSUPP; 4332 4333 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) 4334 return -EOPNOTSUPP; 4335 4336 break; 4337 case SCTP_V6_FLOW: 4338 case TCP_V6_FLOW: 4339 case UDP_V6_FLOW: 4340 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; 4341 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 4342 BIT(INNER_IP_TOS); 4343 4344 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && 4345 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) 4346 *unused |= BIT(INNER_SRC_IP); 4347 4348 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && 4349 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) 4350 *unused |= BIT(INNER_DST_IP); 4351 4352 if (!tcp_ip6_spec->psrc) 4353 *unused |= BIT(INNER_SRC_PORT); 4354 4355 if (!tcp_ip6_spec->pdst) 4356 *unused |= BIT(INNER_DST_PORT); 4357 4358 if (tcp_ip6_spec->tclass) 4359 return -EOPNOTSUPP; 4360 4361 break; 4362 case IPV6_USER_FLOW: 4363 usr_ip6_spec = &fs->h_u.usr_ip6_spec; 4364 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 4365 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | 4366 BIT(INNER_DST_PORT); 4367 4368 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && 4369 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) 4370 *unused |= BIT(INNER_SRC_IP); 4371 4372 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && 4373 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) 4374 *unused |= BIT(INNER_DST_IP); 4375 4376 if (!usr_ip6_spec->l4_proto) 4377 *unused |= BIT(INNER_IP_PROTO); 4378 4379 if (usr_ip6_spec->tclass) 4380 return -EOPNOTSUPP; 4381 4382 if (usr_ip6_spec->l4_4_bytes) 4383 return -EOPNOTSUPP; 4384 4385 break; 4386 case ETHER_FLOW: 4387 ether_spec = &fs->h_u.ether_spec; 4388 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 4389 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | 4390 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); 4391 4392 if (is_zero_ether_addr(ether_spec->h_source)) 4393 *unused |= BIT(INNER_SRC_MAC); 4394 4395 if (is_zero_ether_addr(ether_spec->h_dest)) 4396 *unused |= BIT(INNER_DST_MAC); 4397 4398 if (!ether_spec->h_proto) 4399 *unused |= BIT(INNER_ETH_TYPE); 4400 4401 break; 4402 default: 4403 return -EOPNOTSUPP; 4404 } 4405 4406 if ((fs->flow_type & FLOW_EXT)) { 4407 if (fs->h_ext.vlan_etype) 4408 return -EOPNOTSUPP; 4409 if (!fs->h_ext.vlan_tci) 4410 *unused |= BIT(INNER_VLAN_TAG_FST); 4411 4412 if (fs->m_ext.vlan_tci) { 4413 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) 4414 return -EINVAL; 4415 } 4416 } else { 4417 *unused |= BIT(INNER_VLAN_TAG_FST); 4418 } 4419 4420 if (fs->flow_type & FLOW_MAC_EXT) { 4421 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) 4422 return -EOPNOTSUPP; 4423 4424 if (is_zero_ether_addr(fs->h_ext.h_dest)) 4425 *unused |= BIT(INNER_DST_MAC); 4426 else 4427 *unused &= ~(BIT(INNER_DST_MAC)); 4428 } 4429 4430 return 0; 4431 } 4432 4433 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) 4434 { 4435 struct hclge_fd_rule *rule = NULL; 4436 struct hlist_node *node2; 4437 4438 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 4439 if (rule->location >= location) 4440 break; 4441 } 4442 4443 return rule && rule->location == location; 4444 } 4445 4446 static int hclge_fd_update_rule_list(struct hclge_dev *hdev, 4447 struct hclge_fd_rule *new_rule, 4448 u16 location, 4449 bool is_add) 4450 { 4451 struct hclge_fd_rule *rule = NULL, *parent = NULL; 4452 struct hlist_node *node2; 4453 4454 if (is_add && !new_rule) 4455 return -EINVAL; 4456 4457 hlist_for_each_entry_safe(rule, node2, 4458 &hdev->fd_rule_list, rule_node) { 4459 if (rule->location >= location) 4460 break; 4461 parent = rule; 4462 } 4463 4464 if (rule && rule->location == location) { 4465 hlist_del(&rule->rule_node); 4466 kfree(rule); 4467 hdev->hclge_fd_rule_num--; 4468 4469 if (!is_add) 4470 return 0; 4471 4472 } else if (!is_add) { 4473 dev_err(&hdev->pdev->dev, 4474 "delete fail, rule %d is inexistent\n", 4475 location); 4476 return -EINVAL; 4477 } 4478 4479 INIT_HLIST_NODE(&new_rule->rule_node); 4480 4481 if (parent) 4482 hlist_add_behind(&new_rule->rule_node, &parent->rule_node); 4483 else 4484 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); 4485 4486 hdev->hclge_fd_rule_num++; 4487 4488 return 0; 4489 } 4490 4491 static int hclge_fd_get_tuple(struct hclge_dev *hdev, 4492 struct ethtool_rx_flow_spec *fs, 4493 struct hclge_fd_rule *rule) 4494 { 4495 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 4496 4497 switch (flow_type) { 4498 case SCTP_V4_FLOW: 4499 case TCP_V4_FLOW: 4500 case UDP_V4_FLOW: 4501 rule->tuples.src_ip[3] = 4502 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); 4503 rule->tuples_mask.src_ip[3] = 4504 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); 4505 4506 rule->tuples.dst_ip[3] = 4507 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); 4508 rule->tuples_mask.dst_ip[3] = 4509 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); 4510 4511 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); 4512 rule->tuples_mask.src_port = 4513 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); 4514 4515 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); 4516 rule->tuples_mask.dst_port = 4517 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); 4518 4519 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; 4520 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; 4521 4522 rule->tuples.ether_proto = ETH_P_IP; 4523 rule->tuples_mask.ether_proto = 0xFFFF; 4524 4525 break; 4526 case IP_USER_FLOW: 4527 rule->tuples.src_ip[3] = 4528 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); 4529 rule->tuples_mask.src_ip[3] = 4530 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); 4531 4532 rule->tuples.dst_ip[3] = 4533 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); 4534 rule->tuples_mask.dst_ip[3] = 4535 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); 4536 4537 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; 4538 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; 4539 4540 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; 4541 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; 4542 4543 rule->tuples.ether_proto = ETH_P_IP; 4544 rule->tuples_mask.ether_proto = 0xFFFF; 4545 4546 break; 4547 case SCTP_V6_FLOW: 4548 case TCP_V6_FLOW: 4549 case UDP_V6_FLOW: 4550 be32_to_cpu_array(rule->tuples.src_ip, 4551 fs->h_u.tcp_ip6_spec.ip6src, 4); 4552 be32_to_cpu_array(rule->tuples_mask.src_ip, 4553 fs->m_u.tcp_ip6_spec.ip6src, 4); 4554 4555 be32_to_cpu_array(rule->tuples.dst_ip, 4556 fs->h_u.tcp_ip6_spec.ip6dst, 4); 4557 be32_to_cpu_array(rule->tuples_mask.dst_ip, 4558 fs->m_u.tcp_ip6_spec.ip6dst, 4); 4559 4560 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); 4561 rule->tuples_mask.src_port = 4562 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); 4563 4564 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); 4565 rule->tuples_mask.dst_port = 4566 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); 4567 4568 rule->tuples.ether_proto = ETH_P_IPV6; 4569 rule->tuples_mask.ether_proto = 0xFFFF; 4570 4571 break; 4572 case IPV6_USER_FLOW: 4573 be32_to_cpu_array(rule->tuples.src_ip, 4574 fs->h_u.usr_ip6_spec.ip6src, 4); 4575 be32_to_cpu_array(rule->tuples_mask.src_ip, 4576 fs->m_u.usr_ip6_spec.ip6src, 4); 4577 4578 be32_to_cpu_array(rule->tuples.dst_ip, 4579 fs->h_u.usr_ip6_spec.ip6dst, 4); 4580 be32_to_cpu_array(rule->tuples_mask.dst_ip, 4581 fs->m_u.usr_ip6_spec.ip6dst, 4); 4582 4583 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; 4584 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; 4585 4586 rule->tuples.ether_proto = ETH_P_IPV6; 4587 rule->tuples_mask.ether_proto = 0xFFFF; 4588 4589 break; 4590 case ETHER_FLOW: 4591 ether_addr_copy(rule->tuples.src_mac, 4592 fs->h_u.ether_spec.h_source); 4593 ether_addr_copy(rule->tuples_mask.src_mac, 4594 fs->m_u.ether_spec.h_source); 4595 4596 ether_addr_copy(rule->tuples.dst_mac, 4597 fs->h_u.ether_spec.h_dest); 4598 ether_addr_copy(rule->tuples_mask.dst_mac, 4599 fs->m_u.ether_spec.h_dest); 4600 4601 rule->tuples.ether_proto = 4602 be16_to_cpu(fs->h_u.ether_spec.h_proto); 4603 rule->tuples_mask.ether_proto = 4604 be16_to_cpu(fs->m_u.ether_spec.h_proto); 4605 4606 break; 4607 default: 4608 return -EOPNOTSUPP; 4609 } 4610 4611 switch (flow_type) { 4612 case SCTP_V4_FLOW: 4613 case SCTP_V6_FLOW: 4614 rule->tuples.ip_proto = IPPROTO_SCTP; 4615 rule->tuples_mask.ip_proto = 0xFF; 4616 break; 4617 case TCP_V4_FLOW: 4618 case TCP_V6_FLOW: 4619 rule->tuples.ip_proto = IPPROTO_TCP; 4620 rule->tuples_mask.ip_proto = 0xFF; 4621 break; 4622 case UDP_V4_FLOW: 4623 case UDP_V6_FLOW: 4624 rule->tuples.ip_proto = IPPROTO_UDP; 4625 rule->tuples_mask.ip_proto = 0xFF; 4626 break; 4627 default: 4628 break; 4629 } 4630 4631 if ((fs->flow_type & FLOW_EXT)) { 4632 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); 4633 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); 4634 } 4635 4636 if (fs->flow_type & FLOW_MAC_EXT) { 4637 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); 4638 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); 4639 } 4640 4641 return 0; 4642 } 4643 4644 static int hclge_add_fd_entry(struct hnae3_handle *handle, 4645 struct ethtool_rxnfc *cmd) 4646 { 4647 struct hclge_vport *vport = hclge_get_vport(handle); 4648 struct hclge_dev *hdev = vport->back; 4649 u16 dst_vport_id = 0, q_index = 0; 4650 struct ethtool_rx_flow_spec *fs; 4651 struct hclge_fd_rule *rule; 4652 u32 unused = 0; 4653 u8 action; 4654 int ret; 4655 4656 if (!hnae3_dev_fd_supported(hdev)) 4657 return -EOPNOTSUPP; 4658 4659 if (!hdev->fd_cfg.fd_en) { 4660 dev_warn(&hdev->pdev->dev, 4661 "Please enable flow director first\n"); 4662 return -EOPNOTSUPP; 4663 } 4664 4665 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 4666 4667 ret = hclge_fd_check_spec(hdev, fs, &unused); 4668 if (ret) { 4669 dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); 4670 return ret; 4671 } 4672 4673 if (fs->ring_cookie == RX_CLS_FLOW_DISC) { 4674 action = HCLGE_FD_ACTION_DROP_PACKET; 4675 } else { 4676 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); 4677 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 4678 u16 tqps; 4679 4680 if (vf > hdev->num_req_vfs) { 4681 dev_err(&hdev->pdev->dev, 4682 "Error: vf id (%d) > max vf num (%d)\n", 4683 vf, hdev->num_req_vfs); 4684 return -EINVAL; 4685 } 4686 4687 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; 4688 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; 4689 4690 if (ring >= tqps) { 4691 dev_err(&hdev->pdev->dev, 4692 "Error: queue id (%d) > max tqp num (%d)\n", 4693 ring, tqps - 1); 4694 return -EINVAL; 4695 } 4696 4697 action = HCLGE_FD_ACTION_ACCEPT_PACKET; 4698 q_index = ring; 4699 } 4700 4701 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 4702 if (!rule) 4703 return -ENOMEM; 4704 4705 ret = hclge_fd_get_tuple(hdev, fs, rule); 4706 if (ret) 4707 goto free_rule; 4708 4709 rule->flow_type = fs->flow_type; 4710 4711 rule->location = fs->location; 4712 rule->unused_tuple = unused; 4713 rule->vf_id = dst_vport_id; 4714 rule->queue_id = q_index; 4715 rule->action = action; 4716 4717 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 4718 if (ret) 4719 goto free_rule; 4720 4721 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 4722 if (ret) 4723 goto free_rule; 4724 4725 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); 4726 if (ret) 4727 goto free_rule; 4728 4729 return ret; 4730 4731 free_rule: 4732 kfree(rule); 4733 return ret; 4734 } 4735 4736 static int hclge_del_fd_entry(struct hnae3_handle *handle, 4737 struct ethtool_rxnfc *cmd) 4738 { 4739 struct hclge_vport *vport = hclge_get_vport(handle); 4740 struct hclge_dev *hdev = vport->back; 4741 struct ethtool_rx_flow_spec *fs; 4742 int ret; 4743 4744 if (!hnae3_dev_fd_supported(hdev)) 4745 return -EOPNOTSUPP; 4746 4747 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 4748 4749 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 4750 return -EINVAL; 4751 4752 if (!hclge_fd_rule_exist(hdev, fs->location)) { 4753 dev_err(&hdev->pdev->dev, 4754 "Delete fail, rule %d is inexistent\n", 4755 fs->location); 4756 return -ENOENT; 4757 } 4758 4759 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 4760 fs->location, NULL, false); 4761 if (ret) 4762 return ret; 4763 4764 return hclge_fd_update_rule_list(hdev, NULL, fs->location, 4765 false); 4766 } 4767 4768 static void hclge_del_all_fd_entries(struct hnae3_handle *handle, 4769 bool clear_list) 4770 { 4771 struct hclge_vport *vport = hclge_get_vport(handle); 4772 struct hclge_dev *hdev = vport->back; 4773 struct hclge_fd_rule *rule; 4774 struct hlist_node *node; 4775 4776 if (!hnae3_dev_fd_supported(hdev)) 4777 return; 4778 4779 if (clear_list) { 4780 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, 4781 rule_node) { 4782 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 4783 rule->location, NULL, false); 4784 hlist_del(&rule->rule_node); 4785 kfree(rule); 4786 hdev->hclge_fd_rule_num--; 4787 } 4788 } else { 4789 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, 4790 rule_node) 4791 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 4792 rule->location, NULL, false); 4793 } 4794 } 4795 4796 static int hclge_restore_fd_entries(struct hnae3_handle *handle) 4797 { 4798 struct hclge_vport *vport = hclge_get_vport(handle); 4799 struct hclge_dev *hdev = vport->back; 4800 struct hclge_fd_rule *rule; 4801 struct hlist_node *node; 4802 int ret; 4803 4804 /* Return ok here, because reset error handling will check this 4805 * return value. If error is returned here, the reset process will 4806 * fail. 4807 */ 4808 if (!hnae3_dev_fd_supported(hdev)) 4809 return 0; 4810 4811 /* if fd is disabled, should not restore it when reset */ 4812 if (!hdev->fd_cfg.fd_en) 4813 return 0; 4814 4815 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 4816 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 4817 if (!ret) 4818 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 4819 4820 if (ret) { 4821 dev_warn(&hdev->pdev->dev, 4822 "Restore rule %d failed, remove it\n", 4823 rule->location); 4824 hlist_del(&rule->rule_node); 4825 kfree(rule); 4826 hdev->hclge_fd_rule_num--; 4827 } 4828 } 4829 return 0; 4830 } 4831 4832 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, 4833 struct ethtool_rxnfc *cmd) 4834 { 4835 struct hclge_vport *vport = hclge_get_vport(handle); 4836 struct hclge_dev *hdev = vport->back; 4837 4838 if (!hnae3_dev_fd_supported(hdev)) 4839 return -EOPNOTSUPP; 4840 4841 cmd->rule_cnt = hdev->hclge_fd_rule_num; 4842 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 4843 4844 return 0; 4845 } 4846 4847 static int hclge_get_fd_rule_info(struct hnae3_handle *handle, 4848 struct ethtool_rxnfc *cmd) 4849 { 4850 struct hclge_vport *vport = hclge_get_vport(handle); 4851 struct hclge_fd_rule *rule = NULL; 4852 struct hclge_dev *hdev = vport->back; 4853 struct ethtool_rx_flow_spec *fs; 4854 struct hlist_node *node2; 4855 4856 if (!hnae3_dev_fd_supported(hdev)) 4857 return -EOPNOTSUPP; 4858 4859 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 4860 4861 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 4862 if (rule->location >= fs->location) 4863 break; 4864 } 4865 4866 if (!rule || fs->location != rule->location) 4867 return -ENOENT; 4868 4869 fs->flow_type = rule->flow_type; 4870 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 4871 case SCTP_V4_FLOW: 4872 case TCP_V4_FLOW: 4873 case UDP_V4_FLOW: 4874 fs->h_u.tcp_ip4_spec.ip4src = 4875 cpu_to_be32(rule->tuples.src_ip[3]); 4876 fs->m_u.tcp_ip4_spec.ip4src = 4877 rule->unused_tuple & BIT(INNER_SRC_IP) ? 4878 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); 4879 4880 fs->h_u.tcp_ip4_spec.ip4dst = 4881 cpu_to_be32(rule->tuples.dst_ip[3]); 4882 fs->m_u.tcp_ip4_spec.ip4dst = 4883 rule->unused_tuple & BIT(INNER_DST_IP) ? 4884 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); 4885 4886 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); 4887 fs->m_u.tcp_ip4_spec.psrc = 4888 rule->unused_tuple & BIT(INNER_SRC_PORT) ? 4889 0 : cpu_to_be16(rule->tuples_mask.src_port); 4890 4891 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); 4892 fs->m_u.tcp_ip4_spec.pdst = 4893 rule->unused_tuple & BIT(INNER_DST_PORT) ? 4894 0 : cpu_to_be16(rule->tuples_mask.dst_port); 4895 4896 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; 4897 fs->m_u.tcp_ip4_spec.tos = 4898 rule->unused_tuple & BIT(INNER_IP_TOS) ? 4899 0 : rule->tuples_mask.ip_tos; 4900 4901 break; 4902 case IP_USER_FLOW: 4903 fs->h_u.usr_ip4_spec.ip4src = 4904 cpu_to_be32(rule->tuples.src_ip[3]); 4905 fs->m_u.tcp_ip4_spec.ip4src = 4906 rule->unused_tuple & BIT(INNER_SRC_IP) ? 4907 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); 4908 4909 fs->h_u.usr_ip4_spec.ip4dst = 4910 cpu_to_be32(rule->tuples.dst_ip[3]); 4911 fs->m_u.usr_ip4_spec.ip4dst = 4912 rule->unused_tuple & BIT(INNER_DST_IP) ? 4913 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); 4914 4915 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; 4916 fs->m_u.usr_ip4_spec.tos = 4917 rule->unused_tuple & BIT(INNER_IP_TOS) ? 4918 0 : rule->tuples_mask.ip_tos; 4919 4920 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; 4921 fs->m_u.usr_ip4_spec.proto = 4922 rule->unused_tuple & BIT(INNER_IP_PROTO) ? 4923 0 : rule->tuples_mask.ip_proto; 4924 4925 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 4926 4927 break; 4928 case SCTP_V6_FLOW: 4929 case TCP_V6_FLOW: 4930 case UDP_V6_FLOW: 4931 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, 4932 rule->tuples.src_ip, 4); 4933 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 4934 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4); 4935 else 4936 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, 4937 rule->tuples_mask.src_ip, 4); 4938 4939 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, 4940 rule->tuples.dst_ip, 4); 4941 if (rule->unused_tuple & BIT(INNER_DST_IP)) 4942 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4); 4943 else 4944 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, 4945 rule->tuples_mask.dst_ip, 4); 4946 4947 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); 4948 fs->m_u.tcp_ip6_spec.psrc = 4949 rule->unused_tuple & BIT(INNER_SRC_PORT) ? 4950 0 : cpu_to_be16(rule->tuples_mask.src_port); 4951 4952 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); 4953 fs->m_u.tcp_ip6_spec.pdst = 4954 rule->unused_tuple & BIT(INNER_DST_PORT) ? 4955 0 : cpu_to_be16(rule->tuples_mask.dst_port); 4956 4957 break; 4958 case IPV6_USER_FLOW: 4959 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, 4960 rule->tuples.src_ip, 4); 4961 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 4962 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4); 4963 else 4964 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, 4965 rule->tuples_mask.src_ip, 4); 4966 4967 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, 4968 rule->tuples.dst_ip, 4); 4969 if (rule->unused_tuple & BIT(INNER_DST_IP)) 4970 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4); 4971 else 4972 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, 4973 rule->tuples_mask.dst_ip, 4); 4974 4975 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; 4976 fs->m_u.usr_ip6_spec.l4_proto = 4977 rule->unused_tuple & BIT(INNER_IP_PROTO) ? 4978 0 : rule->tuples_mask.ip_proto; 4979 4980 break; 4981 case ETHER_FLOW: 4982 ether_addr_copy(fs->h_u.ether_spec.h_source, 4983 rule->tuples.src_mac); 4984 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) 4985 eth_zero_addr(fs->m_u.ether_spec.h_source); 4986 else 4987 ether_addr_copy(fs->m_u.ether_spec.h_source, 4988 rule->tuples_mask.src_mac); 4989 4990 ether_addr_copy(fs->h_u.ether_spec.h_dest, 4991 rule->tuples.dst_mac); 4992 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 4993 eth_zero_addr(fs->m_u.ether_spec.h_dest); 4994 else 4995 ether_addr_copy(fs->m_u.ether_spec.h_dest, 4996 rule->tuples_mask.dst_mac); 4997 4998 fs->h_u.ether_spec.h_proto = 4999 cpu_to_be16(rule->tuples.ether_proto); 5000 fs->m_u.ether_spec.h_proto = 5001 rule->unused_tuple & BIT(INNER_ETH_TYPE) ? 5002 0 : cpu_to_be16(rule->tuples_mask.ether_proto); 5003 5004 break; 5005 default: 5006 return -EOPNOTSUPP; 5007 } 5008 5009 if (fs->flow_type & FLOW_EXT) { 5010 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); 5011 fs->m_ext.vlan_tci = 5012 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 5013 cpu_to_be16(VLAN_VID_MASK) : 5014 cpu_to_be16(rule->tuples_mask.vlan_tag1); 5015 } 5016 5017 if (fs->flow_type & FLOW_MAC_EXT) { 5018 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); 5019 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 5020 eth_zero_addr(fs->m_u.ether_spec.h_dest); 5021 else 5022 ether_addr_copy(fs->m_u.ether_spec.h_dest, 5023 rule->tuples_mask.dst_mac); 5024 } 5025 5026 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 5027 fs->ring_cookie = RX_CLS_FLOW_DISC; 5028 } else { 5029 u64 vf_id; 5030 5031 fs->ring_cookie = rule->queue_id; 5032 vf_id = rule->vf_id; 5033 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 5034 fs->ring_cookie |= vf_id; 5035 } 5036 5037 return 0; 5038 } 5039 5040 static int hclge_get_all_rules(struct hnae3_handle *handle, 5041 struct ethtool_rxnfc *cmd, u32 *rule_locs) 5042 { 5043 struct hclge_vport *vport = hclge_get_vport(handle); 5044 struct hclge_dev *hdev = vport->back; 5045 struct hclge_fd_rule *rule; 5046 struct hlist_node *node2; 5047 int cnt = 0; 5048 5049 if (!hnae3_dev_fd_supported(hdev)) 5050 return -EOPNOTSUPP; 5051 5052 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 5053 5054 hlist_for_each_entry_safe(rule, node2, 5055 &hdev->fd_rule_list, rule_node) { 5056 if (cnt == cmd->rule_cnt) 5057 return -EMSGSIZE; 5058 5059 rule_locs[cnt] = rule->location; 5060 cnt++; 5061 } 5062 5063 cmd->rule_cnt = cnt; 5064 5065 return 0; 5066 } 5067 5068 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) 5069 { 5070 struct hclge_vport *vport = hclge_get_vport(handle); 5071 struct hclge_dev *hdev = vport->back; 5072 5073 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || 5074 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); 5075 } 5076 5077 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) 5078 { 5079 struct hclge_vport *vport = hclge_get_vport(handle); 5080 struct hclge_dev *hdev = vport->back; 5081 5082 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 5083 } 5084 5085 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) 5086 { 5087 struct hclge_vport *vport = hclge_get_vport(handle); 5088 struct hclge_dev *hdev = vport->back; 5089 5090 return hdev->reset_count; 5091 } 5092 5093 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) 5094 { 5095 struct hclge_vport *vport = hclge_get_vport(handle); 5096 struct hclge_dev *hdev = vport->back; 5097 5098 hdev->fd_cfg.fd_en = enable; 5099 if (!enable) 5100 hclge_del_all_fd_entries(handle, false); 5101 else 5102 hclge_restore_fd_entries(handle); 5103 } 5104 5105 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 5106 { 5107 struct hclge_desc desc; 5108 struct hclge_config_mac_mode_cmd *req = 5109 (struct hclge_config_mac_mode_cmd *)desc.data; 5110 u32 loop_en = 0; 5111 int ret; 5112 5113 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 5114 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); 5115 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); 5116 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); 5117 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); 5118 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); 5119 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); 5120 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 5121 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); 5122 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); 5123 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); 5124 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); 5125 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); 5126 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); 5127 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); 5128 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 5129 5130 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5131 if (ret) 5132 dev_err(&hdev->pdev->dev, 5133 "mac enable fail, ret =%d.\n", ret); 5134 } 5135 5136 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) 5137 { 5138 struct hclge_config_mac_mode_cmd *req; 5139 struct hclge_desc desc; 5140 u32 loop_en; 5141 int ret; 5142 5143 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 5144 /* 1 Read out the MAC mode config at first */ 5145 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 5146 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5147 if (ret) { 5148 dev_err(&hdev->pdev->dev, 5149 "mac loopback get fail, ret =%d.\n", ret); 5150 return ret; 5151 } 5152 5153 /* 2 Then setup the loopback flag */ 5154 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 5155 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); 5156 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); 5157 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); 5158 5159 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 5160 5161 /* 3 Config mac work mode with loopback flag 5162 * and its original configure parameters 5163 */ 5164 hclge_cmd_reuse_desc(&desc, false); 5165 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5166 if (ret) 5167 dev_err(&hdev->pdev->dev, 5168 "mac loopback set fail, ret =%d.\n", ret); 5169 return ret; 5170 } 5171 5172 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, 5173 enum hnae3_loop loop_mode) 5174 { 5175 #define HCLGE_SERDES_RETRY_MS 10 5176 #define HCLGE_SERDES_RETRY_NUM 100 5177 struct hclge_serdes_lb_cmd *req; 5178 struct hclge_desc desc; 5179 int ret, i = 0; 5180 u8 loop_mode_b; 5181 5182 req = (struct hclge_serdes_lb_cmd *)desc.data; 5183 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); 5184 5185 switch (loop_mode) { 5186 case HNAE3_LOOP_SERIAL_SERDES: 5187 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 5188 break; 5189 case HNAE3_LOOP_PARALLEL_SERDES: 5190 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; 5191 break; 5192 default: 5193 dev_err(&hdev->pdev->dev, 5194 "unsupported serdes loopback mode %d\n", loop_mode); 5195 return -ENOTSUPP; 5196 } 5197 5198 if (en) { 5199 req->enable = loop_mode_b; 5200 req->mask = loop_mode_b; 5201 } else { 5202 req->mask = loop_mode_b; 5203 } 5204 5205 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5206 if (ret) { 5207 dev_err(&hdev->pdev->dev, 5208 "serdes loopback set fail, ret = %d\n", ret); 5209 return ret; 5210 } 5211 5212 do { 5213 msleep(HCLGE_SERDES_RETRY_MS); 5214 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, 5215 true); 5216 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5217 if (ret) { 5218 dev_err(&hdev->pdev->dev, 5219 "serdes loopback get, ret = %d\n", ret); 5220 return ret; 5221 } 5222 } while (++i < HCLGE_SERDES_RETRY_NUM && 5223 !(req->result & HCLGE_CMD_SERDES_DONE_B)); 5224 5225 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { 5226 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); 5227 return -EBUSY; 5228 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { 5229 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); 5230 return -EIO; 5231 } 5232 5233 hclge_cfg_mac_mode(hdev, en); 5234 return 0; 5235 } 5236 5237 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, 5238 int stream_id, bool enable) 5239 { 5240 struct hclge_desc desc; 5241 struct hclge_cfg_com_tqp_queue_cmd *req = 5242 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 5243 int ret; 5244 5245 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 5246 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); 5247 req->stream_id = cpu_to_le16(stream_id); 5248 req->enable |= enable << HCLGE_TQP_ENABLE_B; 5249 5250 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5251 if (ret) 5252 dev_err(&hdev->pdev->dev, 5253 "Tqp enable fail, status =%d.\n", ret); 5254 return ret; 5255 } 5256 5257 static int hclge_set_loopback(struct hnae3_handle *handle, 5258 enum hnae3_loop loop_mode, bool en) 5259 { 5260 struct hclge_vport *vport = hclge_get_vport(handle); 5261 struct hclge_dev *hdev = vport->back; 5262 int i, ret; 5263 5264 switch (loop_mode) { 5265 case HNAE3_LOOP_APP: 5266 ret = hclge_set_app_loopback(hdev, en); 5267 break; 5268 case HNAE3_LOOP_SERIAL_SERDES: 5269 case HNAE3_LOOP_PARALLEL_SERDES: 5270 ret = hclge_set_serdes_loopback(hdev, en, loop_mode); 5271 break; 5272 default: 5273 ret = -ENOTSUPP; 5274 dev_err(&hdev->pdev->dev, 5275 "loop_mode %d is not supported\n", loop_mode); 5276 break; 5277 } 5278 5279 for (i = 0; i < vport->alloc_tqps; i++) { 5280 ret = hclge_tqp_enable(hdev, i, 0, en); 5281 if (ret) 5282 return ret; 5283 } 5284 5285 return 0; 5286 } 5287 5288 static void hclge_reset_tqp_stats(struct hnae3_handle *handle) 5289 { 5290 struct hclge_vport *vport = hclge_get_vport(handle); 5291 struct hnae3_queue *queue; 5292 struct hclge_tqp *tqp; 5293 int i; 5294 5295 for (i = 0; i < vport->alloc_tqps; i++) { 5296 queue = handle->kinfo.tqp[i]; 5297 tqp = container_of(queue, struct hclge_tqp, q); 5298 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 5299 } 5300 } 5301 5302 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) 5303 { 5304 struct hclge_vport *vport = hclge_get_vport(handle); 5305 struct hclge_dev *hdev = vport->back; 5306 5307 if (enable) { 5308 mod_timer(&hdev->service_timer, jiffies + HZ); 5309 } else { 5310 del_timer_sync(&hdev->service_timer); 5311 cancel_work_sync(&hdev->service_task); 5312 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 5313 } 5314 } 5315 5316 static int hclge_ae_start(struct hnae3_handle *handle) 5317 { 5318 struct hclge_vport *vport = hclge_get_vport(handle); 5319 struct hclge_dev *hdev = vport->back; 5320 5321 /* mac enable */ 5322 hclge_cfg_mac_mode(hdev, true); 5323 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 5324 hdev->hw.mac.link = 0; 5325 5326 /* reset tqp stats */ 5327 hclge_reset_tqp_stats(handle); 5328 5329 hclge_mac_start_phy(hdev); 5330 5331 return 0; 5332 } 5333 5334 static void hclge_ae_stop(struct hnae3_handle *handle) 5335 { 5336 struct hclge_vport *vport = hclge_get_vport(handle); 5337 struct hclge_dev *hdev = vport->back; 5338 int i; 5339 5340 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5341 5342 /* If it is not PF reset, the firmware will disable the MAC, 5343 * so it only need to stop phy here. 5344 */ 5345 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && 5346 hdev->reset_type != HNAE3_FUNC_RESET) { 5347 hclge_mac_stop_phy(hdev); 5348 return; 5349 } 5350 5351 for (i = 0; i < handle->kinfo.num_tqps; i++) 5352 hclge_reset_tqp(handle, i); 5353 5354 /* Mac disable */ 5355 hclge_cfg_mac_mode(hdev, false); 5356 5357 hclge_mac_stop_phy(hdev); 5358 5359 /* reset tqp stats */ 5360 hclge_reset_tqp_stats(handle); 5361 hclge_update_link_status(hdev); 5362 } 5363 5364 int hclge_vport_start(struct hclge_vport *vport) 5365 { 5366 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 5367 vport->last_active_jiffies = jiffies; 5368 return 0; 5369 } 5370 5371 void hclge_vport_stop(struct hclge_vport *vport) 5372 { 5373 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 5374 } 5375 5376 static int hclge_client_start(struct hnae3_handle *handle) 5377 { 5378 struct hclge_vport *vport = hclge_get_vport(handle); 5379 5380 return hclge_vport_start(vport); 5381 } 5382 5383 static void hclge_client_stop(struct hnae3_handle *handle) 5384 { 5385 struct hclge_vport *vport = hclge_get_vport(handle); 5386 5387 hclge_vport_stop(vport); 5388 } 5389 5390 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 5391 u16 cmdq_resp, u8 resp_code, 5392 enum hclge_mac_vlan_tbl_opcode op) 5393 { 5394 struct hclge_dev *hdev = vport->back; 5395 int return_status = -EIO; 5396 5397 if (cmdq_resp) { 5398 dev_err(&hdev->pdev->dev, 5399 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", 5400 cmdq_resp); 5401 return -EIO; 5402 } 5403 5404 if (op == HCLGE_MAC_VLAN_ADD) { 5405 if ((!resp_code) || (resp_code == 1)) { 5406 return_status = 0; 5407 } else if (resp_code == 2) { 5408 return_status = -ENOSPC; 5409 dev_err(&hdev->pdev->dev, 5410 "add mac addr failed for uc_overflow.\n"); 5411 } else if (resp_code == 3) { 5412 return_status = -ENOSPC; 5413 dev_err(&hdev->pdev->dev, 5414 "add mac addr failed for mc_overflow.\n"); 5415 } else { 5416 dev_err(&hdev->pdev->dev, 5417 "add mac addr failed for undefined, code=%d.\n", 5418 resp_code); 5419 } 5420 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 5421 if (!resp_code) { 5422 return_status = 0; 5423 } else if (resp_code == 1) { 5424 return_status = -ENOENT; 5425 dev_dbg(&hdev->pdev->dev, 5426 "remove mac addr failed for miss.\n"); 5427 } else { 5428 dev_err(&hdev->pdev->dev, 5429 "remove mac addr failed for undefined, code=%d.\n", 5430 resp_code); 5431 } 5432 } else if (op == HCLGE_MAC_VLAN_LKUP) { 5433 if (!resp_code) { 5434 return_status = 0; 5435 } else if (resp_code == 1) { 5436 return_status = -ENOENT; 5437 dev_dbg(&hdev->pdev->dev, 5438 "lookup mac addr failed for miss.\n"); 5439 } else { 5440 dev_err(&hdev->pdev->dev, 5441 "lookup mac addr failed for undefined, code=%d.\n", 5442 resp_code); 5443 } 5444 } else { 5445 return_status = -EINVAL; 5446 dev_err(&hdev->pdev->dev, 5447 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", 5448 op); 5449 } 5450 5451 return return_status; 5452 } 5453 5454 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 5455 { 5456 int word_num; 5457 int bit_num; 5458 5459 if (vfid > 255 || vfid < 0) 5460 return -EIO; 5461 5462 if (vfid >= 0 && vfid <= 191) { 5463 word_num = vfid / 32; 5464 bit_num = vfid % 32; 5465 if (clr) 5466 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 5467 else 5468 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 5469 } else { 5470 word_num = (vfid - 192) / 32; 5471 bit_num = vfid % 32; 5472 if (clr) 5473 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 5474 else 5475 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 5476 } 5477 5478 return 0; 5479 } 5480 5481 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 5482 { 5483 #define HCLGE_DESC_NUMBER 3 5484 #define HCLGE_FUNC_NUMBER_PER_DESC 6 5485 int i, j; 5486 5487 for (i = 1; i < HCLGE_DESC_NUMBER; i++) 5488 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 5489 if (desc[i].data[j]) 5490 return false; 5491 5492 return true; 5493 } 5494 5495 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 5496 const u8 *addr) 5497 { 5498 const unsigned char *mac_addr = addr; 5499 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 5500 (mac_addr[0]) | (mac_addr[1] << 8); 5501 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 5502 5503 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 5504 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 5505 } 5506 5507 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 5508 struct hclge_mac_vlan_tbl_entry_cmd *req) 5509 { 5510 struct hclge_dev *hdev = vport->back; 5511 struct hclge_desc desc; 5512 u8 resp_code; 5513 u16 retval; 5514 int ret; 5515 5516 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 5517 5518 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 5519 5520 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5521 if (ret) { 5522 dev_err(&hdev->pdev->dev, 5523 "del mac addr failed for cmd_send, ret =%d.\n", 5524 ret); 5525 return ret; 5526 } 5527 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 5528 retval = le16_to_cpu(desc.retval); 5529 5530 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 5531 HCLGE_MAC_VLAN_REMOVE); 5532 } 5533 5534 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 5535 struct hclge_mac_vlan_tbl_entry_cmd *req, 5536 struct hclge_desc *desc, 5537 bool is_mc) 5538 { 5539 struct hclge_dev *hdev = vport->back; 5540 u8 resp_code; 5541 u16 retval; 5542 int ret; 5543 5544 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 5545 if (is_mc) { 5546 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 5547 memcpy(desc[0].data, 5548 req, 5549 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 5550 hclge_cmd_setup_basic_desc(&desc[1], 5551 HCLGE_OPC_MAC_VLAN_ADD, 5552 true); 5553 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 5554 hclge_cmd_setup_basic_desc(&desc[2], 5555 HCLGE_OPC_MAC_VLAN_ADD, 5556 true); 5557 ret = hclge_cmd_send(&hdev->hw, desc, 3); 5558 } else { 5559 memcpy(desc[0].data, 5560 req, 5561 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 5562 ret = hclge_cmd_send(&hdev->hw, desc, 1); 5563 } 5564 if (ret) { 5565 dev_err(&hdev->pdev->dev, 5566 "lookup mac addr failed for cmd_send, ret =%d.\n", 5567 ret); 5568 return ret; 5569 } 5570 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 5571 retval = le16_to_cpu(desc[0].retval); 5572 5573 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 5574 HCLGE_MAC_VLAN_LKUP); 5575 } 5576 5577 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 5578 struct hclge_mac_vlan_tbl_entry_cmd *req, 5579 struct hclge_desc *mc_desc) 5580 { 5581 struct hclge_dev *hdev = vport->back; 5582 int cfg_status; 5583 u8 resp_code; 5584 u16 retval; 5585 int ret; 5586 5587 if (!mc_desc) { 5588 struct hclge_desc desc; 5589 5590 hclge_cmd_setup_basic_desc(&desc, 5591 HCLGE_OPC_MAC_VLAN_ADD, 5592 false); 5593 memcpy(desc.data, req, 5594 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 5595 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5596 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 5597 retval = le16_to_cpu(desc.retval); 5598 5599 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 5600 resp_code, 5601 HCLGE_MAC_VLAN_ADD); 5602 } else { 5603 hclge_cmd_reuse_desc(&mc_desc[0], false); 5604 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 5605 hclge_cmd_reuse_desc(&mc_desc[1], false); 5606 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 5607 hclge_cmd_reuse_desc(&mc_desc[2], false); 5608 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 5609 memcpy(mc_desc[0].data, req, 5610 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 5611 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 5612 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 5613 retval = le16_to_cpu(mc_desc[0].retval); 5614 5615 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 5616 resp_code, 5617 HCLGE_MAC_VLAN_ADD); 5618 } 5619 5620 if (ret) { 5621 dev_err(&hdev->pdev->dev, 5622 "add mac addr failed for cmd_send, ret =%d.\n", 5623 ret); 5624 return ret; 5625 } 5626 5627 return cfg_status; 5628 } 5629 5630 static int hclge_init_umv_space(struct hclge_dev *hdev) 5631 { 5632 u16 allocated_size = 0; 5633 int ret; 5634 5635 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, 5636 true); 5637 if (ret) 5638 return ret; 5639 5640 if (allocated_size < hdev->wanted_umv_size) 5641 dev_warn(&hdev->pdev->dev, 5642 "Alloc umv space failed, want %d, get %d\n", 5643 hdev->wanted_umv_size, allocated_size); 5644 5645 mutex_init(&hdev->umv_mutex); 5646 hdev->max_umv_size = allocated_size; 5647 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); 5648 hdev->share_umv_size = hdev->priv_umv_size + 5649 hdev->max_umv_size % (hdev->num_req_vfs + 2); 5650 5651 return 0; 5652 } 5653 5654 static int hclge_uninit_umv_space(struct hclge_dev *hdev) 5655 { 5656 int ret; 5657 5658 if (hdev->max_umv_size > 0) { 5659 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, 5660 false); 5661 if (ret) 5662 return ret; 5663 hdev->max_umv_size = 0; 5664 } 5665 mutex_destroy(&hdev->umv_mutex); 5666 5667 return 0; 5668 } 5669 5670 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, 5671 u16 *allocated_size, bool is_alloc) 5672 { 5673 struct hclge_umv_spc_alc_cmd *req; 5674 struct hclge_desc desc; 5675 int ret; 5676 5677 req = (struct hclge_umv_spc_alc_cmd *)desc.data; 5678 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); 5679 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); 5680 req->space_size = cpu_to_le32(space_size); 5681 5682 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5683 if (ret) { 5684 dev_err(&hdev->pdev->dev, 5685 "%s umv space failed for cmd_send, ret =%d\n", 5686 is_alloc ? "allocate" : "free", ret); 5687 return ret; 5688 } 5689 5690 if (is_alloc && allocated_size) 5691 *allocated_size = le32_to_cpu(desc.data[1]); 5692 5693 return 0; 5694 } 5695 5696 static void hclge_reset_umv_space(struct hclge_dev *hdev) 5697 { 5698 struct hclge_vport *vport; 5699 int i; 5700 5701 for (i = 0; i < hdev->num_alloc_vport; i++) { 5702 vport = &hdev->vport[i]; 5703 vport->used_umv_num = 0; 5704 } 5705 5706 mutex_lock(&hdev->umv_mutex); 5707 hdev->share_umv_size = hdev->priv_umv_size + 5708 hdev->max_umv_size % (hdev->num_req_vfs + 2); 5709 mutex_unlock(&hdev->umv_mutex); 5710 } 5711 5712 static bool hclge_is_umv_space_full(struct hclge_vport *vport) 5713 { 5714 struct hclge_dev *hdev = vport->back; 5715 bool is_full; 5716 5717 mutex_lock(&hdev->umv_mutex); 5718 is_full = (vport->used_umv_num >= hdev->priv_umv_size && 5719 hdev->share_umv_size == 0); 5720 mutex_unlock(&hdev->umv_mutex); 5721 5722 return is_full; 5723 } 5724 5725 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) 5726 { 5727 struct hclge_dev *hdev = vport->back; 5728 5729 mutex_lock(&hdev->umv_mutex); 5730 if (is_free) { 5731 if (vport->used_umv_num > hdev->priv_umv_size) 5732 hdev->share_umv_size++; 5733 vport->used_umv_num--; 5734 } else { 5735 if (vport->used_umv_num >= hdev->priv_umv_size) 5736 hdev->share_umv_size--; 5737 vport->used_umv_num++; 5738 } 5739 mutex_unlock(&hdev->umv_mutex); 5740 } 5741 5742 static int hclge_add_uc_addr(struct hnae3_handle *handle, 5743 const unsigned char *addr) 5744 { 5745 struct hclge_vport *vport = hclge_get_vport(handle); 5746 5747 return hclge_add_uc_addr_common(vport, addr); 5748 } 5749 5750 int hclge_add_uc_addr_common(struct hclge_vport *vport, 5751 const unsigned char *addr) 5752 { 5753 struct hclge_dev *hdev = vport->back; 5754 struct hclge_mac_vlan_tbl_entry_cmd req; 5755 struct hclge_desc desc; 5756 u16 egress_port = 0; 5757 int ret; 5758 5759 /* mac addr check */ 5760 if (is_zero_ether_addr(addr) || 5761 is_broadcast_ether_addr(addr) || 5762 is_multicast_ether_addr(addr)) { 5763 dev_err(&hdev->pdev->dev, 5764 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", 5765 addr, 5766 is_zero_ether_addr(addr), 5767 is_broadcast_ether_addr(addr), 5768 is_multicast_ether_addr(addr)); 5769 return -EINVAL; 5770 } 5771 5772 memset(&req, 0, sizeof(req)); 5773 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5774 5775 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 5776 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 5777 5778 req.egress_port = cpu_to_le16(egress_port); 5779 5780 hclge_prepare_mac_addr(&req, addr); 5781 5782 /* Lookup the mac address in the mac_vlan table, and add 5783 * it if the entry is inexistent. Repeated unicast entry 5784 * is not allowed in the mac vlan table. 5785 */ 5786 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 5787 if (ret == -ENOENT) { 5788 if (!hclge_is_umv_space_full(vport)) { 5789 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); 5790 if (!ret) 5791 hclge_update_umv_space(vport, false); 5792 return ret; 5793 } 5794 5795 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", 5796 hdev->priv_umv_size); 5797 5798 return -ENOSPC; 5799 } 5800 5801 /* check if we just hit the duplicate */ 5802 if (!ret) 5803 ret = -EINVAL; 5804 5805 dev_err(&hdev->pdev->dev, 5806 "PF failed to add unicast entry(%pM) in the MAC table\n", 5807 addr); 5808 5809 return ret; 5810 } 5811 5812 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 5813 const unsigned char *addr) 5814 { 5815 struct hclge_vport *vport = hclge_get_vport(handle); 5816 5817 return hclge_rm_uc_addr_common(vport, addr); 5818 } 5819 5820 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 5821 const unsigned char *addr) 5822 { 5823 struct hclge_dev *hdev = vport->back; 5824 struct hclge_mac_vlan_tbl_entry_cmd req; 5825 int ret; 5826 5827 /* mac addr check */ 5828 if (is_zero_ether_addr(addr) || 5829 is_broadcast_ether_addr(addr) || 5830 is_multicast_ether_addr(addr)) { 5831 dev_dbg(&hdev->pdev->dev, 5832 "Remove mac err! invalid mac:%pM.\n", 5833 addr); 5834 return -EINVAL; 5835 } 5836 5837 memset(&req, 0, sizeof(req)); 5838 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5839 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 5840 hclge_prepare_mac_addr(&req, addr); 5841 ret = hclge_remove_mac_vlan_tbl(vport, &req); 5842 if (!ret) 5843 hclge_update_umv_space(vport, true); 5844 5845 return ret; 5846 } 5847 5848 static int hclge_add_mc_addr(struct hnae3_handle *handle, 5849 const unsigned char *addr) 5850 { 5851 struct hclge_vport *vport = hclge_get_vport(handle); 5852 5853 return hclge_add_mc_addr_common(vport, addr); 5854 } 5855 5856 int hclge_add_mc_addr_common(struct hclge_vport *vport, 5857 const unsigned char *addr) 5858 { 5859 struct hclge_dev *hdev = vport->back; 5860 struct hclge_mac_vlan_tbl_entry_cmd req; 5861 struct hclge_desc desc[3]; 5862 int status; 5863 5864 /* mac addr check */ 5865 if (!is_multicast_ether_addr(addr)) { 5866 dev_err(&hdev->pdev->dev, 5867 "Add mc mac err! invalid mac:%pM.\n", 5868 addr); 5869 return -EINVAL; 5870 } 5871 memset(&req, 0, sizeof(req)); 5872 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5873 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 5874 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 5875 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5876 hclge_prepare_mac_addr(&req, addr); 5877 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 5878 if (!status) { 5879 /* This mac addr exist, update VFID for it */ 5880 hclge_update_desc_vfid(desc, vport->vport_id, false); 5881 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 5882 } else { 5883 /* This mac addr do not exist, add new entry for it */ 5884 memset(desc[0].data, 0, sizeof(desc[0].data)); 5885 memset(desc[1].data, 0, sizeof(desc[0].data)); 5886 memset(desc[2].data, 0, sizeof(desc[0].data)); 5887 hclge_update_desc_vfid(desc, vport->vport_id, false); 5888 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 5889 } 5890 5891 if (status == -ENOSPC) 5892 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); 5893 5894 return status; 5895 } 5896 5897 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 5898 const unsigned char *addr) 5899 { 5900 struct hclge_vport *vport = hclge_get_vport(handle); 5901 5902 return hclge_rm_mc_addr_common(vport, addr); 5903 } 5904 5905 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 5906 const unsigned char *addr) 5907 { 5908 struct hclge_dev *hdev = vport->back; 5909 struct hclge_mac_vlan_tbl_entry_cmd req; 5910 enum hclge_cmd_status status; 5911 struct hclge_desc desc[3]; 5912 5913 /* mac addr check */ 5914 if (!is_multicast_ether_addr(addr)) { 5915 dev_dbg(&hdev->pdev->dev, 5916 "Remove mc mac err! invalid mac:%pM.\n", 5917 addr); 5918 return -EINVAL; 5919 } 5920 5921 memset(&req, 0, sizeof(req)); 5922 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5923 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 5924 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 5925 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5926 hclge_prepare_mac_addr(&req, addr); 5927 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 5928 if (!status) { 5929 /* This mac addr exist, remove this handle's VFID for it */ 5930 hclge_update_desc_vfid(desc, vport->vport_id, true); 5931 5932 if (hclge_is_all_function_id_zero(desc)) 5933 /* All the vfid is zero, so need to delete this entry */ 5934 status = hclge_remove_mac_vlan_tbl(vport, &req); 5935 else 5936 /* Not all the vfid is zero, update the vfid */ 5937 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 5938 5939 } else { 5940 /* Maybe this mac address is in mta table, but it cannot be 5941 * deleted here because an entry of mta represents an address 5942 * range rather than a specific address. the delete action to 5943 * all entries will take effect in update_mta_status called by 5944 * hns3_nic_set_rx_mode. 5945 */ 5946 status = 0; 5947 } 5948 5949 return status; 5950 } 5951 5952 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 5953 u16 cmdq_resp, u8 resp_code) 5954 { 5955 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 5956 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 5957 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 5958 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 5959 5960 int return_status; 5961 5962 if (cmdq_resp) { 5963 dev_err(&hdev->pdev->dev, 5964 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", 5965 cmdq_resp); 5966 return -EIO; 5967 } 5968 5969 switch (resp_code) { 5970 case HCLGE_ETHERTYPE_SUCCESS_ADD: 5971 case HCLGE_ETHERTYPE_ALREADY_ADD: 5972 return_status = 0; 5973 break; 5974 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 5975 dev_err(&hdev->pdev->dev, 5976 "add mac ethertype failed for manager table overflow.\n"); 5977 return_status = -EIO; 5978 break; 5979 case HCLGE_ETHERTYPE_KEY_CONFLICT: 5980 dev_err(&hdev->pdev->dev, 5981 "add mac ethertype failed for key conflict.\n"); 5982 return_status = -EIO; 5983 break; 5984 default: 5985 dev_err(&hdev->pdev->dev, 5986 "add mac ethertype failed for undefined, code=%d.\n", 5987 resp_code); 5988 return_status = -EIO; 5989 } 5990 5991 return return_status; 5992 } 5993 5994 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 5995 const struct hclge_mac_mgr_tbl_entry_cmd *req) 5996 { 5997 struct hclge_desc desc; 5998 u8 resp_code; 5999 u16 retval; 6000 int ret; 6001 6002 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 6003 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 6004 6005 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6006 if (ret) { 6007 dev_err(&hdev->pdev->dev, 6008 "add mac ethertype failed for cmd_send, ret =%d.\n", 6009 ret); 6010 return ret; 6011 } 6012 6013 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 6014 retval = le16_to_cpu(desc.retval); 6015 6016 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 6017 } 6018 6019 static int init_mgr_tbl(struct hclge_dev *hdev) 6020 { 6021 int ret; 6022 int i; 6023 6024 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 6025 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 6026 if (ret) { 6027 dev_err(&hdev->pdev->dev, 6028 "add mac ethertype failed, ret =%d.\n", 6029 ret); 6030 return ret; 6031 } 6032 } 6033 6034 return 0; 6035 } 6036 6037 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 6038 { 6039 struct hclge_vport *vport = hclge_get_vport(handle); 6040 struct hclge_dev *hdev = vport->back; 6041 6042 ether_addr_copy(p, hdev->hw.mac.mac_addr); 6043 } 6044 6045 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, 6046 bool is_first) 6047 { 6048 const unsigned char *new_addr = (const unsigned char *)p; 6049 struct hclge_vport *vport = hclge_get_vport(handle); 6050 struct hclge_dev *hdev = vport->back; 6051 int ret; 6052 6053 /* mac addr check */ 6054 if (is_zero_ether_addr(new_addr) || 6055 is_broadcast_ether_addr(new_addr) || 6056 is_multicast_ether_addr(new_addr)) { 6057 dev_err(&hdev->pdev->dev, 6058 "Change uc mac err! invalid mac:%p.\n", 6059 new_addr); 6060 return -EINVAL; 6061 } 6062 6063 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) 6064 dev_warn(&hdev->pdev->dev, 6065 "remove old uc mac address fail.\n"); 6066 6067 ret = hclge_add_uc_addr(handle, new_addr); 6068 if (ret) { 6069 dev_err(&hdev->pdev->dev, 6070 "add uc mac address fail, ret =%d.\n", 6071 ret); 6072 6073 if (!is_first && 6074 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) 6075 dev_err(&hdev->pdev->dev, 6076 "restore uc mac address fail.\n"); 6077 6078 return -EIO; 6079 } 6080 6081 ret = hclge_pause_addr_cfg(hdev, new_addr); 6082 if (ret) { 6083 dev_err(&hdev->pdev->dev, 6084 "configure mac pause address fail, ret =%d.\n", 6085 ret); 6086 return -EIO; 6087 } 6088 6089 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 6090 6091 return 0; 6092 } 6093 6094 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, 6095 int cmd) 6096 { 6097 struct hclge_vport *vport = hclge_get_vport(handle); 6098 struct hclge_dev *hdev = vport->back; 6099 6100 if (!hdev->hw.mac.phydev) 6101 return -EOPNOTSUPP; 6102 6103 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); 6104 } 6105 6106 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 6107 u8 fe_type, bool filter_en) 6108 { 6109 struct hclge_vlan_filter_ctrl_cmd *req; 6110 struct hclge_desc desc; 6111 int ret; 6112 6113 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 6114 6115 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 6116 req->vlan_type = vlan_type; 6117 req->vlan_fe = filter_en ? fe_type : 0; 6118 6119 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6120 if (ret) 6121 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 6122 ret); 6123 6124 return ret; 6125 } 6126 6127 #define HCLGE_FILTER_TYPE_VF 0 6128 #define HCLGE_FILTER_TYPE_PORT 1 6129 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) 6130 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) 6131 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) 6132 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) 6133 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) 6134 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ 6135 | HCLGE_FILTER_FE_ROCE_EGRESS_B) 6136 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ 6137 | HCLGE_FILTER_FE_ROCE_INGRESS_B) 6138 6139 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 6140 { 6141 struct hclge_vport *vport = hclge_get_vport(handle); 6142 struct hclge_dev *hdev = vport->back; 6143 6144 if (hdev->pdev->revision >= 0x21) { 6145 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 6146 HCLGE_FILTER_FE_EGRESS, enable); 6147 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 6148 HCLGE_FILTER_FE_INGRESS, enable); 6149 } else { 6150 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 6151 HCLGE_FILTER_FE_EGRESS_V1_B, enable); 6152 } 6153 if (enable) 6154 handle->netdev_flags |= HNAE3_VLAN_FLTR; 6155 else 6156 handle->netdev_flags &= ~HNAE3_VLAN_FLTR; 6157 } 6158 6159 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 6160 bool is_kill, u16 vlan, u8 qos, 6161 __be16 proto) 6162 { 6163 #define HCLGE_MAX_VF_BYTES 16 6164 struct hclge_vlan_filter_vf_cfg_cmd *req0; 6165 struct hclge_vlan_filter_vf_cfg_cmd *req1; 6166 struct hclge_desc desc[2]; 6167 u8 vf_byte_val; 6168 u8 vf_byte_off; 6169 int ret; 6170 6171 hclge_cmd_setup_basic_desc(&desc[0], 6172 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 6173 hclge_cmd_setup_basic_desc(&desc[1], 6174 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 6175 6176 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 6177 6178 vf_byte_off = vfid / 8; 6179 vf_byte_val = 1 << (vfid % 8); 6180 6181 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 6182 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 6183 6184 req0->vlan_id = cpu_to_le16(vlan); 6185 req0->vlan_cfg = is_kill; 6186 6187 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 6188 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 6189 else 6190 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 6191 6192 ret = hclge_cmd_send(&hdev->hw, desc, 2); 6193 if (ret) { 6194 dev_err(&hdev->pdev->dev, 6195 "Send vf vlan command fail, ret =%d.\n", 6196 ret); 6197 return ret; 6198 } 6199 6200 if (!is_kill) { 6201 #define HCLGE_VF_VLAN_NO_ENTRY 2 6202 if (!req0->resp_code || req0->resp_code == 1) 6203 return 0; 6204 6205 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { 6206 dev_warn(&hdev->pdev->dev, 6207 "vf vlan table is full, vf vlan filter is disabled\n"); 6208 return 0; 6209 } 6210 6211 dev_err(&hdev->pdev->dev, 6212 "Add vf vlan filter fail, ret =%d.\n", 6213 req0->resp_code); 6214 } else { 6215 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 6216 if (!req0->resp_code) 6217 return 0; 6218 6219 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { 6220 dev_warn(&hdev->pdev->dev, 6221 "vlan %d filter is not in vf vlan table\n", 6222 vlan); 6223 return 0; 6224 } 6225 6226 dev_err(&hdev->pdev->dev, 6227 "Kill vf vlan filter fail, ret =%d.\n", 6228 req0->resp_code); 6229 } 6230 6231 return -EIO; 6232 } 6233 6234 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 6235 u16 vlan_id, bool is_kill) 6236 { 6237 struct hclge_vlan_filter_pf_cfg_cmd *req; 6238 struct hclge_desc desc; 6239 u8 vlan_offset_byte_val; 6240 u8 vlan_offset_byte; 6241 u8 vlan_offset_160; 6242 int ret; 6243 6244 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 6245 6246 vlan_offset_160 = vlan_id / 160; 6247 vlan_offset_byte = (vlan_id % 160) / 8; 6248 vlan_offset_byte_val = 1 << (vlan_id % 8); 6249 6250 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 6251 req->vlan_offset = vlan_offset_160; 6252 req->vlan_cfg = is_kill; 6253 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 6254 6255 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6256 if (ret) 6257 dev_err(&hdev->pdev->dev, 6258 "port vlan command, send fail, ret =%d.\n", ret); 6259 return ret; 6260 } 6261 6262 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 6263 u16 vport_id, u16 vlan_id, u8 qos, 6264 bool is_kill) 6265 { 6266 u16 vport_idx, vport_num = 0; 6267 int ret; 6268 6269 if (is_kill && !vlan_id) 6270 return 0; 6271 6272 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 6273 0, proto); 6274 if (ret) { 6275 dev_err(&hdev->pdev->dev, 6276 "Set %d vport vlan filter config fail, ret =%d.\n", 6277 vport_id, ret); 6278 return ret; 6279 } 6280 6281 /* vlan 0 may be added twice when 8021q module is enabled */ 6282 if (!is_kill && !vlan_id && 6283 test_bit(vport_id, hdev->vlan_table[vlan_id])) 6284 return 0; 6285 6286 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 6287 dev_err(&hdev->pdev->dev, 6288 "Add port vlan failed, vport %d is already in vlan %d\n", 6289 vport_id, vlan_id); 6290 return -EINVAL; 6291 } 6292 6293 if (is_kill && 6294 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 6295 dev_err(&hdev->pdev->dev, 6296 "Delete port vlan failed, vport %d is not in vlan %d\n", 6297 vport_id, vlan_id); 6298 return -EINVAL; 6299 } 6300 6301 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) 6302 vport_num++; 6303 6304 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 6305 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 6306 is_kill); 6307 6308 return ret; 6309 } 6310 6311 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 6312 u16 vlan_id, bool is_kill) 6313 { 6314 struct hclge_vport *vport = hclge_get_vport(handle); 6315 struct hclge_dev *hdev = vport->back; 6316 6317 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, 6318 0, is_kill); 6319 } 6320 6321 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 6322 u16 vlan, u8 qos, __be16 proto) 6323 { 6324 struct hclge_vport *vport = hclge_get_vport(handle); 6325 struct hclge_dev *hdev = vport->back; 6326 6327 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) 6328 return -EINVAL; 6329 if (proto != htons(ETH_P_8021Q)) 6330 return -EPROTONOSUPPORT; 6331 6332 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); 6333 } 6334 6335 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 6336 { 6337 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 6338 struct hclge_vport_vtag_tx_cfg_cmd *req; 6339 struct hclge_dev *hdev = vport->back; 6340 struct hclge_desc desc; 6341 int status; 6342 6343 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 6344 6345 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; 6346 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 6347 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 6348 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 6349 vcfg->accept_tag1 ? 1 : 0); 6350 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 6351 vcfg->accept_untag1 ? 1 : 0); 6352 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 6353 vcfg->accept_tag2 ? 1 : 0); 6354 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 6355 vcfg->accept_untag2 ? 1 : 0); 6356 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 6357 vcfg->insert_tag1_en ? 1 : 0); 6358 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 6359 vcfg->insert_tag2_en ? 1 : 0); 6360 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 6361 6362 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 6363 req->vf_bitmap[req->vf_offset] = 6364 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 6365 6366 status = hclge_cmd_send(&hdev->hw, &desc, 1); 6367 if (status) 6368 dev_err(&hdev->pdev->dev, 6369 "Send port txvlan cfg command fail, ret =%d\n", 6370 status); 6371 6372 return status; 6373 } 6374 6375 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 6376 { 6377 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 6378 struct hclge_vport_vtag_rx_cfg_cmd *req; 6379 struct hclge_dev *hdev = vport->back; 6380 struct hclge_desc desc; 6381 int status; 6382 6383 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 6384 6385 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 6386 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 6387 vcfg->strip_tag1_en ? 1 : 0); 6388 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 6389 vcfg->strip_tag2_en ? 1 : 0); 6390 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 6391 vcfg->vlan1_vlan_prionly ? 1 : 0); 6392 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 6393 vcfg->vlan2_vlan_prionly ? 1 : 0); 6394 6395 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 6396 req->vf_bitmap[req->vf_offset] = 6397 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 6398 6399 status = hclge_cmd_send(&hdev->hw, &desc, 1); 6400 if (status) 6401 dev_err(&hdev->pdev->dev, 6402 "Send port rxvlan cfg command fail, ret =%d\n", 6403 status); 6404 6405 return status; 6406 } 6407 6408 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 6409 { 6410 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 6411 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 6412 struct hclge_desc desc; 6413 int status; 6414 6415 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 6416 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 6417 rx_req->ot_fst_vlan_type = 6418 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 6419 rx_req->ot_sec_vlan_type = 6420 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 6421 rx_req->in_fst_vlan_type = 6422 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 6423 rx_req->in_sec_vlan_type = 6424 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 6425 6426 status = hclge_cmd_send(&hdev->hw, &desc, 1); 6427 if (status) { 6428 dev_err(&hdev->pdev->dev, 6429 "Send rxvlan protocol type command fail, ret =%d\n", 6430 status); 6431 return status; 6432 } 6433 6434 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 6435 6436 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; 6437 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 6438 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 6439 6440 status = hclge_cmd_send(&hdev->hw, &desc, 1); 6441 if (status) 6442 dev_err(&hdev->pdev->dev, 6443 "Send txvlan protocol type command fail, ret =%d\n", 6444 status); 6445 6446 return status; 6447 } 6448 6449 static int hclge_init_vlan_config(struct hclge_dev *hdev) 6450 { 6451 #define HCLGE_DEF_VLAN_TYPE 0x8100 6452 6453 struct hnae3_handle *handle = &hdev->vport[0].nic; 6454 struct hclge_vport *vport; 6455 int ret; 6456 int i; 6457 6458 if (hdev->pdev->revision >= 0x21) { 6459 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 6460 HCLGE_FILTER_FE_EGRESS, true); 6461 if (ret) 6462 return ret; 6463 6464 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 6465 HCLGE_FILTER_FE_INGRESS, true); 6466 if (ret) 6467 return ret; 6468 } else { 6469 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 6470 HCLGE_FILTER_FE_EGRESS_V1_B, 6471 true); 6472 if (ret) 6473 return ret; 6474 } 6475 6476 handle->netdev_flags |= HNAE3_VLAN_FLTR; 6477 6478 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 6479 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 6480 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 6481 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 6482 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; 6483 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; 6484 6485 ret = hclge_set_vlan_protocol_type(hdev); 6486 if (ret) 6487 return ret; 6488 6489 for (i = 0; i < hdev->num_alloc_vport; i++) { 6490 vport = &hdev->vport[i]; 6491 vport->txvlan_cfg.accept_tag1 = true; 6492 vport->txvlan_cfg.accept_untag1 = true; 6493 6494 /* accept_tag2 and accept_untag2 are not supported on 6495 * pdev revision(0x20), new revision support them. The 6496 * value of this two fields will not return error when driver 6497 * send command to fireware in revision(0x20). 6498 * This two fields can not configured by user. 6499 */ 6500 vport->txvlan_cfg.accept_tag2 = true; 6501 vport->txvlan_cfg.accept_untag2 = true; 6502 6503 vport->txvlan_cfg.insert_tag1_en = false; 6504 vport->txvlan_cfg.insert_tag2_en = false; 6505 vport->txvlan_cfg.default_tag1 = 0; 6506 vport->txvlan_cfg.default_tag2 = 0; 6507 6508 ret = hclge_set_vlan_tx_offload_cfg(vport); 6509 if (ret) 6510 return ret; 6511 6512 vport->rxvlan_cfg.strip_tag1_en = false; 6513 vport->rxvlan_cfg.strip_tag2_en = true; 6514 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 6515 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 6516 6517 ret = hclge_set_vlan_rx_offload_cfg(vport); 6518 if (ret) 6519 return ret; 6520 } 6521 6522 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 6523 } 6524 6525 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 6526 { 6527 struct hclge_vport *vport = hclge_get_vport(handle); 6528 6529 vport->rxvlan_cfg.strip_tag1_en = false; 6530 vport->rxvlan_cfg.strip_tag2_en = enable; 6531 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 6532 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 6533 6534 return hclge_set_vlan_rx_offload_cfg(vport); 6535 } 6536 6537 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) 6538 { 6539 struct hclge_config_max_frm_size_cmd *req; 6540 struct hclge_desc desc; 6541 6542 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 6543 6544 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 6545 req->max_frm_size = cpu_to_le16(new_mps); 6546 req->min_frm_size = HCLGE_MAC_MIN_FRAME; 6547 6548 return hclge_cmd_send(&hdev->hw, &desc, 1); 6549 } 6550 6551 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 6552 { 6553 struct hclge_vport *vport = hclge_get_vport(handle); 6554 6555 return hclge_set_vport_mtu(vport, new_mtu); 6556 } 6557 6558 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) 6559 { 6560 struct hclge_dev *hdev = vport->back; 6561 int i, max_frm_size, ret = 0; 6562 6563 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; 6564 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 6565 max_frm_size > HCLGE_MAC_MAX_FRAME) 6566 return -EINVAL; 6567 6568 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 6569 mutex_lock(&hdev->vport_lock); 6570 /* VF's mps must fit within hdev->mps */ 6571 if (vport->vport_id && max_frm_size > hdev->mps) { 6572 mutex_unlock(&hdev->vport_lock); 6573 return -EINVAL; 6574 } else if (vport->vport_id) { 6575 vport->mps = max_frm_size; 6576 mutex_unlock(&hdev->vport_lock); 6577 return 0; 6578 } 6579 6580 /* PF's mps must be greater then VF's mps */ 6581 for (i = 1; i < hdev->num_alloc_vport; i++) 6582 if (max_frm_size < hdev->vport[i].mps) { 6583 mutex_unlock(&hdev->vport_lock); 6584 return -EINVAL; 6585 } 6586 6587 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 6588 6589 ret = hclge_set_mac_mtu(hdev, max_frm_size); 6590 if (ret) { 6591 dev_err(&hdev->pdev->dev, 6592 "Change mtu fail, ret =%d\n", ret); 6593 goto out; 6594 } 6595 6596 hdev->mps = max_frm_size; 6597 vport->mps = max_frm_size; 6598 6599 ret = hclge_buffer_alloc(hdev); 6600 if (ret) 6601 dev_err(&hdev->pdev->dev, 6602 "Allocate buffer fail, ret =%d\n", ret); 6603 6604 out: 6605 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 6606 mutex_unlock(&hdev->vport_lock); 6607 return ret; 6608 } 6609 6610 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, 6611 bool enable) 6612 { 6613 struct hclge_reset_tqp_queue_cmd *req; 6614 struct hclge_desc desc; 6615 int ret; 6616 6617 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 6618 6619 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 6620 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 6621 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); 6622 6623 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6624 if (ret) { 6625 dev_err(&hdev->pdev->dev, 6626 "Send tqp reset cmd error, status =%d\n", ret); 6627 return ret; 6628 } 6629 6630 return 0; 6631 } 6632 6633 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 6634 { 6635 struct hclge_reset_tqp_queue_cmd *req; 6636 struct hclge_desc desc; 6637 int ret; 6638 6639 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 6640 6641 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 6642 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 6643 6644 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6645 if (ret) { 6646 dev_err(&hdev->pdev->dev, 6647 "Get reset status error, status =%d\n", ret); 6648 return ret; 6649 } 6650 6651 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 6652 } 6653 6654 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) 6655 { 6656 struct hnae3_queue *queue; 6657 struct hclge_tqp *tqp; 6658 6659 queue = handle->kinfo.tqp[queue_id]; 6660 tqp = container_of(queue, struct hclge_tqp, q); 6661 6662 return tqp->index; 6663 } 6664 6665 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 6666 { 6667 struct hclge_vport *vport = hclge_get_vport(handle); 6668 struct hclge_dev *hdev = vport->back; 6669 int reset_try_times = 0; 6670 int reset_status; 6671 u16 queue_gid; 6672 int ret = 0; 6673 6674 queue_gid = hclge_covert_handle_qid_global(handle, queue_id); 6675 6676 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 6677 if (ret) { 6678 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); 6679 return ret; 6680 } 6681 6682 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 6683 if (ret) { 6684 dev_err(&hdev->pdev->dev, 6685 "Send reset tqp cmd fail, ret = %d\n", ret); 6686 return ret; 6687 } 6688 6689 reset_try_times = 0; 6690 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 6691 /* Wait for tqp hw reset */ 6692 msleep(20); 6693 reset_status = hclge_get_reset_status(hdev, queue_gid); 6694 if (reset_status) 6695 break; 6696 } 6697 6698 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 6699 dev_err(&hdev->pdev->dev, "Reset TQP fail\n"); 6700 return ret; 6701 } 6702 6703 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 6704 if (ret) 6705 dev_err(&hdev->pdev->dev, 6706 "Deassert the soft reset fail, ret = %d\n", ret); 6707 6708 return ret; 6709 } 6710 6711 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) 6712 { 6713 struct hclge_dev *hdev = vport->back; 6714 int reset_try_times = 0; 6715 int reset_status; 6716 u16 queue_gid; 6717 int ret; 6718 6719 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); 6720 6721 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 6722 if (ret) { 6723 dev_warn(&hdev->pdev->dev, 6724 "Send reset tqp cmd fail, ret = %d\n", ret); 6725 return; 6726 } 6727 6728 reset_try_times = 0; 6729 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 6730 /* Wait for tqp hw reset */ 6731 msleep(20); 6732 reset_status = hclge_get_reset_status(hdev, queue_gid); 6733 if (reset_status) 6734 break; 6735 } 6736 6737 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 6738 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 6739 return; 6740 } 6741 6742 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 6743 if (ret) 6744 dev_warn(&hdev->pdev->dev, 6745 "Deassert the soft reset fail, ret = %d\n", ret); 6746 } 6747 6748 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 6749 { 6750 struct hclge_vport *vport = hclge_get_vport(handle); 6751 struct hclge_dev *hdev = vport->back; 6752 6753 return hdev->fw_version; 6754 } 6755 6756 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 6757 { 6758 struct phy_device *phydev = hdev->hw.mac.phydev; 6759 6760 if (!phydev) 6761 return; 6762 6763 phy_set_asym_pause(phydev, rx_en, tx_en); 6764 } 6765 6766 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 6767 { 6768 int ret; 6769 6770 if (rx_en && tx_en) 6771 hdev->fc_mode_last_time = HCLGE_FC_FULL; 6772 else if (rx_en && !tx_en) 6773 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 6774 else if (!rx_en && tx_en) 6775 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 6776 else 6777 hdev->fc_mode_last_time = HCLGE_FC_NONE; 6778 6779 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 6780 return 0; 6781 6782 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 6783 if (ret) { 6784 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", 6785 ret); 6786 return ret; 6787 } 6788 6789 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 6790 6791 return 0; 6792 } 6793 6794 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 6795 { 6796 struct phy_device *phydev = hdev->hw.mac.phydev; 6797 u16 remote_advertising = 0; 6798 u16 local_advertising = 0; 6799 u32 rx_pause, tx_pause; 6800 u8 flowctl; 6801 6802 if (!phydev->link || !phydev->autoneg) 6803 return 0; 6804 6805 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); 6806 6807 if (phydev->pause) 6808 remote_advertising = LPA_PAUSE_CAP; 6809 6810 if (phydev->asym_pause) 6811 remote_advertising |= LPA_PAUSE_ASYM; 6812 6813 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 6814 remote_advertising); 6815 tx_pause = flowctl & FLOW_CTRL_TX; 6816 rx_pause = flowctl & FLOW_CTRL_RX; 6817 6818 if (phydev->duplex == HCLGE_MAC_HALF) { 6819 tx_pause = 0; 6820 rx_pause = 0; 6821 } 6822 6823 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 6824 } 6825 6826 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 6827 u32 *rx_en, u32 *tx_en) 6828 { 6829 struct hclge_vport *vport = hclge_get_vport(handle); 6830 struct hclge_dev *hdev = vport->back; 6831 6832 *auto_neg = hclge_get_autoneg(handle); 6833 6834 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 6835 *rx_en = 0; 6836 *tx_en = 0; 6837 return; 6838 } 6839 6840 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 6841 *rx_en = 1; 6842 *tx_en = 0; 6843 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 6844 *tx_en = 1; 6845 *rx_en = 0; 6846 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 6847 *rx_en = 1; 6848 *tx_en = 1; 6849 } else { 6850 *rx_en = 0; 6851 *tx_en = 0; 6852 } 6853 } 6854 6855 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 6856 u32 rx_en, u32 tx_en) 6857 { 6858 struct hclge_vport *vport = hclge_get_vport(handle); 6859 struct hclge_dev *hdev = vport->back; 6860 struct phy_device *phydev = hdev->hw.mac.phydev; 6861 u32 fc_autoneg; 6862 6863 fc_autoneg = hclge_get_autoneg(handle); 6864 if (auto_neg != fc_autoneg) { 6865 dev_info(&hdev->pdev->dev, 6866 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 6867 return -EOPNOTSUPP; 6868 } 6869 6870 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 6871 dev_info(&hdev->pdev->dev, 6872 "Priority flow control enabled. Cannot set link flow control.\n"); 6873 return -EOPNOTSUPP; 6874 } 6875 6876 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 6877 6878 if (!fc_autoneg) 6879 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 6880 6881 /* Only support flow control negotiation for netdev with 6882 * phy attached for now. 6883 */ 6884 if (!phydev) 6885 return -EOPNOTSUPP; 6886 6887 return phy_start_aneg(phydev); 6888 } 6889 6890 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 6891 u8 *auto_neg, u32 *speed, u8 *duplex) 6892 { 6893 struct hclge_vport *vport = hclge_get_vport(handle); 6894 struct hclge_dev *hdev = vport->back; 6895 6896 if (speed) 6897 *speed = hdev->hw.mac.speed; 6898 if (duplex) 6899 *duplex = hdev->hw.mac.duplex; 6900 if (auto_neg) 6901 *auto_neg = hdev->hw.mac.autoneg; 6902 } 6903 6904 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) 6905 { 6906 struct hclge_vport *vport = hclge_get_vport(handle); 6907 struct hclge_dev *hdev = vport->back; 6908 6909 if (media_type) 6910 *media_type = hdev->hw.mac.media_type; 6911 } 6912 6913 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 6914 u8 *tp_mdix_ctrl, u8 *tp_mdix) 6915 { 6916 struct hclge_vport *vport = hclge_get_vport(handle); 6917 struct hclge_dev *hdev = vport->back; 6918 struct phy_device *phydev = hdev->hw.mac.phydev; 6919 int mdix_ctrl, mdix, retval, is_resolved; 6920 6921 if (!phydev) { 6922 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 6923 *tp_mdix = ETH_TP_MDI_INVALID; 6924 return; 6925 } 6926 6927 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 6928 6929 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 6930 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 6931 HCLGE_PHY_MDIX_CTRL_S); 6932 6933 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 6934 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 6935 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 6936 6937 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 6938 6939 switch (mdix_ctrl) { 6940 case 0x0: 6941 *tp_mdix_ctrl = ETH_TP_MDI; 6942 break; 6943 case 0x1: 6944 *tp_mdix_ctrl = ETH_TP_MDI_X; 6945 break; 6946 case 0x3: 6947 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 6948 break; 6949 default: 6950 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 6951 break; 6952 } 6953 6954 if (!is_resolved) 6955 *tp_mdix = ETH_TP_MDI_INVALID; 6956 else if (mdix) 6957 *tp_mdix = ETH_TP_MDI_X; 6958 else 6959 *tp_mdix = ETH_TP_MDI; 6960 } 6961 6962 static int hclge_init_instance_hw(struct hclge_dev *hdev) 6963 { 6964 return hclge_mac_connect_phy(hdev); 6965 } 6966 6967 static void hclge_uninit_instance_hw(struct hclge_dev *hdev) 6968 { 6969 hclge_mac_disconnect_phy(hdev); 6970 } 6971 6972 static int hclge_init_client_instance(struct hnae3_client *client, 6973 struct hnae3_ae_dev *ae_dev) 6974 { 6975 struct hclge_dev *hdev = ae_dev->priv; 6976 struct hclge_vport *vport; 6977 int i, ret; 6978 6979 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 6980 vport = &hdev->vport[i]; 6981 6982 switch (client->type) { 6983 case HNAE3_CLIENT_KNIC: 6984 6985 hdev->nic_client = client; 6986 vport->nic.client = client; 6987 ret = client->ops->init_instance(&vport->nic); 6988 if (ret) 6989 goto clear_nic; 6990 6991 ret = hclge_init_instance_hw(hdev); 6992 if (ret) { 6993 client->ops->uninit_instance(&vport->nic, 6994 0); 6995 goto clear_nic; 6996 } 6997 6998 hnae3_set_client_init_flag(client, ae_dev, 1); 6999 7000 if (hdev->roce_client && 7001 hnae3_dev_roce_supported(hdev)) { 7002 struct hnae3_client *rc = hdev->roce_client; 7003 7004 ret = hclge_init_roce_base_info(vport); 7005 if (ret) 7006 goto clear_roce; 7007 7008 ret = rc->ops->init_instance(&vport->roce); 7009 if (ret) 7010 goto clear_roce; 7011 7012 hnae3_set_client_init_flag(hdev->roce_client, 7013 ae_dev, 1); 7014 } 7015 7016 break; 7017 case HNAE3_CLIENT_UNIC: 7018 hdev->nic_client = client; 7019 vport->nic.client = client; 7020 7021 ret = client->ops->init_instance(&vport->nic); 7022 if (ret) 7023 goto clear_nic; 7024 7025 hnae3_set_client_init_flag(client, ae_dev, 1); 7026 7027 break; 7028 case HNAE3_CLIENT_ROCE: 7029 if (hnae3_dev_roce_supported(hdev)) { 7030 hdev->roce_client = client; 7031 vport->roce.client = client; 7032 } 7033 7034 if (hdev->roce_client && hdev->nic_client) { 7035 ret = hclge_init_roce_base_info(vport); 7036 if (ret) 7037 goto clear_roce; 7038 7039 ret = client->ops->init_instance(&vport->roce); 7040 if (ret) 7041 goto clear_roce; 7042 7043 hnae3_set_client_init_flag(client, ae_dev, 1); 7044 } 7045 7046 break; 7047 default: 7048 return -EINVAL; 7049 } 7050 } 7051 7052 return 0; 7053 7054 clear_nic: 7055 hdev->nic_client = NULL; 7056 vport->nic.client = NULL; 7057 return ret; 7058 clear_roce: 7059 hdev->roce_client = NULL; 7060 vport->roce.client = NULL; 7061 return ret; 7062 } 7063 7064 static void hclge_uninit_client_instance(struct hnae3_client *client, 7065 struct hnae3_ae_dev *ae_dev) 7066 { 7067 struct hclge_dev *hdev = ae_dev->priv; 7068 struct hclge_vport *vport; 7069 int i; 7070 7071 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 7072 vport = &hdev->vport[i]; 7073 if (hdev->roce_client) { 7074 hdev->roce_client->ops->uninit_instance(&vport->roce, 7075 0); 7076 hdev->roce_client = NULL; 7077 vport->roce.client = NULL; 7078 } 7079 if (client->type == HNAE3_CLIENT_ROCE) 7080 return; 7081 if (hdev->nic_client && client->ops->uninit_instance) { 7082 hclge_uninit_instance_hw(hdev); 7083 client->ops->uninit_instance(&vport->nic, 0); 7084 hdev->nic_client = NULL; 7085 vport->nic.client = NULL; 7086 } 7087 } 7088 } 7089 7090 static int hclge_pci_init(struct hclge_dev *hdev) 7091 { 7092 struct pci_dev *pdev = hdev->pdev; 7093 struct hclge_hw *hw; 7094 int ret; 7095 7096 ret = pci_enable_device(pdev); 7097 if (ret) { 7098 dev_err(&pdev->dev, "failed to enable PCI device\n"); 7099 return ret; 7100 } 7101 7102 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 7103 if (ret) { 7104 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 7105 if (ret) { 7106 dev_err(&pdev->dev, 7107 "can't set consistent PCI DMA"); 7108 goto err_disable_device; 7109 } 7110 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 7111 } 7112 7113 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 7114 if (ret) { 7115 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 7116 goto err_disable_device; 7117 } 7118 7119 pci_set_master(pdev); 7120 hw = &hdev->hw; 7121 hw->io_base = pcim_iomap(pdev, 2, 0); 7122 if (!hw->io_base) { 7123 dev_err(&pdev->dev, "Can't map configuration register space\n"); 7124 ret = -ENOMEM; 7125 goto err_clr_master; 7126 } 7127 7128 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 7129 7130 return 0; 7131 err_clr_master: 7132 pci_clear_master(pdev); 7133 pci_release_regions(pdev); 7134 err_disable_device: 7135 pci_disable_device(pdev); 7136 7137 return ret; 7138 } 7139 7140 static void hclge_pci_uninit(struct hclge_dev *hdev) 7141 { 7142 struct pci_dev *pdev = hdev->pdev; 7143 7144 pcim_iounmap(pdev, hdev->hw.io_base); 7145 pci_free_irq_vectors(pdev); 7146 pci_clear_master(pdev); 7147 pci_release_mem_regions(pdev); 7148 pci_disable_device(pdev); 7149 } 7150 7151 static void hclge_state_init(struct hclge_dev *hdev) 7152 { 7153 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 7154 set_bit(HCLGE_STATE_DOWN, &hdev->state); 7155 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 7156 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 7157 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 7158 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 7159 } 7160 7161 static void hclge_state_uninit(struct hclge_dev *hdev) 7162 { 7163 set_bit(HCLGE_STATE_DOWN, &hdev->state); 7164 7165 if (hdev->service_timer.function) 7166 del_timer_sync(&hdev->service_timer); 7167 if (hdev->reset_timer.function) 7168 del_timer_sync(&hdev->reset_timer); 7169 if (hdev->service_task.func) 7170 cancel_work_sync(&hdev->service_task); 7171 if (hdev->rst_service_task.func) 7172 cancel_work_sync(&hdev->rst_service_task); 7173 if (hdev->mbx_service_task.func) 7174 cancel_work_sync(&hdev->mbx_service_task); 7175 } 7176 7177 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) 7178 { 7179 #define HCLGE_FLR_WAIT_MS 100 7180 #define HCLGE_FLR_WAIT_CNT 50 7181 struct hclge_dev *hdev = ae_dev->priv; 7182 int cnt = 0; 7183 7184 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); 7185 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); 7186 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); 7187 hclge_reset_event(hdev->pdev, NULL); 7188 7189 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && 7190 cnt++ < HCLGE_FLR_WAIT_CNT) 7191 msleep(HCLGE_FLR_WAIT_MS); 7192 7193 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) 7194 dev_err(&hdev->pdev->dev, 7195 "flr wait down timeout: %d\n", cnt); 7196 } 7197 7198 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) 7199 { 7200 struct hclge_dev *hdev = ae_dev->priv; 7201 7202 set_bit(HNAE3_FLR_DONE, &hdev->flr_state); 7203 } 7204 7205 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 7206 { 7207 struct pci_dev *pdev = ae_dev->pdev; 7208 struct hclge_dev *hdev; 7209 int ret; 7210 7211 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 7212 if (!hdev) { 7213 ret = -ENOMEM; 7214 goto out; 7215 } 7216 7217 hdev->pdev = pdev; 7218 hdev->ae_dev = ae_dev; 7219 hdev->reset_type = HNAE3_NONE_RESET; 7220 hdev->reset_level = HNAE3_FUNC_RESET; 7221 ae_dev->priv = hdev; 7222 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; 7223 7224 mutex_init(&hdev->vport_lock); 7225 7226 ret = hclge_pci_init(hdev); 7227 if (ret) { 7228 dev_err(&pdev->dev, "PCI init failed\n"); 7229 goto out; 7230 } 7231 7232 /* Firmware command queue initialize */ 7233 ret = hclge_cmd_queue_init(hdev); 7234 if (ret) { 7235 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 7236 goto err_pci_uninit; 7237 } 7238 7239 /* Firmware command initialize */ 7240 ret = hclge_cmd_init(hdev); 7241 if (ret) 7242 goto err_cmd_uninit; 7243 7244 ret = hclge_get_cap(hdev); 7245 if (ret) { 7246 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 7247 ret); 7248 goto err_cmd_uninit; 7249 } 7250 7251 ret = hclge_configure(hdev); 7252 if (ret) { 7253 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 7254 goto err_cmd_uninit; 7255 } 7256 7257 ret = hclge_init_msi(hdev); 7258 if (ret) { 7259 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 7260 goto err_cmd_uninit; 7261 } 7262 7263 ret = hclge_misc_irq_init(hdev); 7264 if (ret) { 7265 dev_err(&pdev->dev, 7266 "Misc IRQ(vector0) init error, ret = %d.\n", 7267 ret); 7268 goto err_msi_uninit; 7269 } 7270 7271 ret = hclge_alloc_tqps(hdev); 7272 if (ret) { 7273 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 7274 goto err_msi_irq_uninit; 7275 } 7276 7277 ret = hclge_alloc_vport(hdev); 7278 if (ret) { 7279 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 7280 goto err_msi_irq_uninit; 7281 } 7282 7283 ret = hclge_map_tqp(hdev); 7284 if (ret) { 7285 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 7286 goto err_msi_irq_uninit; 7287 } 7288 7289 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 7290 ret = hclge_mac_mdio_config(hdev); 7291 if (ret) { 7292 dev_err(&hdev->pdev->dev, 7293 "mdio config fail ret=%d\n", ret); 7294 goto err_msi_irq_uninit; 7295 } 7296 } 7297 7298 ret = hclge_init_umv_space(hdev); 7299 if (ret) { 7300 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); 7301 goto err_msi_irq_uninit; 7302 } 7303 7304 ret = hclge_mac_init(hdev); 7305 if (ret) { 7306 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 7307 goto err_mdiobus_unreg; 7308 } 7309 7310 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 7311 if (ret) { 7312 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 7313 goto err_mdiobus_unreg; 7314 } 7315 7316 ret = hclge_config_gro(hdev, true); 7317 if (ret) 7318 goto err_mdiobus_unreg; 7319 7320 ret = hclge_init_vlan_config(hdev); 7321 if (ret) { 7322 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 7323 goto err_mdiobus_unreg; 7324 } 7325 7326 ret = hclge_tm_schd_init(hdev); 7327 if (ret) { 7328 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 7329 goto err_mdiobus_unreg; 7330 } 7331 7332 hclge_rss_init_cfg(hdev); 7333 ret = hclge_rss_init_hw(hdev); 7334 if (ret) { 7335 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 7336 goto err_mdiobus_unreg; 7337 } 7338 7339 ret = init_mgr_tbl(hdev); 7340 if (ret) { 7341 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 7342 goto err_mdiobus_unreg; 7343 } 7344 7345 ret = hclge_init_fd_config(hdev); 7346 if (ret) { 7347 dev_err(&pdev->dev, 7348 "fd table init fail, ret=%d\n", ret); 7349 goto err_mdiobus_unreg; 7350 } 7351 7352 ret = hclge_hw_error_set_state(hdev, true); 7353 if (ret) { 7354 dev_err(&pdev->dev, 7355 "fail(%d) to enable hw error interrupts\n", ret); 7356 goto err_mdiobus_unreg; 7357 } 7358 7359 hclge_dcb_ops_set(hdev); 7360 7361 timer_setup(&hdev->service_timer, hclge_service_timer, 0); 7362 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); 7363 INIT_WORK(&hdev->service_task, hclge_service_task); 7364 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); 7365 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); 7366 7367 hclge_clear_all_event_cause(hdev); 7368 7369 /* Enable MISC vector(vector0) */ 7370 hclge_enable_vector(&hdev->misc_vector, true); 7371 7372 hclge_state_init(hdev); 7373 hdev->last_reset_time = jiffies; 7374 7375 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); 7376 return 0; 7377 7378 err_mdiobus_unreg: 7379 if (hdev->hw.mac.phydev) 7380 mdiobus_unregister(hdev->hw.mac.mdio_bus); 7381 err_msi_irq_uninit: 7382 hclge_misc_irq_uninit(hdev); 7383 err_msi_uninit: 7384 pci_free_irq_vectors(pdev); 7385 err_cmd_uninit: 7386 hclge_destroy_cmd_queue(&hdev->hw); 7387 err_pci_uninit: 7388 pcim_iounmap(pdev, hdev->hw.io_base); 7389 pci_clear_master(pdev); 7390 pci_release_regions(pdev); 7391 pci_disable_device(pdev); 7392 out: 7393 return ret; 7394 } 7395 7396 static void hclge_stats_clear(struct hclge_dev *hdev) 7397 { 7398 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); 7399 } 7400 7401 static void hclge_reset_vport_state(struct hclge_dev *hdev) 7402 { 7403 struct hclge_vport *vport = hdev->vport; 7404 int i; 7405 7406 for (i = 0; i < hdev->num_alloc_vport; i++) { 7407 hclge_vport_start(vport); 7408 vport++; 7409 } 7410 } 7411 7412 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 7413 { 7414 struct hclge_dev *hdev = ae_dev->priv; 7415 struct pci_dev *pdev = ae_dev->pdev; 7416 int ret; 7417 7418 set_bit(HCLGE_STATE_DOWN, &hdev->state); 7419 7420 hclge_stats_clear(hdev); 7421 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 7422 7423 ret = hclge_cmd_init(hdev); 7424 if (ret) { 7425 dev_err(&pdev->dev, "Cmd queue init failed\n"); 7426 return ret; 7427 } 7428 7429 ret = hclge_map_tqp(hdev); 7430 if (ret) { 7431 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 7432 return ret; 7433 } 7434 7435 hclge_reset_umv_space(hdev); 7436 7437 ret = hclge_mac_init(hdev); 7438 if (ret) { 7439 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 7440 return ret; 7441 } 7442 7443 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 7444 if (ret) { 7445 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 7446 return ret; 7447 } 7448 7449 ret = hclge_config_gro(hdev, true); 7450 if (ret) 7451 return ret; 7452 7453 ret = hclge_init_vlan_config(hdev); 7454 if (ret) { 7455 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 7456 return ret; 7457 } 7458 7459 ret = hclge_tm_init_hw(hdev); 7460 if (ret) { 7461 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 7462 return ret; 7463 } 7464 7465 ret = hclge_rss_init_hw(hdev); 7466 if (ret) { 7467 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 7468 return ret; 7469 } 7470 7471 ret = hclge_init_fd_config(hdev); 7472 if (ret) { 7473 dev_err(&pdev->dev, 7474 "fd table init fail, ret=%d\n", ret); 7475 return ret; 7476 } 7477 7478 /* Re-enable the hw error interrupts because 7479 * the interrupts get disabled on core/global reset. 7480 */ 7481 ret = hclge_hw_error_set_state(hdev, true); 7482 if (ret) { 7483 dev_err(&pdev->dev, 7484 "fail(%d) to re-enable HNS hw error interrupts\n", ret); 7485 return ret; 7486 } 7487 7488 hclge_reset_vport_state(hdev); 7489 7490 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 7491 HCLGE_DRIVER_NAME); 7492 7493 return 0; 7494 } 7495 7496 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 7497 { 7498 struct hclge_dev *hdev = ae_dev->priv; 7499 struct hclge_mac *mac = &hdev->hw.mac; 7500 7501 hclge_state_uninit(hdev); 7502 7503 if (mac->phydev) 7504 mdiobus_unregister(mac->mdio_bus); 7505 7506 hclge_uninit_umv_space(hdev); 7507 7508 /* Disable MISC vector(vector0) */ 7509 hclge_enable_vector(&hdev->misc_vector, false); 7510 synchronize_irq(hdev->misc_vector.vector_irq); 7511 7512 hclge_hw_error_set_state(hdev, false); 7513 hclge_destroy_cmd_queue(&hdev->hw); 7514 hclge_misc_irq_uninit(hdev); 7515 hclge_pci_uninit(hdev); 7516 mutex_destroy(&hdev->vport_lock); 7517 ae_dev->priv = NULL; 7518 } 7519 7520 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 7521 { 7522 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 7523 struct hclge_vport *vport = hclge_get_vport(handle); 7524 struct hclge_dev *hdev = vport->back; 7525 7526 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 7527 } 7528 7529 static void hclge_get_channels(struct hnae3_handle *handle, 7530 struct ethtool_channels *ch) 7531 { 7532 struct hclge_vport *vport = hclge_get_vport(handle); 7533 7534 ch->max_combined = hclge_get_max_channels(handle); 7535 ch->other_count = 1; 7536 ch->max_other = 1; 7537 ch->combined_count = vport->alloc_tqps; 7538 } 7539 7540 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 7541 u16 *alloc_tqps, u16 *max_rss_size) 7542 { 7543 struct hclge_vport *vport = hclge_get_vport(handle); 7544 struct hclge_dev *hdev = vport->back; 7545 7546 *alloc_tqps = vport->alloc_tqps; 7547 *max_rss_size = hdev->rss_size_max; 7548 } 7549 7550 static void hclge_release_tqp(struct hclge_vport *vport) 7551 { 7552 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 7553 struct hclge_dev *hdev = vport->back; 7554 int i; 7555 7556 for (i = 0; i < kinfo->num_tqps; i++) { 7557 struct hclge_tqp *tqp = 7558 container_of(kinfo->tqp[i], struct hclge_tqp, q); 7559 7560 tqp->q.handle = NULL; 7561 tqp->q.tqp_index = 0; 7562 tqp->alloced = false; 7563 } 7564 7565 devm_kfree(&hdev->pdev->dev, kinfo->tqp); 7566 kinfo->tqp = NULL; 7567 } 7568 7569 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) 7570 { 7571 struct hclge_vport *vport = hclge_get_vport(handle); 7572 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 7573 struct hclge_dev *hdev = vport->back; 7574 int cur_rss_size = kinfo->rss_size; 7575 int cur_tqps = kinfo->num_tqps; 7576 u16 tc_offset[HCLGE_MAX_TC_NUM]; 7577 u16 tc_valid[HCLGE_MAX_TC_NUM]; 7578 u16 tc_size[HCLGE_MAX_TC_NUM]; 7579 u16 roundup_size; 7580 u32 *rss_indir; 7581 int ret, i; 7582 7583 /* Free old tqps, and reallocate with new tqp number when nic setup */ 7584 hclge_release_tqp(vport); 7585 7586 ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); 7587 if (ret) { 7588 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); 7589 return ret; 7590 } 7591 7592 ret = hclge_map_tqp_to_vport(hdev, vport); 7593 if (ret) { 7594 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); 7595 return ret; 7596 } 7597 7598 ret = hclge_tm_schd_init(hdev); 7599 if (ret) { 7600 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); 7601 return ret; 7602 } 7603 7604 roundup_size = roundup_pow_of_two(kinfo->rss_size); 7605 roundup_size = ilog2(roundup_size); 7606 /* Set the RSS TC mode according to the new RSS size */ 7607 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 7608 tc_valid[i] = 0; 7609 7610 if (!(hdev->hw_tc_map & BIT(i))) 7611 continue; 7612 7613 tc_valid[i] = 1; 7614 tc_size[i] = roundup_size; 7615 tc_offset[i] = kinfo->rss_size * i; 7616 } 7617 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 7618 if (ret) 7619 return ret; 7620 7621 /* Reinitializes the rss indirect table according to the new RSS size */ 7622 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 7623 if (!rss_indir) 7624 return -ENOMEM; 7625 7626 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 7627 rss_indir[i] = i % kinfo->rss_size; 7628 7629 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 7630 if (ret) 7631 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 7632 ret); 7633 7634 kfree(rss_indir); 7635 7636 if (!ret) 7637 dev_info(&hdev->pdev->dev, 7638 "Channels changed, rss_size from %d to %d, tqps from %d to %d", 7639 cur_rss_size, kinfo->rss_size, 7640 cur_tqps, kinfo->rss_size * kinfo->num_tc); 7641 7642 return ret; 7643 } 7644 7645 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, 7646 u32 *regs_num_64_bit) 7647 { 7648 struct hclge_desc desc; 7649 u32 total_num; 7650 int ret; 7651 7652 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); 7653 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7654 if (ret) { 7655 dev_err(&hdev->pdev->dev, 7656 "Query register number cmd failed, ret = %d.\n", ret); 7657 return ret; 7658 } 7659 7660 *regs_num_32_bit = le32_to_cpu(desc.data[0]); 7661 *regs_num_64_bit = le32_to_cpu(desc.data[1]); 7662 7663 total_num = *regs_num_32_bit + *regs_num_64_bit; 7664 if (!total_num) 7665 return -EINVAL; 7666 7667 return 0; 7668 } 7669 7670 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, 7671 void *data) 7672 { 7673 #define HCLGE_32_BIT_REG_RTN_DATANUM 8 7674 7675 struct hclge_desc *desc; 7676 u32 *reg_val = data; 7677 __le32 *desc_data; 7678 int cmd_num; 7679 int i, k, n; 7680 int ret; 7681 7682 if (regs_num == 0) 7683 return 0; 7684 7685 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); 7686 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 7687 if (!desc) 7688 return -ENOMEM; 7689 7690 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); 7691 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); 7692 if (ret) { 7693 dev_err(&hdev->pdev->dev, 7694 "Query 32 bit register cmd failed, ret = %d.\n", ret); 7695 kfree(desc); 7696 return ret; 7697 } 7698 7699 for (i = 0; i < cmd_num; i++) { 7700 if (i == 0) { 7701 desc_data = (__le32 *)(&desc[i].data[0]); 7702 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; 7703 } else { 7704 desc_data = (__le32 *)(&desc[i]); 7705 n = HCLGE_32_BIT_REG_RTN_DATANUM; 7706 } 7707 for (k = 0; k < n; k++) { 7708 *reg_val++ = le32_to_cpu(*desc_data++); 7709 7710 regs_num--; 7711 if (!regs_num) 7712 break; 7713 } 7714 } 7715 7716 kfree(desc); 7717 return 0; 7718 } 7719 7720 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, 7721 void *data) 7722 { 7723 #define HCLGE_64_BIT_REG_RTN_DATANUM 4 7724 7725 struct hclge_desc *desc; 7726 u64 *reg_val = data; 7727 __le64 *desc_data; 7728 int cmd_num; 7729 int i, k, n; 7730 int ret; 7731 7732 if (regs_num == 0) 7733 return 0; 7734 7735 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); 7736 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 7737 if (!desc) 7738 return -ENOMEM; 7739 7740 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true); 7741 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); 7742 if (ret) { 7743 dev_err(&hdev->pdev->dev, 7744 "Query 64 bit register cmd failed, ret = %d.\n", ret); 7745 kfree(desc); 7746 return ret; 7747 } 7748 7749 for (i = 0; i < cmd_num; i++) { 7750 if (i == 0) { 7751 desc_data = (__le64 *)(&desc[i].data[0]); 7752 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1; 7753 } else { 7754 desc_data = (__le64 *)(&desc[i]); 7755 n = HCLGE_64_BIT_REG_RTN_DATANUM; 7756 } 7757 for (k = 0; k < n; k++) { 7758 *reg_val++ = le64_to_cpu(*desc_data++); 7759 7760 regs_num--; 7761 if (!regs_num) 7762 break; 7763 } 7764 } 7765 7766 kfree(desc); 7767 return 0; 7768 } 7769 7770 #define MAX_SEPARATE_NUM 4 7771 #define SEPARATOR_VALUE 0xFFFFFFFF 7772 #define REG_NUM_PER_LINE 4 7773 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) 7774 7775 static int hclge_get_regs_len(struct hnae3_handle *handle) 7776 { 7777 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; 7778 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 7779 struct hclge_vport *vport = hclge_get_vport(handle); 7780 struct hclge_dev *hdev = vport->back; 7781 u32 regs_num_32_bit, regs_num_64_bit; 7782 int ret; 7783 7784 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); 7785 if (ret) { 7786 dev_err(&hdev->pdev->dev, 7787 "Get register number failed, ret = %d.\n", ret); 7788 return -EOPNOTSUPP; 7789 } 7790 7791 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; 7792 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; 7793 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; 7794 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; 7795 7796 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + 7797 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE + 7798 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); 7799 } 7800 7801 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, 7802 void *data) 7803 { 7804 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 7805 struct hclge_vport *vport = hclge_get_vport(handle); 7806 struct hclge_dev *hdev = vport->back; 7807 u32 regs_num_32_bit, regs_num_64_bit; 7808 int i, j, reg_um, separator_num; 7809 u32 *reg = data; 7810 int ret; 7811 7812 *version = hdev->fw_version; 7813 7814 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit); 7815 if (ret) { 7816 dev_err(&hdev->pdev->dev, 7817 "Get register number failed, ret = %d.\n", ret); 7818 return; 7819 } 7820 7821 /* fetching per-PF registers valus from PF PCIe register space */ 7822 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); 7823 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 7824 for (i = 0; i < reg_um; i++) 7825 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); 7826 for (i = 0; i < separator_num; i++) 7827 *reg++ = SEPARATOR_VALUE; 7828 7829 reg_um = sizeof(common_reg_addr_list) / sizeof(u32); 7830 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 7831 for (i = 0; i < reg_um; i++) 7832 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); 7833 for (i = 0; i < separator_num; i++) 7834 *reg++ = SEPARATOR_VALUE; 7835 7836 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); 7837 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 7838 for (j = 0; j < kinfo->num_tqps; j++) { 7839 for (i = 0; i < reg_um; i++) 7840 *reg++ = hclge_read_dev(&hdev->hw, 7841 ring_reg_addr_list[i] + 7842 0x200 * j); 7843 for (i = 0; i < separator_num; i++) 7844 *reg++ = SEPARATOR_VALUE; 7845 } 7846 7847 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); 7848 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; 7849 for (j = 0; j < hdev->num_msi_used - 1; j++) { 7850 for (i = 0; i < reg_um; i++) 7851 *reg++ = hclge_read_dev(&hdev->hw, 7852 tqp_intr_reg_addr_list[i] + 7853 4 * j); 7854 for (i = 0; i < separator_num; i++) 7855 *reg++ = SEPARATOR_VALUE; 7856 } 7857 7858 /* fetching PF common registers values from firmware */ 7859 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); 7860 if (ret) { 7861 dev_err(&hdev->pdev->dev, 7862 "Get 32 bit register failed, ret = %d.\n", ret); 7863 return; 7864 } 7865 7866 reg += regs_num_32_bit; 7867 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); 7868 if (ret) 7869 dev_err(&hdev->pdev->dev, 7870 "Get 64 bit register failed, ret = %d.\n", ret); 7871 } 7872 7873 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) 7874 { 7875 struct hclge_set_led_state_cmd *req; 7876 struct hclge_desc desc; 7877 int ret; 7878 7879 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); 7880 7881 req = (struct hclge_set_led_state_cmd *)desc.data; 7882 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, 7883 HCLGE_LED_LOCATE_STATE_S, locate_led_status); 7884 7885 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7886 if (ret) 7887 dev_err(&hdev->pdev->dev, 7888 "Send set led state cmd error, ret =%d\n", ret); 7889 7890 return ret; 7891 } 7892 7893 enum hclge_led_status { 7894 HCLGE_LED_OFF, 7895 HCLGE_LED_ON, 7896 HCLGE_LED_NO_CHANGE = 0xFF, 7897 }; 7898 7899 static int hclge_set_led_id(struct hnae3_handle *handle, 7900 enum ethtool_phys_id_state status) 7901 { 7902 struct hclge_vport *vport = hclge_get_vport(handle); 7903 struct hclge_dev *hdev = vport->back; 7904 7905 switch (status) { 7906 case ETHTOOL_ID_ACTIVE: 7907 return hclge_set_led_status(hdev, HCLGE_LED_ON); 7908 case ETHTOOL_ID_INACTIVE: 7909 return hclge_set_led_status(hdev, HCLGE_LED_OFF); 7910 default: 7911 return -EINVAL; 7912 } 7913 } 7914 7915 static void hclge_get_link_mode(struct hnae3_handle *handle, 7916 unsigned long *supported, 7917 unsigned long *advertising) 7918 { 7919 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); 7920 struct hclge_vport *vport = hclge_get_vport(handle); 7921 struct hclge_dev *hdev = vport->back; 7922 unsigned int idx = 0; 7923 7924 for (; idx < size; idx++) { 7925 supported[idx] = hdev->hw.mac.supported[idx]; 7926 advertising[idx] = hdev->hw.mac.advertising[idx]; 7927 } 7928 } 7929 7930 static int hclge_gro_en(struct hnae3_handle *handle, int enable) 7931 { 7932 struct hclge_vport *vport = hclge_get_vport(handle); 7933 struct hclge_dev *hdev = vport->back; 7934 7935 return hclge_config_gro(hdev, enable); 7936 } 7937 7938 static const struct hnae3_ae_ops hclge_ops = { 7939 .init_ae_dev = hclge_init_ae_dev, 7940 .uninit_ae_dev = hclge_uninit_ae_dev, 7941 .flr_prepare = hclge_flr_prepare, 7942 .flr_done = hclge_flr_done, 7943 .init_client_instance = hclge_init_client_instance, 7944 .uninit_client_instance = hclge_uninit_client_instance, 7945 .map_ring_to_vector = hclge_map_ring_to_vector, 7946 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, 7947 .get_vector = hclge_get_vector, 7948 .put_vector = hclge_put_vector, 7949 .set_promisc_mode = hclge_set_promisc_mode, 7950 .set_loopback = hclge_set_loopback, 7951 .start = hclge_ae_start, 7952 .stop = hclge_ae_stop, 7953 .client_start = hclge_client_start, 7954 .client_stop = hclge_client_stop, 7955 .get_status = hclge_get_status, 7956 .get_ksettings_an_result = hclge_get_ksettings_an_result, 7957 .update_speed_duplex_h = hclge_update_speed_duplex_h, 7958 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, 7959 .get_media_type = hclge_get_media_type, 7960 .get_rss_key_size = hclge_get_rss_key_size, 7961 .get_rss_indir_size = hclge_get_rss_indir_size, 7962 .get_rss = hclge_get_rss, 7963 .set_rss = hclge_set_rss, 7964 .set_rss_tuple = hclge_set_rss_tuple, 7965 .get_rss_tuple = hclge_get_rss_tuple, 7966 .get_tc_size = hclge_get_tc_size, 7967 .get_mac_addr = hclge_get_mac_addr, 7968 .set_mac_addr = hclge_set_mac_addr, 7969 .do_ioctl = hclge_do_ioctl, 7970 .add_uc_addr = hclge_add_uc_addr, 7971 .rm_uc_addr = hclge_rm_uc_addr, 7972 .add_mc_addr = hclge_add_mc_addr, 7973 .rm_mc_addr = hclge_rm_mc_addr, 7974 .set_autoneg = hclge_set_autoneg, 7975 .get_autoneg = hclge_get_autoneg, 7976 .get_pauseparam = hclge_get_pauseparam, 7977 .set_pauseparam = hclge_set_pauseparam, 7978 .set_mtu = hclge_set_mtu, 7979 .reset_queue = hclge_reset_tqp, 7980 .get_stats = hclge_get_stats, 7981 .update_stats = hclge_update_stats, 7982 .get_strings = hclge_get_strings, 7983 .get_sset_count = hclge_get_sset_count, 7984 .get_fw_version = hclge_get_fw_version, 7985 .get_mdix_mode = hclge_get_mdix_mode, 7986 .enable_vlan_filter = hclge_enable_vlan_filter, 7987 .set_vlan_filter = hclge_set_vlan_filter, 7988 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 7989 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 7990 .reset_event = hclge_reset_event, 7991 .set_default_reset_request = hclge_set_def_reset_request, 7992 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, 7993 .set_channels = hclge_set_channels, 7994 .get_channels = hclge_get_channels, 7995 .get_regs_len = hclge_get_regs_len, 7996 .get_regs = hclge_get_regs, 7997 .set_led_id = hclge_set_led_id, 7998 .get_link_mode = hclge_get_link_mode, 7999 .add_fd_entry = hclge_add_fd_entry, 8000 .del_fd_entry = hclge_del_fd_entry, 8001 .del_all_fd_entries = hclge_del_all_fd_entries, 8002 .get_fd_rule_cnt = hclge_get_fd_rule_cnt, 8003 .get_fd_rule_info = hclge_get_fd_rule_info, 8004 .get_fd_all_rules = hclge_get_all_rules, 8005 .restore_fd_rules = hclge_restore_fd_entries, 8006 .enable_fd = hclge_enable_fd, 8007 .dbg_run_cmd = hclge_dbg_run_cmd, 8008 .handle_hw_ras_error = hclge_handle_hw_ras_error, 8009 .get_hw_reset_stat = hclge_get_hw_reset_stat, 8010 .ae_dev_resetting = hclge_ae_dev_resetting, 8011 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, 8012 .set_gro_en = hclge_gro_en, 8013 .get_global_queue_id = hclge_covert_handle_qid_global, 8014 .set_timer_task = hclge_set_timer_task, 8015 }; 8016 8017 static struct hnae3_ae_algo ae_algo = { 8018 .ops = &hclge_ops, 8019 .pdev_id_table = ae_algo_pci_tbl, 8020 }; 8021 8022 static int hclge_init(void) 8023 { 8024 pr_info("%s is initializing\n", HCLGE_NAME); 8025 8026 hnae3_register_ae_algo(&ae_algo); 8027 8028 return 0; 8029 } 8030 8031 static void hclge_exit(void) 8032 { 8033 hnae3_unregister_ae_algo(&ae_algo); 8034 } 8035 module_init(hclge_init); 8036 module_exit(hclge_exit); 8037 8038 MODULE_LICENSE("GPL"); 8039 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 8040 MODULE_DESCRIPTION("HCLGE Driver"); 8041 MODULE_VERSION(HCLGE_MOD_VERSION); 8042