// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
{"mac_rx_512_1023_oct_pkt_num", 175 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 176 {"mac_rx_1024_1518_oct_pkt_num", 177 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 178 {"mac_rx_1519_2047_oct_pkt_num", 179 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 180 {"mac_rx_2048_4095_oct_pkt_num", 181 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 182 {"mac_rx_4096_8191_oct_pkt_num", 183 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 184 {"mac_rx_8192_9216_oct_pkt_num", 185 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 186 {"mac_rx_9217_12287_oct_pkt_num", 187 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 188 {"mac_rx_12288_16383_oct_pkt_num", 189 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 190 {"mac_rx_1519_max_good_pkt_num", 191 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 192 {"mac_rx_1519_max_bad_pkt_num", 193 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 194 195 {"mac_tx_fragment_pkt_num", 196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 197 {"mac_tx_undermin_pkt_num", 198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 199 {"mac_tx_jabber_pkt_num", 200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 201 {"mac_tx_err_all_pkt_num", 202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 203 {"mac_tx_from_app_good_pkt_num", 204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 205 {"mac_tx_from_app_bad_pkt_num", 206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 207 {"mac_rx_fragment_pkt_num", 208 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 209 {"mac_rx_undermin_pkt_num", 210 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 211 {"mac_rx_jabber_pkt_num", 212 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 213 {"mac_rx_fcs_err_pkt_num", 214 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 215 {"mac_rx_send_app_good_pkt_num", 216 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 217 {"mac_rx_send_app_bad_pkt_num", 218 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 219 }; 220 221 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 222 { 223 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 224 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP), 225 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), 226 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), 227 .i_port_bitmap = 0x1, 228 }, 229 }; 230 231 static int hclge_mac_update_stats(struct hclge_dev *hdev) 232 { 233 #define HCLGE_MAC_CMD_NUM 21 234 #define HCLGE_RTN_DATA_NUM 4 235 236 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); 237 struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; 238 __le64 *desc_data; 239 int i, k, n; 240 int ret; 241 242 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); 243 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); 244 if (ret) { 245 dev_err(&hdev->pdev->dev, 246 "Get MAC pkt stats fail, status = %d.\n", ret); 247 248 return ret; 249 } 250 251 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { 252 if (unlikely(i == 0)) { 253 desc_data = (__le64 *)(&desc[i].data[0]); 254 n = HCLGE_RTN_DATA_NUM - 2; 255 } else { 256 desc_data = (__le64 *)(&desc[i]); 257 n = HCLGE_RTN_DATA_NUM; 258 } 259 for (k = 0; k < n; k++) { 260 *data++ += le64_to_cpu(*desc_data); 261 desc_data++; 262 } 263 } 264 265 return 0; 266 } 267 268 static int hclge_tqps_update_stats(struct hnae3_handle *handle) 269 { 270 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 271 struct hclge_vport *vport = hclge_get_vport(handle); 272 struct hclge_dev 
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
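/* Descriptive note (not from the original sources): hclge_update_netstat()
 * below derives net_device_stats from the accumulated MAC counters.
 * rx_errors is the sum of oversize, undersize and FCS-error packets,
 * rx_length_errors sums undersize and oversize, rx_crc_errors and
 * rx_over_errors reuse the FCS-error and oversize counters, and multicast
 * counts both TX and RX multicast frames.
 */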
static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * app (mac): only GE mode supports it
	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}
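/* Descriptive note (not from the original sources): the ethtool layout used
 * by hclge_get_strings() and hclge_get_stats() below puts the MAC counters
 * from g_mac_stats_string first, followed by one "txqN_pktnum_rcd" and one
 * "rxqN_pktnum_rcd" entry per TQP, so the ETH_SS_STATS count reported by
 * hclge_get_sset_count() is ARRAY_SIZE(g_mac_stats_string) +
 * 2 * kinfo->num_tqps. The string and value orderings must stay in sync.
 */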
static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_tqps_get_stats(handle, p);
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi  +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
			supported);

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
			supported);

	set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
	set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type != HNAE3_MEDIA_TYPE_FIBER)
		return;

	hclge_parse_fiber_link_mode(hdev, speed_ability);
}
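/* Descriptive note (not from the original sources): hclge_parse_cfg() below
 * unpacks the two HCLGE_OPC_GET_CFG_PARAM descriptors. Fields are extracted
 * from the little-endian param[] words with hnae3_get_field(). The MAC
 * address is split across the words: param[2] of the first descriptor holds
 * the low 32 bits and param[3] holds the high 16 bits, which are shifted up
 * ((high << 31) << 1, presumably written this way to avoid a 32-bit shift)
 * and OR'ed in before being copied byte by byte into cfg->mac_addr.
 */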
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length should be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < kinfo->num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.desc_num = kinfo->num_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = kinfo->num_tqps;

	return 0;
}
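/* Descriptive note (not from the original sources): hclge_knic_setup() below
 * sizes the kNIC handle. rss_size is clamped to
 * min(rss_size_max, num_tqps / num_tc) and the usable queue count becomes
 * rss_size * num_tc; for example, 16 requested queues with 4 TCs and
 * rss_size_max >= 4 yield 4 queues per TC. Each enabled TC then gets a
 * contiguous block of rss_size queues at offset i * rss_size.
 */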
static int hclge_knic_setup(struct hclge_vport *vport,
			    u16 num_tqps, u16 num_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++)	{
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}
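/* Descriptive note (not from the original sources): hclge_alloc_vport()
 * below creates one vport for the PF's own NIC plus one per VMDq instance
 * and per requested VF. TQPs are split evenly across the vports, and any
 * remainder (hdev->num_tqps % num_vport) is given to vport 0, the main PF
 * vport.
 */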
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* Tx buffer size is allocated in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}
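/* Descriptive note (not from the original sources): hclge_is_rx_buf_ok()
 * below checks whether the remaining packet buffer can hold the shared RX
 * buffer. The required shared size is the larger of 2 * mps + DV (DV being
 * HCLGE_DEFAULT_DV, or the non-DCB variant) and
 * pfc_en * mps + (tc_num - pfc_en) * mps / 2 + mps. Whatever is left after
 * the private buffers (rx_all - rx_priv) becomes the shared buffer, with
 * low/high thresholds of mps and 2 * mps for PFC-enabled TCs and 0 and mps
 * otherwise.
 */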
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
#define HCLGE_BUF_SIZE_UNIT	128
	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = aligned_mps;
				priv->wl.high = priv->wl.low + aligned_mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * aligned_mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + aligned_mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = aligned_mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last to be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);

	return ret;
}

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor set the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);

	return ret;
}

int hclge_buffer_alloc(struct hclge_dev *hdev)
{
	struct hclge_pkt_buf_alloc *pkt_buf;
	int ret;

	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;

	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc tx buffer size for all TCs %d\n", ret);
		goto out;
	}

	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not alloc tx buffers %d\n", ret);
		goto out;
	}

	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"could not calc rx priv buffer size for all TCs %d\n",
			ret);
		goto out;
	}

	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
	if (ret) {
		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
			ret);
		goto out;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure rx private waterline %d\n",
				ret);
			goto out;
		}

		ret = hclge_common_thrd_config(hdev, pkt_buf);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"could not configure common threshold %d\n",
				ret);
			goto out;
		}
	}

	ret = hclge_common_wl_config(hdev, pkt_buf);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not configure common waterline %d\n", ret);

out:
	kfree(pkt_buf);
	return ret;
}

static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
	struct hnae3_handle *roce = &vport->roce;
	struct hnae3_handle *nic = &vport->nic;

	roce->rinfo.num_vectors = vport->back->num_roce_msi;

	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
	    vport->back->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = vport->back->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = vport->back->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclge_init_msi(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = hdev->base_msi_vector +
				hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGE_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
		duplex = HCLGE_MAC_FULL;

	return duplex;
}

static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
				      u8 duplex)
{
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);

	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 6);
		break;
	case HCLGE_MAC_SPEED_100M:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 7);
		break;
	case HCLGE_MAC_SPEED_1G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 0);
		break;
	case HCLGE_MAC_SPEED_10G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 1);
		break;
	case HCLGE_MAC_SPEED_25G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 2);
		break;
	case HCLGE_MAC_SPEED_40G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 3);
		break;
	case HCLGE_MAC_SPEED_50G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 4);
		break;
	case HCLGE_MAC_SPEED_100G:
		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
				HCLGE_CFG_SPEED_S, 5);
		break;
	default:
		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
		return -EINVAL;
	}

	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
		      1);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);
		return ret;
	}

	return 0;
}

int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
	int ret;

	duplex = hclge_check_speed_dup(duplex, speed);
	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
		return 0;

	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
	if (ret)
		return ret;

	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;

	return 0;
}

static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
				     u8 duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}

static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
					u8 *duplex)
{
	struct hclge_query_an_speed_dup_cmd *req;
	struct hclge_desc desc;
	int speed_tmp;
	int ret;

	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"mac speed/autoneg/duplex query cmd failed %d\n",
			ret);
		return ret;
	}

	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
				    HCLGE_QUERY_SPEED_S);

	ret = hclge_parse_speed(speed_tmp, speed);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"could not parse speed(=%d), %d\n", speed_tmp, ret);

	return ret;
}

static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
	struct hclge_config_auto_neg_cmd *req;
	struct hclge_desc desc;
	u32 flag = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);

	req = (struct hclge_config_auto_neg_cmd *)desc.data;
	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
	req->cfg_an_cmd_flag = cpu_to_le32(flag);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_set_autoneg_en(hdev, enable);
}

static int hclge_get_autoneg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
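/* Descriptive note (not from the original sources): hclge_mac_init() below
 * brings up the MAC with the probed speed and forced full duplex, then
 * programs the MTU taken from the netdev if one is already registered, or
 * ETH_DATA_LEN (1500) otherwise.
 */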
hclge_set_mtu(handle, mtu); 1972 if (ret) 1973 dev_err(&hdev->pdev->dev, 1974 "set mtu failed ret=%d\n", ret); 1975 1976 return ret; 1977 } 1978 1979 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 1980 { 1981 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 1982 schedule_work(&hdev->mbx_service_task); 1983 } 1984 1985 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 1986 { 1987 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 1988 schedule_work(&hdev->rst_service_task); 1989 } 1990 1991 static void hclge_task_schedule(struct hclge_dev *hdev) 1992 { 1993 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && 1994 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 1995 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) 1996 (void)schedule_work(&hdev->service_task); 1997 } 1998 1999 static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2000 { 2001 struct hclge_link_status_cmd *req; 2002 struct hclge_desc desc; 2003 int link_status; 2004 int ret; 2005 2006 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2007 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2008 if (ret) { 2009 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2010 ret); 2011 return ret; 2012 } 2013 2014 req = (struct hclge_link_status_cmd *)desc.data; 2015 link_status = req->status & HCLGE_LINK_STATUS_UP_M; 2016 2017 return !!link_status; 2018 } 2019 2020 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2021 { 2022 int mac_state; 2023 int link_stat; 2024 2025 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) 2026 return 0; 2027 2028 mac_state = hclge_get_mac_link_status(hdev); 2029 2030 if (hdev->hw.mac.phydev) { 2031 if (hdev->hw.mac.phydev->state == PHY_RUNNING) 2032 link_stat = mac_state & 2033 hdev->hw.mac.phydev->link; 2034 else 2035 link_stat = 0; 2036 2037 } else { 2038 link_stat = mac_state; 2039 } 2040 2041 return !!link_stat; 2042 } 2043 2044 static void hclge_update_link_status(struct hclge_dev *hdev) 2045 { 2046 struct hnae3_client *client = hdev->nic_client; 2047 struct hnae3_handle *handle; 2048 int state; 2049 int i; 2050 2051 if (!client) 2052 return; 2053 state = hclge_get_mac_phy_link(hdev); 2054 if (state != hdev->hw.mac.link) { 2055 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2056 handle = &hdev->vport[i].nic; 2057 client->ops->link_status_change(handle, state); 2058 } 2059 hdev->hw.mac.link = state; 2060 } 2061 } 2062 2063 static int hclge_update_speed_duplex(struct hclge_dev *hdev) 2064 { 2065 struct hclge_mac mac = hdev->hw.mac; 2066 u8 duplex; 2067 int speed; 2068 int ret; 2069 2070 /* get the speed and duplex as autoneg'result from mac cmd when phy 2071 * doesn't exit. 
2072 */ 2073 if (mac.phydev || !mac.autoneg) 2074 return 0; 2075 2076 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); 2077 if (ret) { 2078 dev_err(&hdev->pdev->dev, 2079 "mac autoneg/speed/duplex query failed %d\n", ret); 2080 return ret; 2081 } 2082 2083 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2084 if (ret) { 2085 dev_err(&hdev->pdev->dev, 2086 "mac speed/duplex config failed %d\n", ret); 2087 return ret; 2088 } 2089 2090 return 0; 2091 } 2092 2093 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) 2094 { 2095 struct hclge_vport *vport = hclge_get_vport(handle); 2096 struct hclge_dev *hdev = vport->back; 2097 2098 return hclge_update_speed_duplex(hdev); 2099 } 2100 2101 static int hclge_get_status(struct hnae3_handle *handle) 2102 { 2103 struct hclge_vport *vport = hclge_get_vport(handle); 2104 struct hclge_dev *hdev = vport->back; 2105 2106 hclge_update_link_status(hdev); 2107 2108 return hdev->hw.mac.link; 2109 } 2110 2111 static void hclge_service_timer(struct timer_list *t) 2112 { 2113 struct hclge_dev *hdev = from_timer(hdev, t, service_timer); 2114 2115 mod_timer(&hdev->service_timer, jiffies + HZ); 2116 hdev->hw_stats.stats_timer++; 2117 hclge_task_schedule(hdev); 2118 } 2119 2120 static void hclge_service_complete(struct hclge_dev *hdev) 2121 { 2122 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); 2123 2124 /* Flush memory before next watchdog */ 2125 smp_mb__before_atomic(); 2126 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 2127 } 2128 2129 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 2130 { 2131 u32 rst_src_reg; 2132 u32 cmdq_src_reg; 2133 2134 /* fetch the events from their corresponding regs */ 2135 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); 2136 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 2137 2138 /* Assumption: If by any chance reset and mailbox events are reported 2139 * together then we will only process reset event in this go and will 2140 * defer the processing of the mailbox events. Since, we would have not 2141 * cleared RX CMDQ event this time we would receive again another 2142 * interrupt from H/W just for the mailbox. 
2143 */ 2144 2145 /* check for vector0 reset event sources */ 2146 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { 2147 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 2148 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 2149 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2150 return HCLGE_VECTOR0_EVENT_RST; 2151 } 2152 2153 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { 2154 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 2155 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); 2156 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2157 return HCLGE_VECTOR0_EVENT_RST; 2158 } 2159 2160 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { 2161 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 2162 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2163 return HCLGE_VECTOR0_EVENT_RST; 2164 } 2165 2166 /* check for vector0 mailbox(=CMDQ RX) event source */ 2167 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 2168 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 2169 *clearval = cmdq_src_reg; 2170 return HCLGE_VECTOR0_EVENT_MBX; 2171 } 2172 2173 return HCLGE_VECTOR0_EVENT_OTHER; 2174 } 2175 2176 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 2177 u32 regclr) 2178 { 2179 switch (event_type) { 2180 case HCLGE_VECTOR0_EVENT_RST: 2181 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 2182 break; 2183 case HCLGE_VECTOR0_EVENT_MBX: 2184 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 2185 break; 2186 default: 2187 break; 2188 } 2189 } 2190 2191 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) 2192 { 2193 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, 2194 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | 2195 BIT(HCLGE_VECTOR0_CORERESET_INT_B) | 2196 BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); 2197 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); 2198 } 2199 2200 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 2201 { 2202 writel(enable ? 1 : 0, vector->addr); 2203 } 2204 2205 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 2206 { 2207 struct hclge_dev *hdev = data; 2208 u32 event_cause; 2209 u32 clearval; 2210 2211 hclge_enable_vector(&hdev->misc_vector, false); 2212 event_cause = hclge_check_event_cause(hdev, &clearval); 2213 2214 /* vector 0 interrupt is shared with reset and mailbox source events.*/ 2215 switch (event_cause) { 2216 case HCLGE_VECTOR0_EVENT_RST: 2217 hclge_reset_task_schedule(hdev); 2218 break; 2219 case HCLGE_VECTOR0_EVENT_MBX: 2220 /* If we are here then, 2221 * 1. Either we are not handling any mbx task and we are not 2222 * scheduled as well 2223 * OR 2224 * 2. We could be handling a mbx task but nothing more is 2225 * scheduled. 2226 * In both cases, we should schedule mbx task as there are more 2227 * mbx messages reported by this interrupt. 
2228 */ 2229 hclge_mbx_task_schedule(hdev); 2230 break; 2231 default: 2232 dev_warn(&hdev->pdev->dev, 2233 "received unknown or unhandled event of vector0\n"); 2234 break; 2235 } 2236 2237 /* clear the source of interrupt if it is not cause by reset */ 2238 if (event_cause != HCLGE_VECTOR0_EVENT_RST) { 2239 hclge_clear_event_cause(hdev, event_cause, clearval); 2240 hclge_enable_vector(&hdev->misc_vector, true); 2241 } 2242 2243 return IRQ_HANDLED; 2244 } 2245 2246 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 2247 { 2248 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { 2249 dev_warn(&hdev->pdev->dev, 2250 "vector(vector_id %d) has been freed.\n", vector_id); 2251 return; 2252 } 2253 2254 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 2255 hdev->num_msi_left += 1; 2256 hdev->num_msi_used -= 1; 2257 } 2258 2259 static void hclge_get_misc_vector(struct hclge_dev *hdev) 2260 { 2261 struct hclge_misc_vector *vector = &hdev->misc_vector; 2262 2263 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 2264 2265 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 2266 hdev->vector_status[0] = 0; 2267 2268 hdev->num_msi_left -= 1; 2269 hdev->num_msi_used += 1; 2270 } 2271 2272 static int hclge_misc_irq_init(struct hclge_dev *hdev) 2273 { 2274 int ret; 2275 2276 hclge_get_misc_vector(hdev); 2277 2278 /* this would be explicitly freed in the end */ 2279 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 2280 0, "hclge_misc", hdev); 2281 if (ret) { 2282 hclge_free_vector(hdev, 0); 2283 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 2284 hdev->misc_vector.vector_irq); 2285 } 2286 2287 return ret; 2288 } 2289 2290 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 2291 { 2292 free_irq(hdev->misc_vector.vector_irq, hdev); 2293 hclge_free_vector(hdev, 0); 2294 } 2295 2296 static int hclge_notify_client(struct hclge_dev *hdev, 2297 enum hnae3_reset_notify_type type) 2298 { 2299 struct hnae3_client *client = hdev->nic_client; 2300 u16 i; 2301 2302 if (!client->ops->reset_notify) 2303 return -EOPNOTSUPP; 2304 2305 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2306 struct hnae3_handle *handle = &hdev->vport[i].nic; 2307 int ret; 2308 2309 ret = client->ops->reset_notify(handle, type); 2310 if (ret) 2311 return ret; 2312 } 2313 2314 return 0; 2315 } 2316 2317 static int hclge_reset_wait(struct hclge_dev *hdev) 2318 { 2319 #define HCLGE_RESET_WATI_MS 100 2320 #define HCLGE_RESET_WAIT_CNT 5 2321 u32 val, reg, reg_bit; 2322 u32 cnt = 0; 2323 2324 switch (hdev->reset_type) { 2325 case HNAE3_GLOBAL_RESET: 2326 reg = HCLGE_GLOBAL_RESET_REG; 2327 reg_bit = HCLGE_GLOBAL_RESET_BIT; 2328 break; 2329 case HNAE3_CORE_RESET: 2330 reg = HCLGE_GLOBAL_RESET_REG; 2331 reg_bit = HCLGE_CORE_RESET_BIT; 2332 break; 2333 case HNAE3_FUNC_RESET: 2334 reg = HCLGE_FUN_RST_ING; 2335 reg_bit = HCLGE_FUN_RST_ING_B; 2336 break; 2337 default: 2338 dev_err(&hdev->pdev->dev, 2339 "Wait for unsupported reset type: %d\n", 2340 hdev->reset_type); 2341 return -EINVAL; 2342 } 2343 2344 val = hclge_read_dev(&hdev->hw, reg); 2345 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 2346 msleep(HCLGE_RESET_WATI_MS); 2347 val = hclge_read_dev(&hdev->hw, reg); 2348 cnt++; 2349 } 2350 2351 if (cnt >= HCLGE_RESET_WAIT_CNT) { 2352 dev_warn(&hdev->pdev->dev, 2353 "Wait for reset timeout: %d\n", hdev->reset_type); 2354 return -EBUSY; 2355 } 2356 2357 return 0; 2358 } 2359 2360 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 2361 { 2362 
struct hclge_desc desc; 2363 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 2364 int ret; 2365 2366 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 2367 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 2368 req->fun_reset_vfid = func_id; 2369 2370 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2371 if (ret) 2372 dev_err(&hdev->pdev->dev, 2373 "send function reset cmd fail, status =%d\n", ret); 2374 2375 return ret; 2376 } 2377 2378 static void hclge_do_reset(struct hclge_dev *hdev) 2379 { 2380 struct pci_dev *pdev = hdev->pdev; 2381 u32 val; 2382 2383 switch (hdev->reset_type) { 2384 case HNAE3_GLOBAL_RESET: 2385 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2386 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 2387 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2388 dev_info(&pdev->dev, "Global Reset requested\n"); 2389 break; 2390 case HNAE3_CORE_RESET: 2391 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2392 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); 2393 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2394 dev_info(&pdev->dev, "Core Reset requested\n"); 2395 break; 2396 case HNAE3_FUNC_RESET: 2397 dev_info(&pdev->dev, "PF Reset requested\n"); 2398 hclge_func_reset_cmd(hdev, 0); 2399 /* schedule again to check later */ 2400 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 2401 hclge_reset_task_schedule(hdev); 2402 break; 2403 default: 2404 dev_warn(&pdev->dev, 2405 "Unsupported reset type: %d\n", hdev->reset_type); 2406 break; 2407 } 2408 } 2409 2410 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, 2411 unsigned long *addr) 2412 { 2413 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2414 2415 /* return the highest priority reset level amongst all */ 2416 if (test_bit(HNAE3_GLOBAL_RESET, addr)) 2417 rst_level = HNAE3_GLOBAL_RESET; 2418 else if (test_bit(HNAE3_CORE_RESET, addr)) 2419 rst_level = HNAE3_CORE_RESET; 2420 else if (test_bit(HNAE3_IMP_RESET, addr)) 2421 rst_level = HNAE3_IMP_RESET; 2422 else if (test_bit(HNAE3_FUNC_RESET, addr)) 2423 rst_level = HNAE3_FUNC_RESET; 2424 2425 /* now, clear all other resets */ 2426 clear_bit(HNAE3_GLOBAL_RESET, addr); 2427 clear_bit(HNAE3_CORE_RESET, addr); 2428 clear_bit(HNAE3_IMP_RESET, addr); 2429 clear_bit(HNAE3_FUNC_RESET, addr); 2430 2431 return rst_level; 2432 } 2433 2434 static void hclge_clear_reset_cause(struct hclge_dev *hdev) 2435 { 2436 u32 clearval = 0; 2437 2438 switch (hdev->reset_type) { 2439 case HNAE3_IMP_RESET: 2440 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2441 break; 2442 case HNAE3_GLOBAL_RESET: 2443 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2444 break; 2445 case HNAE3_CORE_RESET: 2446 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2447 break; 2448 default: 2449 break; 2450 } 2451 2452 if (!clearval) 2453 return; 2454 2455 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); 2456 hclge_enable_vector(&hdev->misc_vector, true); 2457 } 2458 2459 static void hclge_reset(struct hclge_dev *hdev) 2460 { 2461 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2462 struct hnae3_handle *handle; 2463 2464 /* Initialize ae_dev reset status as well, in case enet layer wants to 2465 * know if device is undergoing reset 2466 */ 2467 ae_dev->reset_type = hdev->reset_type; 2468 /* perform reset of the stack & ae device for a client */ 2469 handle = &hdev->vport[0].nic; 2470 rtnl_lock(); 2471 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2472 2473 if (!hclge_reset_wait(hdev)) { 2474 
		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
		hclge_reset_ae_dev(hdev->ae_dev);
		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);

		hclge_clear_reset_cause(hdev);
	} else {
		/* schedule again to check pending resets later */
		set_bit(hdev->reset_type, &hdev->reset_pending);
		hclge_reset_task_schedule(hdev);
	}

	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	handle->last_reset_time = jiffies;
	rtnl_unlock();
	ae_dev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_reset_event(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* check if this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog
	 * hit us again. We know it is new if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, so check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. If it is a repeat of the most
	 * recent reset request, we throttle it and do not allow it again
	 * before 3*HZ has passed.
	 */
	if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
		return;
	else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
		handle->reset_level = HNAE3_FUNC_RESET;

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 handle->reset_level);

	/* request reset & schedule reset task */
	set_bit(handle->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (handle->reset_level < HNAE3_GLOBAL_RESET)
		handle->reset_level++;
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	/* check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is then, we need to
	 * wait for the hardware to complete the reset.
	 * a. If we are able to figure out in reasonable time that the
	 *    hardware has fully reset, then we can proceed with the driver
	 *    and client reset.
	 * b. else, we can come back later to check this status so re-sched
	 *    now.
2531 */ 2532 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); 2533 if (hdev->reset_type != HNAE3_NONE_RESET) 2534 hclge_reset(hdev); 2535 2536 /* check if we got any *new* reset requests to be honored */ 2537 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); 2538 if (hdev->reset_type != HNAE3_NONE_RESET) 2539 hclge_do_reset(hdev); 2540 2541 hdev->reset_type = HNAE3_NONE_RESET; 2542 } 2543 2544 static void hclge_reset_service_task(struct work_struct *work) 2545 { 2546 struct hclge_dev *hdev = 2547 container_of(work, struct hclge_dev, rst_service_task); 2548 2549 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2550 return; 2551 2552 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 2553 2554 hclge_reset_subtask(hdev); 2555 2556 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 2557 } 2558 2559 static void hclge_mailbox_service_task(struct work_struct *work) 2560 { 2561 struct hclge_dev *hdev = 2562 container_of(work, struct hclge_dev, mbx_service_task); 2563 2564 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 2565 return; 2566 2567 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 2568 2569 hclge_mbx_handler(hdev); 2570 2571 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 2572 } 2573 2574 static void hclge_service_task(struct work_struct *work) 2575 { 2576 struct hclge_dev *hdev = 2577 container_of(work, struct hclge_dev, service_task); 2578 2579 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { 2580 hclge_update_stats_for_all(hdev); 2581 hdev->hw_stats.stats_timer = 0; 2582 } 2583 2584 hclge_update_speed_duplex(hdev); 2585 hclge_update_link_status(hdev); 2586 hclge_service_complete(hdev); 2587 } 2588 2589 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 2590 { 2591 /* VF handle has no client */ 2592 if (!handle->client) 2593 return container_of(handle, struct hclge_vport, nic); 2594 else if (handle->client->type == HNAE3_CLIENT_ROCE) 2595 return container_of(handle, struct hclge_vport, roce); 2596 else 2597 return container_of(handle, struct hclge_vport, nic); 2598 } 2599 2600 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 2601 struct hnae3_vector_info *vector_info) 2602 { 2603 struct hclge_vport *vport = hclge_get_vport(handle); 2604 struct hnae3_vector_info *vector = vector_info; 2605 struct hclge_dev *hdev = vport->back; 2606 int alloc = 0; 2607 int i, j; 2608 2609 vector_num = min(hdev->num_msi_left, vector_num); 2610 2611 for (j = 0; j < vector_num; j++) { 2612 for (i = 1; i < hdev->num_msi; i++) { 2613 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 2614 vector->vector = pci_irq_vector(hdev->pdev, i); 2615 vector->io_addr = hdev->hw.io_base + 2616 HCLGE_VECTOR_REG_BASE + 2617 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 2618 vport->vport_id * 2619 HCLGE_VECTOR_VF_OFFSET; 2620 hdev->vector_status[i] = vport->vport_id; 2621 hdev->vector_irq[i] = vector->vector; 2622 2623 vector++; 2624 alloc++; 2625 2626 break; 2627 } 2628 } 2629 } 2630 hdev->num_msi_left -= alloc; 2631 hdev->num_msi_used += alloc; 2632 2633 return alloc; 2634 } 2635 2636 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 2637 { 2638 int i; 2639 2640 for (i = 0; i < hdev->num_msi; i++) 2641 if (vector == hdev->vector_irq[i]) 2642 return i; 2643 2644 return -EINVAL; 2645 } 2646 2647 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 2648 { 2649 struct hclge_vport *vport = hclge_get_vport(handle); 2650 struct hclge_dev *hdev = vport->back; 2651 int 
vector_id; 2652 2653 vector_id = hclge_get_vector_index(hdev, vector); 2654 if (vector_id < 0) { 2655 dev_err(&hdev->pdev->dev, 2656 "Get vector index fail. vector_id =%d\n", vector_id); 2657 return vector_id; 2658 } 2659 2660 hclge_free_vector(hdev, vector_id); 2661 2662 return 0; 2663 } 2664 2665 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 2666 { 2667 return HCLGE_RSS_KEY_SIZE; 2668 } 2669 2670 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 2671 { 2672 return HCLGE_RSS_IND_TBL_SIZE; 2673 } 2674 2675 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 2676 const u8 hfunc, const u8 *key) 2677 { 2678 struct hclge_rss_config_cmd *req; 2679 struct hclge_desc desc; 2680 int key_offset; 2681 int key_size; 2682 int ret; 2683 2684 req = (struct hclge_rss_config_cmd *)desc.data; 2685 2686 for (key_offset = 0; key_offset < 3; key_offset++) { 2687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 2688 false); 2689 2690 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 2691 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 2692 2693 if (key_offset == 2) 2694 key_size = 2695 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; 2696 else 2697 key_size = HCLGE_RSS_HASH_KEY_NUM; 2698 2699 memcpy(req->hash_key, 2700 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 2701 2702 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2703 if (ret) { 2704 dev_err(&hdev->pdev->dev, 2705 "Configure RSS config fail, status = %d\n", 2706 ret); 2707 return ret; 2708 } 2709 } 2710 return 0; 2711 } 2712 2713 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) 2714 { 2715 struct hclge_rss_indirection_table_cmd *req; 2716 struct hclge_desc desc; 2717 int i, j; 2718 int ret; 2719 2720 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 2721 2722 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 2723 hclge_cmd_setup_basic_desc 2724 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 2725 2726 req->start_table_index = 2727 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 2728 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 2729 2730 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 2731 req->rss_result[j] = 2732 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 2733 2734 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2735 if (ret) { 2736 dev_err(&hdev->pdev->dev, 2737 "Configure rss indir table fail,status = %d\n", 2738 ret); 2739 return ret; 2740 } 2741 } 2742 return 0; 2743 } 2744 2745 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 2746 u16 *tc_size, u16 *tc_offset) 2747 { 2748 struct hclge_rss_tc_mode_cmd *req; 2749 struct hclge_desc desc; 2750 int ret; 2751 int i; 2752 2753 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 2754 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 2755 2756 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2757 u16 mode = 0; 2758 2759 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 2760 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, 2761 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 2762 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 2763 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 2764 2765 req->rss_tc_mode[i] = cpu_to_le16(mode); 2766 } 2767 2768 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2769 if (ret) 2770 dev_err(&hdev->pdev->dev, 2771 "Configure rss tc mode fail, status = %d\n", ret); 2772 2773 return ret; 2774 } 2775 2776 static void hclge_get_rss_type(struct hclge_vport *vport) 2777 { 2778 if (vport->rss_tuple_sets.ipv4_tcp_en || 2779 vport->rss_tuple_sets.ipv4_udp_en || 2780 
vport->rss_tuple_sets.ipv4_sctp_en || 2781 vport->rss_tuple_sets.ipv6_tcp_en || 2782 vport->rss_tuple_sets.ipv6_udp_en || 2783 vport->rss_tuple_sets.ipv6_sctp_en) 2784 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4; 2785 else if (vport->rss_tuple_sets.ipv4_fragment_en || 2786 vport->rss_tuple_sets.ipv6_fragment_en) 2787 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3; 2788 else 2789 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE; 2790 } 2791 2792 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 2793 { 2794 struct hclge_rss_input_tuple_cmd *req; 2795 struct hclge_desc desc; 2796 int ret; 2797 2798 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 2799 2800 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 2801 2802 /* Get the tuple cfg from pf */ 2803 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; 2804 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; 2805 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; 2806 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; 2807 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; 2808 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; 2809 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; 2810 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; 2811 hclge_get_rss_type(&hdev->vport[0]); 2812 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2813 if (ret) 2814 dev_err(&hdev->pdev->dev, 2815 "Configure rss input fail, status = %d\n", ret); 2816 return ret; 2817 } 2818 2819 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 2820 u8 *key, u8 *hfunc) 2821 { 2822 struct hclge_vport *vport = hclge_get_vport(handle); 2823 int i; 2824 2825 /* Get hash algorithm */ 2826 if (hfunc) { 2827 switch (vport->rss_algo) { 2828 case HCLGE_RSS_HASH_ALGO_TOEPLITZ: 2829 *hfunc = ETH_RSS_HASH_TOP; 2830 break; 2831 case HCLGE_RSS_HASH_ALGO_SIMPLE: 2832 *hfunc = ETH_RSS_HASH_XOR; 2833 break; 2834 default: 2835 *hfunc = ETH_RSS_HASH_UNKNOWN; 2836 break; 2837 } 2838 } 2839 2840 /* Get the RSS Key required by the user */ 2841 if (key) 2842 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 2843 2844 /* Get indirect table */ 2845 if (indir) 2846 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 2847 indir[i] = vport->rss_indirection_tbl[i]; 2848 2849 return 0; 2850 } 2851 2852 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 2853 const u8 *key, const u8 hfunc) 2854 { 2855 struct hclge_vport *vport = hclge_get_vport(handle); 2856 struct hclge_dev *hdev = vport->back; 2857 u8 hash_algo; 2858 int ret, i; 2859 2860 /* Set the RSS Hash Key if specififed by the user */ 2861 if (key) { 2862 switch (hfunc) { 2863 case ETH_RSS_HASH_TOP: 2864 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 2865 break; 2866 case ETH_RSS_HASH_XOR: 2867 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; 2868 break; 2869 case ETH_RSS_HASH_NO_CHANGE: 2870 hash_algo = vport->rss_algo; 2871 break; 2872 default: 2873 return -EINVAL; 2874 } 2875 2876 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 2877 if (ret) 2878 return ret; 2879 2880 /* Update the shadow RSS key with user specified qids */ 2881 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 2882 vport->rss_algo = hash_algo; 2883 } 2884 2885 /* Update the shadow RSS table with user specified qids */ 2886 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 2887 vport->rss_indirection_tbl[i] = indir[i]; 2888 2889 /* Update the hardware */ 2890 return hclge_set_rss_indir_table(hdev, 
vport->rss_indirection_tbl); 2891 } 2892 2893 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 2894 { 2895 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; 2896 2897 if (nfc->data & RXH_L4_B_2_3) 2898 hash_sets |= HCLGE_D_PORT_BIT; 2899 else 2900 hash_sets &= ~HCLGE_D_PORT_BIT; 2901 2902 if (nfc->data & RXH_IP_SRC) 2903 hash_sets |= HCLGE_S_IP_BIT; 2904 else 2905 hash_sets &= ~HCLGE_S_IP_BIT; 2906 2907 if (nfc->data & RXH_IP_DST) 2908 hash_sets |= HCLGE_D_IP_BIT; 2909 else 2910 hash_sets &= ~HCLGE_D_IP_BIT; 2911 2912 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 2913 hash_sets |= HCLGE_V_TAG_BIT; 2914 2915 return hash_sets; 2916 } 2917 2918 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 2919 struct ethtool_rxnfc *nfc) 2920 { 2921 struct hclge_vport *vport = hclge_get_vport(handle); 2922 struct hclge_dev *hdev = vport->back; 2923 struct hclge_rss_input_tuple_cmd *req; 2924 struct hclge_desc desc; 2925 u8 tuple_sets; 2926 int ret; 2927 2928 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 2929 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 2930 return -EINVAL; 2931 2932 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 2933 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 2934 2935 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; 2936 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; 2937 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; 2938 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; 2939 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; 2940 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; 2941 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; 2942 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; 2943 2944 tuple_sets = hclge_get_rss_hash_bits(nfc); 2945 switch (nfc->flow_type) { 2946 case TCP_V4_FLOW: 2947 req->ipv4_tcp_en = tuple_sets; 2948 break; 2949 case TCP_V6_FLOW: 2950 req->ipv6_tcp_en = tuple_sets; 2951 break; 2952 case UDP_V4_FLOW: 2953 req->ipv4_udp_en = tuple_sets; 2954 break; 2955 case UDP_V6_FLOW: 2956 req->ipv6_udp_en = tuple_sets; 2957 break; 2958 case SCTP_V4_FLOW: 2959 req->ipv4_sctp_en = tuple_sets; 2960 break; 2961 case SCTP_V6_FLOW: 2962 if ((nfc->data & RXH_L4_B_0_1) || 2963 (nfc->data & RXH_L4_B_2_3)) 2964 return -EINVAL; 2965 2966 req->ipv6_sctp_en = tuple_sets; 2967 break; 2968 case IPV4_FLOW: 2969 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2970 break; 2971 case IPV6_FLOW: 2972 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2973 break; 2974 default: 2975 return -EINVAL; 2976 } 2977 2978 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2979 if (ret) { 2980 dev_err(&hdev->pdev->dev, 2981 "Set rss tuple fail, status = %d\n", ret); 2982 return ret; 2983 } 2984 2985 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 2986 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 2987 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 2988 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 2989 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 2990 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 2991 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 2992 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 2993 hclge_get_rss_type(vport); 2994 return 0; 2995 } 2996 2997 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 2998 struct ethtool_rxnfc *nfc) 2999 { 3000 struct hclge_vport *vport = hclge_get_vport(handle); 3001 u8 tuple_sets; 3002 3003 nfc->data = 0; 3004 3005 switch 
(nfc->flow_type) { 3006 case TCP_V4_FLOW: 3007 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; 3008 break; 3009 case UDP_V4_FLOW: 3010 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; 3011 break; 3012 case TCP_V6_FLOW: 3013 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; 3014 break; 3015 case UDP_V6_FLOW: 3016 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; 3017 break; 3018 case SCTP_V4_FLOW: 3019 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; 3020 break; 3021 case SCTP_V6_FLOW: 3022 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; 3023 break; 3024 case IPV4_FLOW: 3025 case IPV6_FLOW: 3026 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 3027 break; 3028 default: 3029 return -EINVAL; 3030 } 3031 3032 if (!tuple_sets) 3033 return 0; 3034 3035 if (tuple_sets & HCLGE_D_PORT_BIT) 3036 nfc->data |= RXH_L4_B_2_3; 3037 if (tuple_sets & HCLGE_S_PORT_BIT) 3038 nfc->data |= RXH_L4_B_0_1; 3039 if (tuple_sets & HCLGE_D_IP_BIT) 3040 nfc->data |= RXH_IP_DST; 3041 if (tuple_sets & HCLGE_S_IP_BIT) 3042 nfc->data |= RXH_IP_SRC; 3043 3044 return 0; 3045 } 3046 3047 static int hclge_get_tc_size(struct hnae3_handle *handle) 3048 { 3049 struct hclge_vport *vport = hclge_get_vport(handle); 3050 struct hclge_dev *hdev = vport->back; 3051 3052 return hdev->rss_size_max; 3053 } 3054 3055 int hclge_rss_init_hw(struct hclge_dev *hdev) 3056 { 3057 struct hclge_vport *vport = hdev->vport; 3058 u8 *rss_indir = vport[0].rss_indirection_tbl; 3059 u16 rss_size = vport[0].alloc_rss_size; 3060 u8 *key = vport[0].rss_hash_key; 3061 u8 hfunc = vport[0].rss_algo; 3062 u16 tc_offset[HCLGE_MAX_TC_NUM]; 3063 u16 tc_valid[HCLGE_MAX_TC_NUM]; 3064 u16 tc_size[HCLGE_MAX_TC_NUM]; 3065 u16 roundup_size; 3066 int i, ret; 3067 3068 ret = hclge_set_rss_indir_table(hdev, rss_indir); 3069 if (ret) 3070 return ret; 3071 3072 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 3073 if (ret) 3074 return ret; 3075 3076 ret = hclge_set_rss_input_tuple(hdev); 3077 if (ret) 3078 return ret; 3079 3080 /* Each TC have the same queue size, and tc_size set to hardware is 3081 * the log2 of roundup power of two of rss_size, the acutal queue 3082 * size is limited by indirection table. 
3083 */ 3084 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 3085 dev_err(&hdev->pdev->dev, 3086 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 3087 rss_size); 3088 return -EINVAL; 3089 } 3090 3091 roundup_size = roundup_pow_of_two(rss_size); 3092 roundup_size = ilog2(roundup_size); 3093 3094 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3095 tc_valid[i] = 0; 3096 3097 if (!(hdev->hw_tc_map & BIT(i))) 3098 continue; 3099 3100 tc_valid[i] = 1; 3101 tc_size[i] = roundup_size; 3102 tc_offset[i] = rss_size * i; 3103 } 3104 3105 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 3106 } 3107 3108 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) 3109 { 3110 struct hclge_vport *vport = hdev->vport; 3111 int i, j; 3112 3113 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 3114 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3115 vport[j].rss_indirection_tbl[i] = 3116 i % vport[j].alloc_rss_size; 3117 } 3118 } 3119 3120 static void hclge_rss_init_cfg(struct hclge_dev *hdev) 3121 { 3122 struct hclge_vport *vport = hdev->vport; 3123 int i; 3124 3125 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 3126 vport[i].rss_tuple_sets.ipv4_tcp_en = 3127 HCLGE_RSS_INPUT_TUPLE_OTHER; 3128 vport[i].rss_tuple_sets.ipv4_udp_en = 3129 HCLGE_RSS_INPUT_TUPLE_OTHER; 3130 vport[i].rss_tuple_sets.ipv4_sctp_en = 3131 HCLGE_RSS_INPUT_TUPLE_SCTP; 3132 vport[i].rss_tuple_sets.ipv4_fragment_en = 3133 HCLGE_RSS_INPUT_TUPLE_OTHER; 3134 vport[i].rss_tuple_sets.ipv6_tcp_en = 3135 HCLGE_RSS_INPUT_TUPLE_OTHER; 3136 vport[i].rss_tuple_sets.ipv6_udp_en = 3137 HCLGE_RSS_INPUT_TUPLE_OTHER; 3138 vport[i].rss_tuple_sets.ipv6_sctp_en = 3139 HCLGE_RSS_INPUT_TUPLE_SCTP; 3140 vport[i].rss_tuple_sets.ipv6_fragment_en = 3141 HCLGE_RSS_INPUT_TUPLE_OTHER; 3142 3143 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3144 3145 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); 3146 } 3147 3148 hclge_rss_indir_init_cfg(hdev); 3149 } 3150 3151 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 3152 int vector_id, bool en, 3153 struct hnae3_ring_chain_node *ring_chain) 3154 { 3155 struct hclge_dev *hdev = vport->back; 3156 struct hnae3_ring_chain_node *node; 3157 struct hclge_desc desc; 3158 struct hclge_ctrl_vector_chain_cmd *req 3159 = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 3160 enum hclge_cmd_status status; 3161 enum hclge_opcode_type op; 3162 u16 tqp_type_and_id; 3163 int i; 3164 3165 op = en ? 
HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 3166 hclge_cmd_setup_basic_desc(&desc, op, false); 3167 req->int_vector_id = vector_id; 3168 3169 i = 0; 3170 for (node = ring_chain; node; node = node->next) { 3171 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 3172 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 3173 HCLGE_INT_TYPE_S, 3174 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3175 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 3176 HCLGE_TQP_ID_S, node->tqp_index); 3177 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 3178 HCLGE_INT_GL_IDX_S, 3179 hnae3_get_field(node->int_gl_idx, 3180 HNAE3_RING_GL_IDX_M, 3181 HNAE3_RING_GL_IDX_S)); 3182 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 3183 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 3184 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 3185 req->vfid = vport->vport_id; 3186 3187 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3188 if (status) { 3189 dev_err(&hdev->pdev->dev, 3190 "Map TQP fail, status is %d.\n", 3191 status); 3192 return -EIO; 3193 } 3194 i = 0; 3195 3196 hclge_cmd_setup_basic_desc(&desc, 3197 op, 3198 false); 3199 req->int_vector_id = vector_id; 3200 } 3201 } 3202 3203 if (i > 0) { 3204 req->int_cause_num = i; 3205 req->vfid = vport->vport_id; 3206 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3207 if (status) { 3208 dev_err(&hdev->pdev->dev, 3209 "Map TQP fail, status is %d.\n", status); 3210 return -EIO; 3211 } 3212 } 3213 3214 return 0; 3215 } 3216 3217 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, 3218 int vector, 3219 struct hnae3_ring_chain_node *ring_chain) 3220 { 3221 struct hclge_vport *vport = hclge_get_vport(handle); 3222 struct hclge_dev *hdev = vport->back; 3223 int vector_id; 3224 3225 vector_id = hclge_get_vector_index(hdev, vector); 3226 if (vector_id < 0) { 3227 dev_err(&hdev->pdev->dev, 3228 "Get vector index fail. vector_id =%d\n", vector_id); 3229 return vector_id; 3230 } 3231 3232 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 3233 } 3234 3235 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, 3236 int vector, 3237 struct hnae3_ring_chain_node *ring_chain) 3238 { 3239 struct hclge_vport *vport = hclge_get_vport(handle); 3240 struct hclge_dev *hdev = vport->back; 3241 int vector_id, ret; 3242 3243 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3244 return 0; 3245 3246 vector_id = hclge_get_vector_index(hdev, vector); 3247 if (vector_id < 0) { 3248 dev_err(&handle->pdev->dev, 3249 "Get vector index fail. ret =%d\n", vector_id); 3250 return vector_id; 3251 } 3252 3253 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 3254 if (ret) 3255 dev_err(&handle->pdev->dev, 3256 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 3257 vector_id, 3258 ret); 3259 3260 return ret; 3261 } 3262 3263 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 3264 struct hclge_promisc_param *param) 3265 { 3266 struct hclge_promisc_cfg_cmd *req; 3267 struct hclge_desc desc; 3268 int ret; 3269 3270 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 3271 3272 req = (struct hclge_promisc_cfg_cmd *)desc.data; 3273 req->vf_id = param->vf_id; 3274 3275 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on 3276 * pdev revision(0x20), new revision support them. The 3277 * value of this two fields will not return error when driver 3278 * send command to fireware in revision(0x20). 
3279 */ 3280 req->flag = (param->enable << HCLGE_PROMISC_EN_B) | 3281 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; 3282 3283 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3284 if (ret) 3285 dev_err(&hdev->pdev->dev, 3286 "Set promisc mode fail, status is %d.\n", ret); 3287 3288 return ret; 3289 } 3290 3291 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, 3292 bool en_mc, bool en_bc, int vport_id) 3293 { 3294 if (!param) 3295 return; 3296 3297 memset(param, 0, sizeof(struct hclge_promisc_param)); 3298 if (en_uc) 3299 param->enable = HCLGE_PROMISC_EN_UC; 3300 if (en_mc) 3301 param->enable |= HCLGE_PROMISC_EN_MC; 3302 if (en_bc) 3303 param->enable |= HCLGE_PROMISC_EN_BC; 3304 param->vf_id = vport_id; 3305 } 3306 3307 static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 3308 bool en_mc_pmc) 3309 { 3310 struct hclge_vport *vport = hclge_get_vport(handle); 3311 struct hclge_dev *hdev = vport->back; 3312 struct hclge_promisc_param param; 3313 3314 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, 3315 vport->vport_id); 3316 hclge_cmd_set_promisc_mode(hdev, ¶m); 3317 } 3318 3319 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) 3320 { 3321 struct hclge_get_fd_mode_cmd *req; 3322 struct hclge_desc desc; 3323 int ret; 3324 3325 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); 3326 3327 req = (struct hclge_get_fd_mode_cmd *)desc.data; 3328 3329 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3330 if (ret) { 3331 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); 3332 return ret; 3333 } 3334 3335 *fd_mode = req->mode; 3336 3337 return ret; 3338 } 3339 3340 static int hclge_get_fd_allocation(struct hclge_dev *hdev, 3341 u32 *stage1_entry_num, 3342 u32 *stage2_entry_num, 3343 u16 *stage1_counter_num, 3344 u16 *stage2_counter_num) 3345 { 3346 struct hclge_get_fd_allocation_cmd *req; 3347 struct hclge_desc desc; 3348 int ret; 3349 3350 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); 3351 3352 req = (struct hclge_get_fd_allocation_cmd *)desc.data; 3353 3354 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3355 if (ret) { 3356 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", 3357 ret); 3358 return ret; 3359 } 3360 3361 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); 3362 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); 3363 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); 3364 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); 3365 3366 return ret; 3367 } 3368 3369 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num) 3370 { 3371 struct hclge_set_fd_key_config_cmd *req; 3372 struct hclge_fd_key_cfg *stage; 3373 struct hclge_desc desc; 3374 int ret; 3375 3376 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); 3377 3378 req = (struct hclge_set_fd_key_config_cmd *)desc.data; 3379 stage = &hdev->fd_cfg.key_cfg[stage_num]; 3380 req->stage = stage_num; 3381 req->key_select = stage->key_sel; 3382 req->inner_sipv6_word_en = stage->inner_sipv6_word_en; 3383 req->inner_dipv6_word_en = stage->inner_dipv6_word_en; 3384 req->outer_sipv6_word_en = stage->outer_sipv6_word_en; 3385 req->outer_dipv6_word_en = stage->outer_dipv6_word_en; 3386 req->tuple_mask = cpu_to_le32(~stage->tuple_active); 3387 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); 3388 3389 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3390 if (ret) 3391 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); 3392 3393 return ret; 3394 } 3395 3396 
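/* hclge_init_fd_config - set up the flow director configuration for this PF:
 * query the FD mode from firmware to determine the TCAM key length, select
 * the stage-1 tuple key fields (VLAN tag, ether type, IP proto/TOS, src/dst
 * IP and src/dst L4 port, plus src/dst MAC when the full 400-bit key is
 * available), enable the ROCE-type and dst-vport meta data fields, query the
 * rule and counter allocation for both stages, and write the stage-1 key
 * configuration to hardware.
 */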
static int hclge_init_fd_config(struct hclge_dev *hdev) 3397 { 3398 #define LOW_2_WORDS 0x03 3399 struct hclge_fd_key_cfg *key_cfg; 3400 int ret; 3401 3402 if (!hnae3_dev_fd_supported(hdev)) 3403 return 0; 3404 3405 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); 3406 if (ret) 3407 return ret; 3408 3409 switch (hdev->fd_cfg.fd_mode) { 3410 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: 3411 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; 3412 break; 3413 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: 3414 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; 3415 break; 3416 default: 3417 dev_err(&hdev->pdev->dev, 3418 "Unsupported flow director mode %d\n", 3419 hdev->fd_cfg.fd_mode); 3420 return -EOPNOTSUPP; 3421 } 3422 3423 hdev->fd_cfg.fd_en = true; 3424 hdev->fd_cfg.proto_support = 3425 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | 3426 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; 3427 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; 3428 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, 3429 key_cfg->inner_sipv6_word_en = LOW_2_WORDS; 3430 key_cfg->inner_dipv6_word_en = LOW_2_WORDS; 3431 key_cfg->outer_sipv6_word_en = 0; 3432 key_cfg->outer_dipv6_word_en = 0; 3433 3434 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | 3435 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | 3436 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 3437 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 3438 3439 /* If use max 400bit key, we can support tuples for ether type */ 3440 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) { 3441 hdev->fd_cfg.proto_support |= ETHER_FLOW; 3442 key_cfg->tuple_active |= 3443 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); 3444 } 3445 3446 /* roce_type is used to filter roce frames 3447 * dst_vport is used to specify the rule 3448 */ 3449 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); 3450 3451 ret = hclge_get_fd_allocation(hdev, 3452 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 3453 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], 3454 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], 3455 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); 3456 if (ret) 3457 return ret; 3458 3459 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); 3460 } 3461 3462 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, 3463 int loc, u8 *key, bool is_add) 3464 { 3465 struct hclge_fd_tcam_config_1_cmd *req1; 3466 struct hclge_fd_tcam_config_2_cmd *req2; 3467 struct hclge_fd_tcam_config_3_cmd *req3; 3468 struct hclge_desc desc[3]; 3469 int ret; 3470 3471 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); 3472 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3473 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); 3474 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3475 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); 3476 3477 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 3478 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 3479 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 3480 3481 req1->stage = stage; 3482 req1->xy_sel = sel_x ? 1 : 0; 3483 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); 3484 req1->index = cpu_to_le32(loc); 3485 req1->entry_vld = sel_x ? 
			   is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	int i;

	if (rule->unused_tuple & tuple_bit)
		return true;

	switch (tuple_bit) {
	case 0:
		return false;
	case BIT(INNER_DST_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
			       rule->tuples_mask.dst_mac[i]);
		}

		return true;
	case BIT(INNER_SRC_MAC):
		for (i = 0; i < 6; i++) {
			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
		}

		return true;
	case BIT(INNER_VLAN_TAG_FST):
		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
		       rule->tuples_mask.vlan_tag1);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_ETH_TYPE):
		calc_x(tmp_x_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		calc_y(tmp_y_s, rule->tuples.ether_proto,
		       rule->tuples_mask.ether_proto);
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case BIT(INNER_IP_TOS):
		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
		calc_y(*key_y, rule->tuples.ip_tos,
rule->tuples_mask.ip_tos); 3595 3596 return true; 3597 case BIT(INNER_IP_PROTO): 3598 calc_x(*key_x, rule->tuples.ip_proto, 3599 rule->tuples_mask.ip_proto); 3600 calc_y(*key_y, rule->tuples.ip_proto, 3601 rule->tuples_mask.ip_proto); 3602 3603 return true; 3604 case BIT(INNER_SRC_IP): 3605 calc_x(tmp_x_l, rule->tuples.src_ip[3], 3606 rule->tuples_mask.src_ip[3]); 3607 calc_y(tmp_y_l, rule->tuples.src_ip[3], 3608 rule->tuples_mask.src_ip[3]); 3609 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 3610 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 3611 3612 return true; 3613 case BIT(INNER_DST_IP): 3614 calc_x(tmp_x_l, rule->tuples.dst_ip[3], 3615 rule->tuples_mask.dst_ip[3]); 3616 calc_y(tmp_y_l, rule->tuples.dst_ip[3], 3617 rule->tuples_mask.dst_ip[3]); 3618 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 3619 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 3620 3621 return true; 3622 case BIT(INNER_SRC_PORT): 3623 calc_x(tmp_x_s, rule->tuples.src_port, 3624 rule->tuples_mask.src_port); 3625 calc_y(tmp_y_s, rule->tuples.src_port, 3626 rule->tuples_mask.src_port); 3627 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 3628 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 3629 3630 return true; 3631 case BIT(INNER_DST_PORT): 3632 calc_x(tmp_x_s, rule->tuples.dst_port, 3633 rule->tuples_mask.dst_port); 3634 calc_y(tmp_y_s, rule->tuples.dst_port, 3635 rule->tuples_mask.dst_port); 3636 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 3637 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 3638 3639 return true; 3640 default: 3641 return false; 3642 } 3643 } 3644 3645 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, 3646 u8 vf_id, u8 network_port_id) 3647 { 3648 u32 port_number = 0; 3649 3650 if (port_type == HOST_PORT) { 3651 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, 3652 pf_id); 3653 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, 3654 vf_id); 3655 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); 3656 } else { 3657 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, 3658 HCLGE_NETWORK_PORT_ID_S, network_port_id); 3659 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); 3660 } 3661 3662 return port_number; 3663 } 3664 3665 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, 3666 __le32 *key_x, __le32 *key_y, 3667 struct hclge_fd_rule *rule) 3668 { 3669 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; 3670 u8 cur_pos = 0, tuple_size, shift_bits; 3671 int i; 3672 3673 for (i = 0; i < MAX_META_DATA; i++) { 3674 tuple_size = meta_data_key_info[i].key_length; 3675 tuple_bit = key_cfg->meta_data_active & BIT(i); 3676 3677 switch (tuple_bit) { 3678 case BIT(ROCE_TYPE): 3679 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); 3680 cur_pos += tuple_size; 3681 break; 3682 case BIT(DST_VPORT): 3683 port_number = hclge_get_port_number(HOST_PORT, 0, 3684 rule->vf_id, 0); 3685 hnae3_set_field(meta_data, 3686 GENMASK(cur_pos + tuple_size, cur_pos), 3687 cur_pos, port_number); 3688 cur_pos += tuple_size; 3689 break; 3690 default: 3691 break; 3692 } 3693 } 3694 3695 calc_x(tmp_x, meta_data, 0xFFFFFFFF); 3696 calc_y(tmp_y, meta_data, 0xFFFFFFFF); 3697 shift_bits = sizeof(meta_data) * 8 - cur_pos; 3698 3699 *key_x = cpu_to_le32(tmp_x << shift_bits); 3700 *key_y = cpu_to_le32(tmp_y << shift_bits); 3701 } 3702 3703 /* A complete key is combined with meta data key and tuple key. 3704 * Meta data key is stored at the MSB region, and tuple key is stored at 3705 * the LSB region, unused bits will be filled 0. 
3706 */ 3707 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, 3708 struct hclge_fd_rule *rule) 3709 { 3710 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; 3711 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; 3712 u8 *cur_key_x, *cur_key_y; 3713 int i, ret, tuple_size; 3714 u8 meta_data_region; 3715 3716 memset(key_x, 0, sizeof(key_x)); 3717 memset(key_y, 0, sizeof(key_y)); 3718 cur_key_x = key_x; 3719 cur_key_y = key_y; 3720 3721 for (i = 0 ; i < MAX_TUPLE; i++) { 3722 bool tuple_valid; 3723 u32 check_tuple; 3724 3725 tuple_size = tuple_key_info[i].key_length / 8; 3726 check_tuple = key_cfg->tuple_active & BIT(i); 3727 3728 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, 3729 cur_key_y, rule); 3730 if (tuple_valid) { 3731 cur_key_x += tuple_size; 3732 cur_key_y += tuple_size; 3733 } 3734 } 3735 3736 meta_data_region = hdev->fd_cfg.max_key_length / 8 - 3737 MAX_META_DATA_LENGTH / 8; 3738 3739 hclge_fd_convert_meta_data(key_cfg, 3740 (__le32 *)(key_x + meta_data_region), 3741 (__le32 *)(key_y + meta_data_region), 3742 rule); 3743 3744 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, 3745 true); 3746 if (ret) { 3747 dev_err(&hdev->pdev->dev, 3748 "fd key_y config fail, loc=%d, ret=%d\n", 3749 rule->queue_id, ret); 3750 return ret; 3751 } 3752 3753 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, 3754 true); 3755 if (ret) 3756 dev_err(&hdev->pdev->dev, 3757 "fd key_x config fail, loc=%d, ret=%d\n", 3758 rule->queue_id, ret); 3759 return ret; 3760 } 3761 3762 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, 3763 struct hclge_fd_rule *rule) 3764 { 3765 struct hclge_fd_ad_data ad_data; 3766 3767 ad_data.ad_id = rule->location; 3768 3769 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 3770 ad_data.drop_packet = true; 3771 ad_data.forward_to_direct_queue = false; 3772 ad_data.queue_id = 0; 3773 } else { 3774 ad_data.drop_packet = false; 3775 ad_data.forward_to_direct_queue = true; 3776 ad_data.queue_id = rule->queue_id; 3777 } 3778 3779 ad_data.use_counter = false; 3780 ad_data.counter_id = 0; 3781 3782 ad_data.use_next_stage = false; 3783 ad_data.next_input_key = 0; 3784 3785 ad_data.write_rule_id_to_bd = true; 3786 ad_data.rule_id = rule->location; 3787 3788 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); 3789 } 3790 3791 static int hclge_fd_check_spec(struct hclge_dev *hdev, 3792 struct ethtool_rx_flow_spec *fs, u32 *unused) 3793 { 3794 struct ethtool_tcpip4_spec *tcp_ip4_spec; 3795 struct ethtool_usrip4_spec *usr_ip4_spec; 3796 struct ethtool_tcpip6_spec *tcp_ip6_spec; 3797 struct ethtool_usrip6_spec *usr_ip6_spec; 3798 struct ethhdr *ether_spec; 3799 3800 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 3801 return -EINVAL; 3802 3803 if (!(fs->flow_type & hdev->fd_cfg.proto_support)) 3804 return -EOPNOTSUPP; 3805 3806 if ((fs->flow_type & FLOW_EXT) && 3807 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { 3808 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); 3809 return -EOPNOTSUPP; 3810 } 3811 3812 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 3813 case SCTP_V4_FLOW: 3814 case TCP_V4_FLOW: 3815 case UDP_V4_FLOW: 3816 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec; 3817 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 3818 3819 if (!tcp_ip4_spec->ip4src) 3820 *unused |= BIT(INNER_SRC_IP); 3821 3822 if (!tcp_ip4_spec->ip4dst) 3823 *unused |= BIT(INNER_DST_IP); 3824 3825 if (!tcp_ip4_spec->psrc) 3826 *unused |= BIT(INNER_SRC_PORT); 3827 3828 if 
(!tcp_ip4_spec->pdst) 3829 *unused |= BIT(INNER_DST_PORT); 3830 3831 if (!tcp_ip4_spec->tos) 3832 *unused |= BIT(INNER_IP_TOS); 3833 3834 break; 3835 case IP_USER_FLOW: 3836 usr_ip4_spec = &fs->h_u.usr_ip4_spec; 3837 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 3838 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 3839 3840 if (!usr_ip4_spec->ip4src) 3841 *unused |= BIT(INNER_SRC_IP); 3842 3843 if (!usr_ip4_spec->ip4dst) 3844 *unused |= BIT(INNER_DST_IP); 3845 3846 if (!usr_ip4_spec->tos) 3847 *unused |= BIT(INNER_IP_TOS); 3848 3849 if (!usr_ip4_spec->proto) 3850 *unused |= BIT(INNER_IP_PROTO); 3851 3852 if (usr_ip4_spec->l4_4_bytes) 3853 return -EOPNOTSUPP; 3854 3855 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4) 3856 return -EOPNOTSUPP; 3857 3858 break; 3859 case SCTP_V6_FLOW: 3860 case TCP_V6_FLOW: 3861 case UDP_V6_FLOW: 3862 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec; 3863 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 3864 BIT(INNER_IP_TOS); 3865 3866 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] && 3867 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3]) 3868 *unused |= BIT(INNER_SRC_IP); 3869 3870 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] && 3871 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3]) 3872 *unused |= BIT(INNER_DST_IP); 3873 3874 if (!tcp_ip6_spec->psrc) 3875 *unused |= BIT(INNER_SRC_PORT); 3876 3877 if (!tcp_ip6_spec->pdst) 3878 *unused |= BIT(INNER_DST_PORT); 3879 3880 if (tcp_ip6_spec->tclass) 3881 return -EOPNOTSUPP; 3882 3883 break; 3884 case IPV6_USER_FLOW: 3885 usr_ip6_spec = &fs->h_u.usr_ip6_spec; 3886 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 3887 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | 3888 BIT(INNER_DST_PORT); 3889 3890 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] && 3891 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3]) 3892 *unused |= BIT(INNER_SRC_IP); 3893 3894 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] && 3895 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3]) 3896 *unused |= BIT(INNER_DST_IP); 3897 3898 if (!usr_ip6_spec->l4_proto) 3899 *unused |= BIT(INNER_IP_PROTO); 3900 3901 if (usr_ip6_spec->tclass) 3902 return -EOPNOTSUPP; 3903 3904 if (usr_ip6_spec->l4_4_bytes) 3905 return -EOPNOTSUPP; 3906 3907 break; 3908 case ETHER_FLOW: 3909 ether_spec = &fs->h_u.ether_spec; 3910 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 3911 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | 3912 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); 3913 3914 if (is_zero_ether_addr(ether_spec->h_source)) 3915 *unused |= BIT(INNER_SRC_MAC); 3916 3917 if (is_zero_ether_addr(ether_spec->h_dest)) 3918 *unused |= BIT(INNER_DST_MAC); 3919 3920 if (!ether_spec->h_proto) 3921 *unused |= BIT(INNER_ETH_TYPE); 3922 3923 break; 3924 default: 3925 return -EOPNOTSUPP; 3926 } 3927 3928 if ((fs->flow_type & FLOW_EXT)) { 3929 if (fs->h_ext.vlan_etype) 3930 return -EOPNOTSUPP; 3931 if (!fs->h_ext.vlan_tci) 3932 *unused |= BIT(INNER_VLAN_TAG_FST); 3933 3934 if (fs->m_ext.vlan_tci) { 3935 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) 3936 return -EINVAL; 3937 } 3938 } else { 3939 *unused |= BIT(INNER_VLAN_TAG_FST); 3940 } 3941 3942 if (fs->flow_type & FLOW_MAC_EXT) { 3943 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW)) 3944 return -EOPNOTSUPP; 3945 3946 if (is_zero_ether_addr(fs->h_ext.h_dest)) 3947 *unused |= BIT(INNER_DST_MAC); 3948 else 3949 *unused &= ~(BIT(INNER_DST_MAC)); 3950 } 3951 3952 return 0; 3953 } 3954 3955 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) 3956 { 3957 struct hclge_fd_rule *rule 
= NULL; 3958 struct hlist_node *node2; 3959 3960 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 3961 if (rule->location >= location) 3962 break; 3963 } 3964 3965 return rule && rule->location == location; 3966 } 3967 3968 static int hclge_fd_update_rule_list(struct hclge_dev *hdev, 3969 struct hclge_fd_rule *new_rule, 3970 u16 location, 3971 bool is_add) 3972 { 3973 struct hclge_fd_rule *rule = NULL, *parent = NULL; 3974 struct hlist_node *node2; 3975 3976 if (is_add && !new_rule) 3977 return -EINVAL; 3978 3979 hlist_for_each_entry_safe(rule, node2, 3980 &hdev->fd_rule_list, rule_node) { 3981 if (rule->location >= location) 3982 break; 3983 parent = rule; 3984 } 3985 3986 if (rule && rule->location == location) { 3987 hlist_del(&rule->rule_node); 3988 kfree(rule); 3989 hdev->hclge_fd_rule_num--; 3990 3991 if (!is_add) 3992 return 0; 3993 3994 } else if (!is_add) { 3995 dev_err(&hdev->pdev->dev, 3996 "delete fail, rule %d is inexistent\n", 3997 location); 3998 return -EINVAL; 3999 } 4000 4001 INIT_HLIST_NODE(&new_rule->rule_node); 4002 4003 if (parent) 4004 hlist_add_behind(&new_rule->rule_node, &parent->rule_node); 4005 else 4006 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); 4007 4008 hdev->hclge_fd_rule_num++; 4009 4010 return 0; 4011 } 4012 4013 static int hclge_fd_get_tuple(struct hclge_dev *hdev, 4014 struct ethtool_rx_flow_spec *fs, 4015 struct hclge_fd_rule *rule) 4016 { 4017 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 4018 4019 switch (flow_type) { 4020 case SCTP_V4_FLOW: 4021 case TCP_V4_FLOW: 4022 case UDP_V4_FLOW: 4023 rule->tuples.src_ip[3] = 4024 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); 4025 rule->tuples_mask.src_ip[3] = 4026 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); 4027 4028 rule->tuples.dst_ip[3] = 4029 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); 4030 rule->tuples_mask.dst_ip[3] = 4031 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); 4032 4033 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); 4034 rule->tuples_mask.src_port = 4035 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); 4036 4037 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); 4038 rule->tuples_mask.dst_port = 4039 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); 4040 4041 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; 4042 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; 4043 4044 rule->tuples.ether_proto = ETH_P_IP; 4045 rule->tuples_mask.ether_proto = 0xFFFF; 4046 4047 break; 4048 case IP_USER_FLOW: 4049 rule->tuples.src_ip[3] = 4050 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); 4051 rule->tuples_mask.src_ip[3] = 4052 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); 4053 4054 rule->tuples.dst_ip[3] = 4055 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); 4056 rule->tuples_mask.dst_ip[3] = 4057 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); 4058 4059 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; 4060 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; 4061 4062 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; 4063 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; 4064 4065 rule->tuples.ether_proto = ETH_P_IP; 4066 rule->tuples_mask.ether_proto = 0xFFFF; 4067 4068 break; 4069 case SCTP_V6_FLOW: 4070 case TCP_V6_FLOW: 4071 case UDP_V6_FLOW: 4072 be32_to_cpu_array(rule->tuples.src_ip, 4073 fs->h_u.tcp_ip6_spec.ip6src, 4); 4074 be32_to_cpu_array(rule->tuples_mask.src_ip, 4075 fs->m_u.tcp_ip6_spec.ip6src, 4); 4076 4077 be32_to_cpu_array(rule->tuples.dst_ip, 4078 fs->h_u.tcp_ip6_spec.ip6dst, 4); 4079 be32_to_cpu_array(rule->tuples_mask.dst_ip, 4080 
fs->m_u.tcp_ip6_spec.ip6dst, 4); 4081 4082 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); 4083 rule->tuples_mask.src_port = 4084 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); 4085 4086 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); 4087 rule->tuples_mask.dst_port = 4088 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); 4089 4090 rule->tuples.ether_proto = ETH_P_IPV6; 4091 rule->tuples_mask.ether_proto = 0xFFFF; 4092 4093 break; 4094 case IPV6_USER_FLOW: 4095 be32_to_cpu_array(rule->tuples.src_ip, 4096 fs->h_u.usr_ip6_spec.ip6src, 4); 4097 be32_to_cpu_array(rule->tuples_mask.src_ip, 4098 fs->m_u.usr_ip6_spec.ip6src, 4); 4099 4100 be32_to_cpu_array(rule->tuples.dst_ip, 4101 fs->h_u.usr_ip6_spec.ip6dst, 4); 4102 be32_to_cpu_array(rule->tuples_mask.dst_ip, 4103 fs->m_u.usr_ip6_spec.ip6dst, 4); 4104 4105 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; 4106 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; 4107 4108 rule->tuples.ether_proto = ETH_P_IPV6; 4109 rule->tuples_mask.ether_proto = 0xFFFF; 4110 4111 break; 4112 case ETHER_FLOW: 4113 ether_addr_copy(rule->tuples.src_mac, 4114 fs->h_u.ether_spec.h_source); 4115 ether_addr_copy(rule->tuples_mask.src_mac, 4116 fs->m_u.ether_spec.h_source); 4117 4118 ether_addr_copy(rule->tuples.dst_mac, 4119 fs->h_u.ether_spec.h_dest); 4120 ether_addr_copy(rule->tuples_mask.dst_mac, 4121 fs->m_u.ether_spec.h_dest); 4122 4123 rule->tuples.ether_proto = 4124 be16_to_cpu(fs->h_u.ether_spec.h_proto); 4125 rule->tuples_mask.ether_proto = 4126 be16_to_cpu(fs->m_u.ether_spec.h_proto); 4127 4128 break; 4129 default: 4130 return -EOPNOTSUPP; 4131 } 4132 4133 switch (flow_type) { 4134 case SCTP_V4_FLOW: 4135 case SCTP_V6_FLOW: 4136 rule->tuples.ip_proto = IPPROTO_SCTP; 4137 rule->tuples_mask.ip_proto = 0xFF; 4138 break; 4139 case TCP_V4_FLOW: 4140 case TCP_V6_FLOW: 4141 rule->tuples.ip_proto = IPPROTO_TCP; 4142 rule->tuples_mask.ip_proto = 0xFF; 4143 break; 4144 case UDP_V4_FLOW: 4145 case UDP_V6_FLOW: 4146 rule->tuples.ip_proto = IPPROTO_UDP; 4147 rule->tuples_mask.ip_proto = 0xFF; 4148 break; 4149 default: 4150 break; 4151 } 4152 4153 if ((fs->flow_type & FLOW_EXT)) { 4154 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); 4155 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); 4156 } 4157 4158 if (fs->flow_type & FLOW_MAC_EXT) { 4159 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); 4160 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); 4161 } 4162 4163 return 0; 4164 } 4165 4166 static int hclge_add_fd_entry(struct hnae3_handle *handle, 4167 struct ethtool_rxnfc *cmd) 4168 { 4169 struct hclge_vport *vport = hclge_get_vport(handle); 4170 struct hclge_dev *hdev = vport->back; 4171 u16 dst_vport_id = 0, q_index = 0; 4172 struct ethtool_rx_flow_spec *fs; 4173 struct hclge_fd_rule *rule; 4174 u32 unused = 0; 4175 u8 action; 4176 int ret; 4177 4178 if (!hnae3_dev_fd_supported(hdev)) 4179 return -EOPNOTSUPP; 4180 4181 if (!hdev->fd_cfg.fd_en) { 4182 dev_warn(&hdev->pdev->dev, 4183 "Please enable flow director first\n"); 4184 return -EOPNOTSUPP; 4185 } 4186 4187 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 4188 4189 ret = hclge_fd_check_spec(hdev, fs, &unused); 4190 if (ret) { 4191 dev_err(&hdev->pdev->dev, "Check fd spec failed\n"); 4192 return ret; 4193 } 4194 4195 if (fs->ring_cookie == RX_CLS_FLOW_DISC) { 4196 action = HCLGE_FD_ACTION_DROP_PACKET; 4197 } else { 4198 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); 4199 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); 4200 u16 
tqps; 4201 4202 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; 4203 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; 4204 4205 if (ring >= tqps) { 4206 dev_err(&hdev->pdev->dev, 4207 "Error: queue id (%d) > max tqp num (%d)\n", 4208 ring, tqps - 1); 4209 return -EINVAL; 4210 } 4211 4212 if (vf > hdev->num_req_vfs) { 4213 dev_err(&hdev->pdev->dev, 4214 "Error: vf id (%d) > max vf num (%d)\n", 4215 vf, hdev->num_req_vfs); 4216 return -EINVAL; 4217 } 4218 4219 action = HCLGE_FD_ACTION_ACCEPT_PACKET; 4220 q_index = ring; 4221 } 4222 4223 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 4224 if (!rule) 4225 return -ENOMEM; 4226 4227 ret = hclge_fd_get_tuple(hdev, fs, rule); 4228 if (ret) 4229 goto free_rule; 4230 4231 rule->flow_type = fs->flow_type; 4232 4233 rule->location = fs->location; 4234 rule->unused_tuple = unused; 4235 rule->vf_id = dst_vport_id; 4236 rule->queue_id = q_index; 4237 rule->action = action; 4238 4239 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 4240 if (ret) 4241 goto free_rule; 4242 4243 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 4244 if (ret) 4245 goto free_rule; 4246 4247 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true); 4248 if (ret) 4249 goto free_rule; 4250 4251 return ret; 4252 4253 free_rule: 4254 kfree(rule); 4255 return ret; 4256 } 4257 4258 static int hclge_del_fd_entry(struct hnae3_handle *handle, 4259 struct ethtool_rxnfc *cmd) 4260 { 4261 struct hclge_vport *vport = hclge_get_vport(handle); 4262 struct hclge_dev *hdev = vport->back; 4263 struct ethtool_rx_flow_spec *fs; 4264 int ret; 4265 4266 if (!hnae3_dev_fd_supported(hdev)) 4267 return -EOPNOTSUPP; 4268 4269 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 4270 4271 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 4272 return -EINVAL; 4273 4274 if (!hclge_fd_rule_exist(hdev, fs->location)) { 4275 dev_err(&hdev->pdev->dev, 4276 "Delete fail, rule %d is inexistent\n", 4277 fs->location); 4278 return -ENOENT; 4279 } 4280 4281 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 4282 fs->location, NULL, false); 4283 if (ret) 4284 return ret; 4285 4286 return hclge_fd_update_rule_list(hdev, NULL, fs->location, 4287 false); 4288 } 4289 4290 static void hclge_del_all_fd_entries(struct hnae3_handle *handle, 4291 bool clear_list) 4292 { 4293 struct hclge_vport *vport = hclge_get_vport(handle); 4294 struct hclge_dev *hdev = vport->back; 4295 struct hclge_fd_rule *rule; 4296 struct hlist_node *node; 4297 4298 if (!hnae3_dev_fd_supported(hdev)) 4299 return; 4300 4301 if (clear_list) { 4302 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, 4303 rule_node) { 4304 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 4305 rule->location, NULL, false); 4306 hlist_del(&rule->rule_node); 4307 kfree(rule); 4308 hdev->hclge_fd_rule_num--; 4309 } 4310 } else { 4311 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, 4312 rule_node) 4313 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 4314 rule->location, NULL, false); 4315 } 4316 } 4317 4318 static int hclge_restore_fd_entries(struct hnae3_handle *handle) 4319 { 4320 struct hclge_vport *vport = hclge_get_vport(handle); 4321 struct hclge_dev *hdev = vport->back; 4322 struct hclge_fd_rule *rule; 4323 struct hlist_node *node; 4324 int ret; 4325 4326 if (!hnae3_dev_fd_supported(hdev)) 4327 return -EOPNOTSUPP; 4328 4329 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 4330 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 4331 if (!ret) 4332 ret = 
hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 4333 4334 if (ret) { 4335 dev_warn(&hdev->pdev->dev, 4336 "Restore rule %d failed, remove it\n", 4337 rule->location); 4338 hlist_del(&rule->rule_node); 4339 kfree(rule); 4340 hdev->hclge_fd_rule_num--; 4341 } 4342 } 4343 return 0; 4344 } 4345 4346 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, 4347 struct ethtool_rxnfc *cmd) 4348 { 4349 struct hclge_vport *vport = hclge_get_vport(handle); 4350 struct hclge_dev *hdev = vport->back; 4351 4352 if (!hnae3_dev_fd_supported(hdev)) 4353 return -EOPNOTSUPP; 4354 4355 cmd->rule_cnt = hdev->hclge_fd_rule_num; 4356 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 4357 4358 return 0; 4359 } 4360 4361 static int hclge_get_fd_rule_info(struct hnae3_handle *handle, 4362 struct ethtool_rxnfc *cmd) 4363 { 4364 struct hclge_vport *vport = hclge_get_vport(handle); 4365 struct hclge_fd_rule *rule = NULL; 4366 struct hclge_dev *hdev = vport->back; 4367 struct ethtool_rx_flow_spec *fs; 4368 struct hlist_node *node2; 4369 4370 if (!hnae3_dev_fd_supported(hdev)) 4371 return -EOPNOTSUPP; 4372 4373 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 4374 4375 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 4376 if (rule->location >= fs->location) 4377 break; 4378 } 4379 4380 if (!rule || fs->location != rule->location) 4381 return -ENOENT; 4382 4383 fs->flow_type = rule->flow_type; 4384 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 4385 case SCTP_V4_FLOW: 4386 case TCP_V4_FLOW: 4387 case UDP_V4_FLOW: 4388 fs->h_u.tcp_ip4_spec.ip4src = 4389 cpu_to_be32(rule->tuples.src_ip[3]); 4390 fs->m_u.tcp_ip4_spec.ip4src = 4391 rule->unused_tuple & BIT(INNER_SRC_IP) ? 4392 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); 4393 4394 fs->h_u.tcp_ip4_spec.ip4dst = 4395 cpu_to_be32(rule->tuples.dst_ip[3]); 4396 fs->m_u.tcp_ip4_spec.ip4dst = 4397 rule->unused_tuple & BIT(INNER_DST_IP) ? 4398 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); 4399 4400 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port); 4401 fs->m_u.tcp_ip4_spec.psrc = 4402 rule->unused_tuple & BIT(INNER_SRC_PORT) ? 4403 0 : cpu_to_be16(rule->tuples_mask.src_port); 4404 4405 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port); 4406 fs->m_u.tcp_ip4_spec.pdst = 4407 rule->unused_tuple & BIT(INNER_DST_PORT) ? 4408 0 : cpu_to_be16(rule->tuples_mask.dst_port); 4409 4410 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos; 4411 fs->m_u.tcp_ip4_spec.tos = 4412 rule->unused_tuple & BIT(INNER_IP_TOS) ? 4413 0 : rule->tuples_mask.ip_tos; 4414 4415 break; 4416 case IP_USER_FLOW: 4417 fs->h_u.usr_ip4_spec.ip4src = 4418 cpu_to_be32(rule->tuples.src_ip[3]); 4419 fs->m_u.tcp_ip4_spec.ip4src = 4420 rule->unused_tuple & BIT(INNER_SRC_IP) ? 4421 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]); 4422 4423 fs->h_u.usr_ip4_spec.ip4dst = 4424 cpu_to_be32(rule->tuples.dst_ip[3]); 4425 fs->m_u.usr_ip4_spec.ip4dst = 4426 rule->unused_tuple & BIT(INNER_DST_IP) ? 4427 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]); 4428 4429 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos; 4430 fs->m_u.usr_ip4_spec.tos = 4431 rule->unused_tuple & BIT(INNER_IP_TOS) ? 4432 0 : rule->tuples_mask.ip_tos; 4433 4434 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto; 4435 fs->m_u.usr_ip4_spec.proto = 4436 rule->unused_tuple & BIT(INNER_IP_PROTO) ? 
4437 0 : rule->tuples_mask.ip_proto; 4438 4439 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 4440 4441 break; 4442 case SCTP_V6_FLOW: 4443 case TCP_V6_FLOW: 4444 case UDP_V6_FLOW: 4445 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src, 4446 rule->tuples.src_ip, 4); 4447 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 4448 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4); 4449 else 4450 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src, 4451 rule->tuples_mask.src_ip, 4); 4452 4453 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst, 4454 rule->tuples.dst_ip, 4); 4455 if (rule->unused_tuple & BIT(INNER_DST_IP)) 4456 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4); 4457 else 4458 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst, 4459 rule->tuples_mask.dst_ip, 4); 4460 4461 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port); 4462 fs->m_u.tcp_ip6_spec.psrc = 4463 rule->unused_tuple & BIT(INNER_SRC_PORT) ? 4464 0 : cpu_to_be16(rule->tuples_mask.src_port); 4465 4466 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port); 4467 fs->m_u.tcp_ip6_spec.pdst = 4468 rule->unused_tuple & BIT(INNER_DST_PORT) ? 4469 0 : cpu_to_be16(rule->tuples_mask.dst_port); 4470 4471 break; 4472 case IPV6_USER_FLOW: 4473 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src, 4474 rule->tuples.src_ip, 4); 4475 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 4476 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4); 4477 else 4478 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src, 4479 rule->tuples_mask.src_ip, 4); 4480 4481 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst, 4482 rule->tuples.dst_ip, 4); 4483 if (rule->unused_tuple & BIT(INNER_DST_IP)) 4484 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4); 4485 else 4486 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst, 4487 rule->tuples_mask.dst_ip, 4); 4488 4489 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto; 4490 fs->m_u.usr_ip6_spec.l4_proto = 4491 rule->unused_tuple & BIT(INNER_IP_PROTO) ? 4492 0 : rule->tuples_mask.ip_proto; 4493 4494 break; 4495 case ETHER_FLOW: 4496 ether_addr_copy(fs->h_u.ether_spec.h_source, 4497 rule->tuples.src_mac); 4498 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) 4499 eth_zero_addr(fs->m_u.ether_spec.h_source); 4500 else 4501 ether_addr_copy(fs->m_u.ether_spec.h_source, 4502 rule->tuples_mask.src_mac); 4503 4504 ether_addr_copy(fs->h_u.ether_spec.h_dest, 4505 rule->tuples.dst_mac); 4506 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 4507 eth_zero_addr(fs->m_u.ether_spec.h_dest); 4508 else 4509 ether_addr_copy(fs->m_u.ether_spec.h_dest, 4510 rule->tuples_mask.dst_mac); 4511 4512 fs->h_u.ether_spec.h_proto = 4513 cpu_to_be16(rule->tuples.ether_proto); 4514 fs->m_u.ether_spec.h_proto = 4515 rule->unused_tuple & BIT(INNER_ETH_TYPE) ? 4516 0 : cpu_to_be16(rule->tuples_mask.ether_proto); 4517 4518 break; 4519 default: 4520 return -EOPNOTSUPP; 4521 } 4522 4523 if (fs->flow_type & FLOW_EXT) { 4524 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); 4525 fs->m_ext.vlan_tci = 4526 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 
4527 cpu_to_be16(VLAN_VID_MASK) : 4528 cpu_to_be16(rule->tuples_mask.vlan_tag1); 4529 } 4530 4531 if (fs->flow_type & FLOW_MAC_EXT) { 4532 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); 4533 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 4534 eth_zero_addr(fs->m_u.ether_spec.h_dest); 4535 else 4536 ether_addr_copy(fs->m_u.ether_spec.h_dest, 4537 rule->tuples_mask.dst_mac); 4538 } 4539 4540 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 4541 fs->ring_cookie = RX_CLS_FLOW_DISC; 4542 } else { 4543 u64 vf_id; 4544 4545 fs->ring_cookie = rule->queue_id; 4546 vf_id = rule->vf_id; 4547 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 4548 fs->ring_cookie |= vf_id; 4549 } 4550 4551 return 0; 4552 } 4553 4554 static int hclge_get_all_rules(struct hnae3_handle *handle, 4555 struct ethtool_rxnfc *cmd, u32 *rule_locs) 4556 { 4557 struct hclge_vport *vport = hclge_get_vport(handle); 4558 struct hclge_dev *hdev = vport->back; 4559 struct hclge_fd_rule *rule; 4560 struct hlist_node *node2; 4561 int cnt = 0; 4562 4563 if (!hnae3_dev_fd_supported(hdev)) 4564 return -EOPNOTSUPP; 4565 4566 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 4567 4568 hlist_for_each_entry_safe(rule, node2, 4569 &hdev->fd_rule_list, rule_node) { 4570 if (cnt == cmd->rule_cnt) 4571 return -EMSGSIZE; 4572 4573 rule_locs[cnt] = rule->location; 4574 cnt++; 4575 } 4576 4577 cmd->rule_cnt = cnt; 4578 4579 return 0; 4580 } 4581 4582 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) 4583 { 4584 struct hclge_vport *vport = hclge_get_vport(handle); 4585 struct hclge_dev *hdev = vport->back; 4586 4587 hdev->fd_cfg.fd_en = enable; 4588 if (!enable) 4589 hclge_del_all_fd_entries(handle, false); 4590 else 4591 hclge_restore_fd_entries(handle); 4592 } 4593 4594 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 4595 { 4596 struct hclge_desc desc; 4597 struct hclge_config_mac_mode_cmd *req = 4598 (struct hclge_config_mac_mode_cmd *)desc.data; 4599 u32 loop_en = 0; 4600 int ret; 4601 4602 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 4603 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); 4604 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); 4605 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); 4606 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); 4607 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); 4608 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); 4609 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 4610 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); 4611 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); 4612 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); 4613 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); 4614 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); 4615 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); 4616 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); 4617 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 4618 4619 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4620 if (ret) 4621 dev_err(&hdev->pdev->dev, 4622 "mac enable fail, ret =%d.\n", ret); 4623 } 4624 4625 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) 4626 { 4627 struct hclge_config_mac_mode_cmd *req; 4628 struct hclge_desc desc; 4629 u32 loop_en; 4630 int ret; 4631 4632 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 4633 /* 1 Read out the MAC mode config at first */ 4634 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 4635 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4636 if (ret) { 
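		/* Comment added for clarity (not in the original source): without
		 * the current MAC mode register value the read-modify-write in
		 * steps 2 and 3 below cannot be done safely, so bail out rather
		 * than clobber fields unrelated to loopback.
		 */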
4637 dev_err(&hdev->pdev->dev, 4638 "mac loopback get fail, ret =%d.\n", ret); 4639 return ret; 4640 } 4641 4642 /* 2 Then setup the loopback flag */ 4643 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 4644 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); 4645 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); 4646 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); 4647 4648 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 4649 4650 /* 3 Config mac work mode with loopback flag 4651 * and its original configure parameters 4652 */ 4653 hclge_cmd_reuse_desc(&desc, false); 4654 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4655 if (ret) 4656 dev_err(&hdev->pdev->dev, 4657 "mac loopback set fail, ret =%d.\n", ret); 4658 return ret; 4659 } 4660 4661 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, 4662 enum hnae3_loop loop_mode) 4663 { 4664 #define HCLGE_SERDES_RETRY_MS 10 4665 #define HCLGE_SERDES_RETRY_NUM 100 4666 struct hclge_serdes_lb_cmd *req; 4667 struct hclge_desc desc; 4668 int ret, i = 0; 4669 u8 loop_mode_b; 4670 4671 req = (struct hclge_serdes_lb_cmd *)desc.data; 4672 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false); 4673 4674 switch (loop_mode) { 4675 case HNAE3_LOOP_SERIAL_SERDES: 4676 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 4677 break; 4678 case HNAE3_LOOP_PARALLEL_SERDES: 4679 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; 4680 break; 4681 default: 4682 dev_err(&hdev->pdev->dev, 4683 "unsupported serdes loopback mode %d\n", loop_mode); 4684 return -ENOTSUPP; 4685 } 4686 4687 if (en) { 4688 req->enable = loop_mode_b; 4689 req->mask = loop_mode_b; 4690 } else { 4691 req->mask = loop_mode_b; 4692 } 4693 4694 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4695 if (ret) { 4696 dev_err(&hdev->pdev->dev, 4697 "serdes loopback set fail, ret = %d\n", ret); 4698 return ret; 4699 } 4700 4701 do { 4702 msleep(HCLGE_SERDES_RETRY_MS); 4703 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, 4704 true); 4705 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4706 if (ret) { 4707 dev_err(&hdev->pdev->dev, 4708 "serdes loopback get, ret = %d\n", ret); 4709 return ret; 4710 } 4711 } while (++i < HCLGE_SERDES_RETRY_NUM && 4712 !(req->result & HCLGE_CMD_SERDES_DONE_B)); 4713 4714 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) { 4715 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n"); 4716 return -EBUSY; 4717 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) { 4718 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n"); 4719 return -EIO; 4720 } 4721 4722 hclge_cfg_mac_mode(hdev, en); 4723 return 0; 4724 } 4725 4726 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, 4727 int stream_id, bool enable) 4728 { 4729 struct hclge_desc desc; 4730 struct hclge_cfg_com_tqp_queue_cmd *req = 4731 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 4732 int ret; 4733 4734 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 4735 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); 4736 req->stream_id = cpu_to_le16(stream_id); 4737 req->enable |= enable << HCLGE_TQP_ENABLE_B; 4738 4739 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4740 if (ret) 4741 dev_err(&hdev->pdev->dev, 4742 "Tqp enable fail, status =%d.\n", ret); 4743 return ret; 4744 } 4745 4746 static int hclge_set_loopback(struct hnae3_handle *handle, 4747 enum hnae3_loop loop_mode, bool en) 4748 { 4749 struct hclge_vport *vport = hclge_get_vport(handle); 4750 struct hclge_dev *hdev = vport->back; 4751 int i, ret; 4752 4753 switch 
(loop_mode) { 4754 case HNAE3_LOOP_APP: 4755 ret = hclge_set_app_loopback(hdev, en); 4756 break; 4757 case HNAE3_LOOP_SERIAL_SERDES: 4758 case HNAE3_LOOP_PARALLEL_SERDES: 4759 ret = hclge_set_serdes_loopback(hdev, en, loop_mode); 4760 break; 4761 default: 4762 ret = -ENOTSUPP; 4763 dev_err(&hdev->pdev->dev, 4764 "loop_mode %d is not supported\n", loop_mode); 4765 break; 4766 } 4767 4768 for (i = 0; i < vport->alloc_tqps; i++) { 4769 ret = hclge_tqp_enable(hdev, i, 0, en); 4770 if (ret) 4771 return ret; 4772 } 4773 4774 return 0; 4775 } 4776 4777 static void hclge_reset_tqp_stats(struct hnae3_handle *handle) 4778 { 4779 struct hclge_vport *vport = hclge_get_vport(handle); 4780 struct hnae3_queue *queue; 4781 struct hclge_tqp *tqp; 4782 int i; 4783 4784 for (i = 0; i < vport->alloc_tqps; i++) { 4785 queue = handle->kinfo.tqp[i]; 4786 tqp = container_of(queue, struct hclge_tqp, q); 4787 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 4788 } 4789 } 4790 4791 static int hclge_ae_start(struct hnae3_handle *handle) 4792 { 4793 struct hclge_vport *vport = hclge_get_vport(handle); 4794 struct hclge_dev *hdev = vport->back; 4795 int i; 4796 4797 for (i = 0; i < vport->alloc_tqps; i++) 4798 hclge_tqp_enable(hdev, i, 0, true); 4799 4800 /* mac enable */ 4801 hclge_cfg_mac_mode(hdev, true); 4802 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 4803 mod_timer(&hdev->service_timer, jiffies + HZ); 4804 hdev->hw.mac.link = 0; 4805 4806 /* reset tqp stats */ 4807 hclge_reset_tqp_stats(handle); 4808 4809 hclge_mac_start_phy(hdev); 4810 4811 return 0; 4812 } 4813 4814 static void hclge_ae_stop(struct hnae3_handle *handle) 4815 { 4816 struct hclge_vport *vport = hclge_get_vport(handle); 4817 struct hclge_dev *hdev = vport->back; 4818 int i; 4819 4820 set_bit(HCLGE_STATE_DOWN, &hdev->state); 4821 4822 del_timer_sync(&hdev->service_timer); 4823 cancel_work_sync(&hdev->service_task); 4824 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 4825 4826 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { 4827 hclge_mac_stop_phy(hdev); 4828 return; 4829 } 4830 4831 for (i = 0; i < vport->alloc_tqps; i++) 4832 hclge_tqp_enable(hdev, i, 0, false); 4833 4834 /* Mac disable */ 4835 hclge_cfg_mac_mode(hdev, false); 4836 4837 hclge_mac_stop_phy(hdev); 4838 4839 /* reset tqp stats */ 4840 hclge_reset_tqp_stats(handle); 4841 del_timer_sync(&hdev->service_timer); 4842 cancel_work_sync(&hdev->service_task); 4843 hclge_update_link_status(hdev); 4844 } 4845 4846 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 4847 u16 cmdq_resp, u8 resp_code, 4848 enum hclge_mac_vlan_tbl_opcode op) 4849 { 4850 struct hclge_dev *hdev = vport->back; 4851 int return_status = -EIO; 4852 4853 if (cmdq_resp) { 4854 dev_err(&hdev->pdev->dev, 4855 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", 4856 cmdq_resp); 4857 return -EIO; 4858 } 4859 4860 if (op == HCLGE_MAC_VLAN_ADD) { 4861 if ((!resp_code) || (resp_code == 1)) { 4862 return_status = 0; 4863 } else if (resp_code == 2) { 4864 return_status = -ENOSPC; 4865 dev_err(&hdev->pdev->dev, 4866 "add mac addr failed for uc_overflow.\n"); 4867 } else if (resp_code == 3) { 4868 return_status = -ENOSPC; 4869 dev_err(&hdev->pdev->dev, 4870 "add mac addr failed for mc_overflow.\n"); 4871 } else { 4872 dev_err(&hdev->pdev->dev, 4873 "add mac addr failed for undefined, code=%d.\n", 4874 resp_code); 4875 } 4876 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 4877 if (!resp_code) { 4878 return_status = 0; 4879 } else if (resp_code == 1) { 4880 return_status = -ENOENT; 4881 
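			/* Comment added for clarity (not in the original source):
			 * a miss on remove just means the address was never in
			 * the table (or is already gone), so report -ENOENT and
			 * keep the message at debug level.
			 */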
dev_dbg(&hdev->pdev->dev, 4882 "remove mac addr failed for miss.\n"); 4883 } else { 4884 dev_err(&hdev->pdev->dev, 4885 "remove mac addr failed for undefined, code=%d.\n", 4886 resp_code); 4887 } 4888 } else if (op == HCLGE_MAC_VLAN_LKUP) { 4889 if (!resp_code) { 4890 return_status = 0; 4891 } else if (resp_code == 1) { 4892 return_status = -ENOENT; 4893 dev_dbg(&hdev->pdev->dev, 4894 "lookup mac addr failed for miss.\n"); 4895 } else { 4896 dev_err(&hdev->pdev->dev, 4897 "lookup mac addr failed for undefined, code=%d.\n", 4898 resp_code); 4899 } 4900 } else { 4901 return_status = -EINVAL; 4902 dev_err(&hdev->pdev->dev, 4903 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", 4904 op); 4905 } 4906 4907 return return_status; 4908 } 4909 4910 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 4911 { 4912 int word_num; 4913 int bit_num; 4914 4915 if (vfid > 255 || vfid < 0) 4916 return -EIO; 4917 4918 if (vfid >= 0 && vfid <= 191) { 4919 word_num = vfid / 32; 4920 bit_num = vfid % 32; 4921 if (clr) 4922 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 4923 else 4924 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 4925 } else { 4926 word_num = (vfid - 192) / 32; 4927 bit_num = vfid % 32; 4928 if (clr) 4929 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 4930 else 4931 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 4932 } 4933 4934 return 0; 4935 } 4936 4937 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 4938 { 4939 #define HCLGE_DESC_NUMBER 3 4940 #define HCLGE_FUNC_NUMBER_PER_DESC 6 4941 int i, j; 4942 4943 for (i = 1; i < HCLGE_DESC_NUMBER; i++) 4944 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 4945 if (desc[i].data[j]) 4946 return false; 4947 4948 return true; 4949 } 4950 4951 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 4952 const u8 *addr) 4953 { 4954 const unsigned char *mac_addr = addr; 4955 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 4956 (mac_addr[0]) | (mac_addr[1] << 8); 4957 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 4958 4959 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 4960 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 4961 } 4962 4963 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 4964 struct hclge_mac_vlan_tbl_entry_cmd *req) 4965 { 4966 struct hclge_dev *hdev = vport->back; 4967 struct hclge_desc desc; 4968 u8 resp_code; 4969 u16 retval; 4970 int ret; 4971 4972 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 4973 4974 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4975 4976 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4977 if (ret) { 4978 dev_err(&hdev->pdev->dev, 4979 "del mac addr failed for cmd_send, ret =%d.\n", 4980 ret); 4981 return ret; 4982 } 4983 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4984 retval = le16_to_cpu(desc.retval); 4985 4986 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4987 HCLGE_MAC_VLAN_REMOVE); 4988 } 4989 4990 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 4991 struct hclge_mac_vlan_tbl_entry_cmd *req, 4992 struct hclge_desc *desc, 4993 bool is_mc) 4994 { 4995 struct hclge_dev *hdev = vport->back; 4996 u8 resp_code; 4997 u16 retval; 4998 int ret; 4999 5000 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 5001 if (is_mc) { 5002 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 5003 memcpy(desc[0].data, 5004 req, 5005 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 5006 
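	/* Comment added for clarity (not in the original source): a multicast
	 * lookup spans three chained descriptors. desc[0] carries the entry
	 * being looked up, while desc[1] and desc[2] hold the per-function
	 * (VF) bitmap that hclge_update_desc_vfid() and
	 * hclge_is_all_function_id_zero() operate on, so the first two
	 * descriptors are flagged with HCLGE_CMD_FLAG_NEXT and all three are
	 * sent as one transaction below.
	 */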
hclge_cmd_setup_basic_desc(&desc[1], 5007 HCLGE_OPC_MAC_VLAN_ADD, 5008 true); 5009 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 5010 hclge_cmd_setup_basic_desc(&desc[2], 5011 HCLGE_OPC_MAC_VLAN_ADD, 5012 true); 5013 ret = hclge_cmd_send(&hdev->hw, desc, 3); 5014 } else { 5015 memcpy(desc[0].data, 5016 req, 5017 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 5018 ret = hclge_cmd_send(&hdev->hw, desc, 1); 5019 } 5020 if (ret) { 5021 dev_err(&hdev->pdev->dev, 5022 "lookup mac addr failed for cmd_send, ret =%d.\n", 5023 ret); 5024 return ret; 5025 } 5026 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 5027 retval = le16_to_cpu(desc[0].retval); 5028 5029 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 5030 HCLGE_MAC_VLAN_LKUP); 5031 } 5032 5033 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 5034 struct hclge_mac_vlan_tbl_entry_cmd *req, 5035 struct hclge_desc *mc_desc) 5036 { 5037 struct hclge_dev *hdev = vport->back; 5038 int cfg_status; 5039 u8 resp_code; 5040 u16 retval; 5041 int ret; 5042 5043 if (!mc_desc) { 5044 struct hclge_desc desc; 5045 5046 hclge_cmd_setup_basic_desc(&desc, 5047 HCLGE_OPC_MAC_VLAN_ADD, 5048 false); 5049 memcpy(desc.data, req, 5050 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 5051 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5052 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 5053 retval = le16_to_cpu(desc.retval); 5054 5055 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 5056 resp_code, 5057 HCLGE_MAC_VLAN_ADD); 5058 } else { 5059 hclge_cmd_reuse_desc(&mc_desc[0], false); 5060 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 5061 hclge_cmd_reuse_desc(&mc_desc[1], false); 5062 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 5063 hclge_cmd_reuse_desc(&mc_desc[2], false); 5064 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 5065 memcpy(mc_desc[0].data, req, 5066 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 5067 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 5068 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 5069 retval = le16_to_cpu(mc_desc[0].retval); 5070 5071 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 5072 resp_code, 5073 HCLGE_MAC_VLAN_ADD); 5074 } 5075 5076 if (ret) { 5077 dev_err(&hdev->pdev->dev, 5078 "add mac addr failed for cmd_send, ret =%d.\n", 5079 ret); 5080 return ret; 5081 } 5082 5083 return cfg_status; 5084 } 5085 5086 static int hclge_init_umv_space(struct hclge_dev *hdev) 5087 { 5088 u16 allocated_size = 0; 5089 int ret; 5090 5091 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size, 5092 true); 5093 if (ret) 5094 return ret; 5095 5096 if (allocated_size < hdev->wanted_umv_size) 5097 dev_warn(&hdev->pdev->dev, 5098 "Alloc umv space failed, want %d, get %d\n", 5099 hdev->wanted_umv_size, allocated_size); 5100 5101 mutex_init(&hdev->umv_mutex); 5102 hdev->max_umv_size = allocated_size; 5103 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2); 5104 hdev->share_umv_size = hdev->priv_umv_size + 5105 hdev->max_umv_size % (hdev->num_req_vfs + 2); 5106 5107 return 0; 5108 } 5109 5110 static int hclge_uninit_umv_space(struct hclge_dev *hdev) 5111 { 5112 int ret; 5113 5114 if (hdev->max_umv_size > 0) { 5115 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL, 5116 false); 5117 if (ret) 5118 return ret; 5119 hdev->max_umv_size = 0; 5120 } 5121 mutex_destroy(&hdev->umv_mutex); 5122 5123 return 0; 5124 } 5125 5126 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, 5127 u16 *allocated_size, bool is_alloc) 5128 { 
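	/* Comment added for clarity (not in the original source): ask the
	 * firmware to allocate (is_alloc) or free unicast MAC vlan ("UMV")
	 * table space for this PF. On allocation the firmware reports the
	 * number of entries actually granted in the second data word of the
	 * response, which hclge_init_umv_space() then splits into per-vport
	 * private quotas plus a shared pool.
	 */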
5129 struct hclge_umv_spc_alc_cmd *req; 5130 struct hclge_desc desc; 5131 int ret; 5132 5133 req = (struct hclge_umv_spc_alc_cmd *)desc.data; 5134 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); 5135 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc); 5136 req->space_size = cpu_to_le32(space_size); 5137 5138 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5139 if (ret) { 5140 dev_err(&hdev->pdev->dev, 5141 "%s umv space failed for cmd_send, ret =%d\n", 5142 is_alloc ? "allocate" : "free", ret); 5143 return ret; 5144 } 5145 5146 if (is_alloc && allocated_size) 5147 *allocated_size = le32_to_cpu(desc.data[1]); 5148 5149 return 0; 5150 } 5151 5152 static void hclge_reset_umv_space(struct hclge_dev *hdev) 5153 { 5154 struct hclge_vport *vport; 5155 int i; 5156 5157 for (i = 0; i < hdev->num_alloc_vport; i++) { 5158 vport = &hdev->vport[i]; 5159 vport->used_umv_num = 0; 5160 } 5161 5162 mutex_lock(&hdev->umv_mutex); 5163 hdev->share_umv_size = hdev->priv_umv_size + 5164 hdev->max_umv_size % (hdev->num_req_vfs + 2); 5165 mutex_unlock(&hdev->umv_mutex); 5166 } 5167 5168 static bool hclge_is_umv_space_full(struct hclge_vport *vport) 5169 { 5170 struct hclge_dev *hdev = vport->back; 5171 bool is_full; 5172 5173 mutex_lock(&hdev->umv_mutex); 5174 is_full = (vport->used_umv_num >= hdev->priv_umv_size && 5175 hdev->share_umv_size == 0); 5176 mutex_unlock(&hdev->umv_mutex); 5177 5178 return is_full; 5179 } 5180 5181 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) 5182 { 5183 struct hclge_dev *hdev = vport->back; 5184 5185 mutex_lock(&hdev->umv_mutex); 5186 if (is_free) { 5187 if (vport->used_umv_num > hdev->priv_umv_size) 5188 hdev->share_umv_size++; 5189 vport->used_umv_num--; 5190 } else { 5191 if (vport->used_umv_num >= hdev->priv_umv_size) 5192 hdev->share_umv_size--; 5193 vport->used_umv_num++; 5194 } 5195 mutex_unlock(&hdev->umv_mutex); 5196 } 5197 5198 static int hclge_add_uc_addr(struct hnae3_handle *handle, 5199 const unsigned char *addr) 5200 { 5201 struct hclge_vport *vport = hclge_get_vport(handle); 5202 5203 return hclge_add_uc_addr_common(vport, addr); 5204 } 5205 5206 int hclge_add_uc_addr_common(struct hclge_vport *vport, 5207 const unsigned char *addr) 5208 { 5209 struct hclge_dev *hdev = vport->back; 5210 struct hclge_mac_vlan_tbl_entry_cmd req; 5211 struct hclge_desc desc; 5212 u16 egress_port = 0; 5213 int ret; 5214 5215 /* mac addr check */ 5216 if (is_zero_ether_addr(addr) || 5217 is_broadcast_ether_addr(addr) || 5218 is_multicast_ether_addr(addr)) { 5219 dev_err(&hdev->pdev->dev, 5220 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", 5221 addr, 5222 is_zero_ether_addr(addr), 5223 is_broadcast_ether_addr(addr), 5224 is_multicast_ether_addr(addr)); 5225 return -EINVAL; 5226 } 5227 5228 memset(&req, 0, sizeof(req)); 5229 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5230 5231 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 5232 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 5233 5234 req.egress_port = cpu_to_le16(egress_port); 5235 5236 hclge_prepare_mac_addr(&req, addr); 5237 5238 /* Lookup the mac address in the mac_vlan table, and add 5239 * it if the entry is inexistent. Repeated unicast entry 5240 * is not allowed in the mac vlan table. 
5241 */ 5242 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 5243 if (ret == -ENOENT) { 5244 if (!hclge_is_umv_space_full(vport)) { 5245 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); 5246 if (!ret) 5247 hclge_update_umv_space(vport, false); 5248 return ret; 5249 } 5250 5251 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", 5252 hdev->priv_umv_size); 5253 5254 return -ENOSPC; 5255 } 5256 5257 /* check if we just hit the duplicate */ 5258 if (!ret) 5259 ret = -EINVAL; 5260 5261 dev_err(&hdev->pdev->dev, 5262 "PF failed to add unicast entry(%pM) in the MAC table\n", 5263 addr); 5264 5265 return ret; 5266 } 5267 5268 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 5269 const unsigned char *addr) 5270 { 5271 struct hclge_vport *vport = hclge_get_vport(handle); 5272 5273 return hclge_rm_uc_addr_common(vport, addr); 5274 } 5275 5276 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 5277 const unsigned char *addr) 5278 { 5279 struct hclge_dev *hdev = vport->back; 5280 struct hclge_mac_vlan_tbl_entry_cmd req; 5281 int ret; 5282 5283 /* mac addr check */ 5284 if (is_zero_ether_addr(addr) || 5285 is_broadcast_ether_addr(addr) || 5286 is_multicast_ether_addr(addr)) { 5287 dev_dbg(&hdev->pdev->dev, 5288 "Remove mac err! invalid mac:%pM.\n", 5289 addr); 5290 return -EINVAL; 5291 } 5292 5293 memset(&req, 0, sizeof(req)); 5294 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5295 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 5296 hclge_prepare_mac_addr(&req, addr); 5297 ret = hclge_remove_mac_vlan_tbl(vport, &req); 5298 if (!ret) 5299 hclge_update_umv_space(vport, true); 5300 5301 return ret; 5302 } 5303 5304 static int hclge_add_mc_addr(struct hnae3_handle *handle, 5305 const unsigned char *addr) 5306 { 5307 struct hclge_vport *vport = hclge_get_vport(handle); 5308 5309 return hclge_add_mc_addr_common(vport, addr); 5310 } 5311 5312 int hclge_add_mc_addr_common(struct hclge_vport *vport, 5313 const unsigned char *addr) 5314 { 5315 struct hclge_dev *hdev = vport->back; 5316 struct hclge_mac_vlan_tbl_entry_cmd req; 5317 struct hclge_desc desc[3]; 5318 int status; 5319 5320 /* mac addr check */ 5321 if (!is_multicast_ether_addr(addr)) { 5322 dev_err(&hdev->pdev->dev, 5323 "Add mc mac err! 
invalid mac:%pM.\n", 5324 addr); 5325 return -EINVAL; 5326 } 5327 memset(&req, 0, sizeof(req)); 5328 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5329 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 5330 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 5331 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5332 hclge_prepare_mac_addr(&req, addr); 5333 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 5334 if (!status) { 5335 /* This mac addr exist, update VFID for it */ 5336 hclge_update_desc_vfid(desc, vport->vport_id, false); 5337 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 5338 } else { 5339 /* This mac addr do not exist, add new entry for it */ 5340 memset(desc[0].data, 0, sizeof(desc[0].data)); 5341 memset(desc[1].data, 0, sizeof(desc[0].data)); 5342 memset(desc[2].data, 0, sizeof(desc[0].data)); 5343 hclge_update_desc_vfid(desc, vport->vport_id, false); 5344 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 5345 } 5346 5347 if (status == -ENOSPC) 5348 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); 5349 5350 return status; 5351 } 5352 5353 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 5354 const unsigned char *addr) 5355 { 5356 struct hclge_vport *vport = hclge_get_vport(handle); 5357 5358 return hclge_rm_mc_addr_common(vport, addr); 5359 } 5360 5361 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 5362 const unsigned char *addr) 5363 { 5364 struct hclge_dev *hdev = vport->back; 5365 struct hclge_mac_vlan_tbl_entry_cmd req; 5366 enum hclge_cmd_status status; 5367 struct hclge_desc desc[3]; 5368 5369 /* mac addr check */ 5370 if (!is_multicast_ether_addr(addr)) { 5371 dev_dbg(&hdev->pdev->dev, 5372 "Remove mc mac err! invalid mac:%pM.\n", 5373 addr); 5374 return -EINVAL; 5375 } 5376 5377 memset(&req, 0, sizeof(req)); 5378 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5379 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 5380 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 5381 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 5382 hclge_prepare_mac_addr(&req, addr); 5383 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 5384 if (!status) { 5385 /* This mac addr exist, remove this handle's VFID for it */ 5386 hclge_update_desc_vfid(desc, vport->vport_id, true); 5387 5388 if (hclge_is_all_function_id_zero(desc)) 5389 /* All the vfid is zero, so need to delete this entry */ 5390 status = hclge_remove_mac_vlan_tbl(vport, &req); 5391 else 5392 /* Not all the vfid is zero, update the vfid */ 5393 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 5394 5395 } else { 5396 /* Maybe this mac address is in mta table, but it cannot be 5397 * deleted here because an entry of mta represents an address 5398 * range rather than a specific address. the delete action to 5399 * all entries will take effect in update_mta_status called by 5400 * hns3_nic_set_rx_mode. 
5401 */ 5402 status = 0; 5403 } 5404 5405 return status; 5406 } 5407 5408 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 5409 u16 cmdq_resp, u8 resp_code) 5410 { 5411 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 5412 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 5413 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 5414 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 5415 5416 int return_status; 5417 5418 if (cmdq_resp) { 5419 dev_err(&hdev->pdev->dev, 5420 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", 5421 cmdq_resp); 5422 return -EIO; 5423 } 5424 5425 switch (resp_code) { 5426 case HCLGE_ETHERTYPE_SUCCESS_ADD: 5427 case HCLGE_ETHERTYPE_ALREADY_ADD: 5428 return_status = 0; 5429 break; 5430 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 5431 dev_err(&hdev->pdev->dev, 5432 "add mac ethertype failed for manager table overflow.\n"); 5433 return_status = -EIO; 5434 break; 5435 case HCLGE_ETHERTYPE_KEY_CONFLICT: 5436 dev_err(&hdev->pdev->dev, 5437 "add mac ethertype failed for key conflict.\n"); 5438 return_status = -EIO; 5439 break; 5440 default: 5441 dev_err(&hdev->pdev->dev, 5442 "add mac ethertype failed for undefined, code=%d.\n", 5443 resp_code); 5444 return_status = -EIO; 5445 } 5446 5447 return return_status; 5448 } 5449 5450 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 5451 const struct hclge_mac_mgr_tbl_entry_cmd *req) 5452 { 5453 struct hclge_desc desc; 5454 u8 resp_code; 5455 u16 retval; 5456 int ret; 5457 5458 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 5459 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 5460 5461 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5462 if (ret) { 5463 dev_err(&hdev->pdev->dev, 5464 "add mac ethertype failed for cmd_send, ret =%d.\n", 5465 ret); 5466 return ret; 5467 } 5468 5469 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 5470 retval = le16_to_cpu(desc.retval); 5471 5472 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 5473 } 5474 5475 static int init_mgr_tbl(struct hclge_dev *hdev) 5476 { 5477 int ret; 5478 int i; 5479 5480 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 5481 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 5482 if (ret) { 5483 dev_err(&hdev->pdev->dev, 5484 "add mac ethertype failed, ret =%d.\n", 5485 ret); 5486 return ret; 5487 } 5488 } 5489 5490 return 0; 5491 } 5492 5493 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 5494 { 5495 struct hclge_vport *vport = hclge_get_vport(handle); 5496 struct hclge_dev *hdev = vport->back; 5497 5498 ether_addr_copy(p, hdev->hw.mac.mac_addr); 5499 } 5500 5501 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, 5502 bool is_first) 5503 { 5504 const unsigned char *new_addr = (const unsigned char *)p; 5505 struct hclge_vport *vport = hclge_get_vport(handle); 5506 struct hclge_dev *hdev = vport->back; 5507 int ret; 5508 5509 /* mac addr check */ 5510 if (is_zero_ether_addr(new_addr) || 5511 is_broadcast_ether_addr(new_addr) || 5512 is_multicast_ether_addr(new_addr)) { 5513 dev_err(&hdev->pdev->dev, 5514 "Change uc mac err! 
invalid mac:%pM.\n", 5515 new_addr); 5516 return -EINVAL; 5517 } 5518 5519 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) 5520 dev_warn(&hdev->pdev->dev, 5521 "remove old uc mac address fail.\n"); 5522 5523 ret = hclge_add_uc_addr(handle, new_addr); 5524 if (ret) { 5525 dev_err(&hdev->pdev->dev, 5526 "add uc mac address fail, ret =%d.\n", 5527 ret); 5528 5529 if (!is_first && 5530 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) 5531 dev_err(&hdev->pdev->dev, 5532 "restore uc mac address fail.\n"); 5533 5534 return -EIO; 5535 } 5536 5537 ret = hclge_pause_addr_cfg(hdev, new_addr); 5538 if (ret) { 5539 dev_err(&hdev->pdev->dev, 5540 "configure mac pause address fail, ret =%d.\n", 5541 ret); 5542 return -EIO; 5543 } 5544 5545 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 5546 5547 return 0; 5548 } 5549 5550 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, 5551 int cmd) 5552 { 5553 struct hclge_vport *vport = hclge_get_vport(handle); 5554 struct hclge_dev *hdev = vport->back; 5555 5556 if (!hdev->hw.mac.phydev) 5557 return -EOPNOTSUPP; 5558 5559 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); 5560 } 5561 5562 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 5563 u8 fe_type, bool filter_en) 5564 { 5565 struct hclge_vlan_filter_ctrl_cmd *req; 5566 struct hclge_desc desc; 5567 int ret; 5568 5569 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 5570 5571 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 5572 req->vlan_type = vlan_type; 5573 req->vlan_fe = filter_en ? fe_type : 0; 5574 5575 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5576 if (ret) 5577 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 5578 ret); 5579 5580 return ret; 5581 } 5582 5583 #define HCLGE_FILTER_TYPE_VF 0 5584 #define HCLGE_FILTER_TYPE_PORT 1 5585 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0) 5586 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0) 5587 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1) 5588 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2) 5589 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3) 5590 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \ 5591 | HCLGE_FILTER_FE_ROCE_EGRESS_B) 5592 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \ 5593 | HCLGE_FILTER_FE_ROCE_INGRESS_B) 5594 5595 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 5596 { 5597 struct hclge_vport *vport = hclge_get_vport(handle); 5598 struct hclge_dev *hdev = vport->back; 5599 5600 if (hdev->pdev->revision >= 0x21) { 5601 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 5602 HCLGE_FILTER_FE_EGRESS, enable); 5603 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 5604 HCLGE_FILTER_FE_INGRESS, enable); 5605 } else { 5606 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 5607 HCLGE_FILTER_FE_EGRESS_V1_B, enable); 5608 } 5609 if (enable) 5610 handle->netdev_flags |= HNAE3_VLAN_FLTR; 5611 else 5612 handle->netdev_flags &= ~HNAE3_VLAN_FLTR; 5613 } 5614 5615 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 5616 bool is_kill, u16 vlan, u8 qos, 5617 __be16 proto) 5618 { 5619 #define HCLGE_MAX_VF_BYTES 16 5620 struct hclge_vlan_filter_vf_cfg_cmd *req0; 5621 struct hclge_vlan_filter_vf_cfg_cmd *req1; 5622 struct hclge_desc desc[2]; 5623 u8 vf_byte_val; 5624 u8 vf_byte_off; 5625 int ret; 5626 5627 hclge_cmd_setup_basic_desc(&desc[0], 5628 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 5629 hclge_cmd_setup_basic_desc(&desc[1], 5630 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 5631
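	/* Comment added for clarity (not in the original source): the per-VF
	 * bitmap is wider than a single descriptor payload (HCLGE_MAX_VF_BYTES
	 * per descriptor), so the filter update is sent as two chained
	 * descriptors: desc[0] carries the vlan id/config and the first 16
	 * bytes of the bitmap, desc[1] the bytes for higher-numbered VFs.
	 */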
5632 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 5633 5634 vf_byte_off = vfid / 8; 5635 vf_byte_val = 1 << (vfid % 8); 5636 5637 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 5638 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 5639 5640 req0->vlan_id = cpu_to_le16(vlan); 5641 req0->vlan_cfg = is_kill; 5642 5643 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 5644 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 5645 else 5646 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 5647 5648 ret = hclge_cmd_send(&hdev->hw, desc, 2); 5649 if (ret) { 5650 dev_err(&hdev->pdev->dev, 5651 "Send vf vlan command fail, ret =%d.\n", 5652 ret); 5653 return ret; 5654 } 5655 5656 if (!is_kill) { 5657 #define HCLGE_VF_VLAN_NO_ENTRY 2 5658 if (!req0->resp_code || req0->resp_code == 1) 5659 return 0; 5660 5661 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { 5662 dev_warn(&hdev->pdev->dev, 5663 "vf vlan table is full, vf vlan filter is disabled\n"); 5664 return 0; 5665 } 5666 5667 dev_err(&hdev->pdev->dev, 5668 "Add vf vlan filter fail, ret =%d.\n", 5669 req0->resp_code); 5670 } else { 5671 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 5672 if (!req0->resp_code) 5673 return 0; 5674 5675 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { 5676 dev_warn(&hdev->pdev->dev, 5677 "vlan %d filter is not in vf vlan table\n", 5678 vlan); 5679 return 0; 5680 } 5681 5682 dev_err(&hdev->pdev->dev, 5683 "Kill vf vlan filter fail, ret =%d.\n", 5684 req0->resp_code); 5685 } 5686 5687 return -EIO; 5688 } 5689 5690 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 5691 u16 vlan_id, bool is_kill) 5692 { 5693 struct hclge_vlan_filter_pf_cfg_cmd *req; 5694 struct hclge_desc desc; 5695 u8 vlan_offset_byte_val; 5696 u8 vlan_offset_byte; 5697 u8 vlan_offset_160; 5698 int ret; 5699 5700 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 5701 5702 vlan_offset_160 = vlan_id / 160; 5703 vlan_offset_byte = (vlan_id % 160) / 8; 5704 vlan_offset_byte_val = 1 << (vlan_id % 8); 5705 5706 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 5707 req->vlan_offset = vlan_offset_160; 5708 req->vlan_cfg = is_kill; 5709 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 5710 5711 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5712 if (ret) 5713 dev_err(&hdev->pdev->dev, 5714 "port vlan command, send fail, ret =%d.\n", ret); 5715 return ret; 5716 } 5717 5718 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 5719 u16 vport_id, u16 vlan_id, u8 qos, 5720 bool is_kill) 5721 { 5722 u16 vport_idx, vport_num = 0; 5723 int ret; 5724 5725 if (is_kill && !vlan_id) 5726 return 0; 5727 5728 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 5729 0, proto); 5730 if (ret) { 5731 dev_err(&hdev->pdev->dev, 5732 "Set %d vport vlan filter config fail, ret =%d.\n", 5733 vport_id, ret); 5734 return ret; 5735 } 5736 5737 /* vlan 0 may be added twice when 8021q module is enabled */ 5738 if (!is_kill && !vlan_id && 5739 test_bit(vport_id, hdev->vlan_table[vlan_id])) 5740 return 0; 5741 5742 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 5743 dev_err(&hdev->pdev->dev, 5744 "Add port vlan failed, vport %d is already in vlan %d\n", 5745 vport_id, vlan_id); 5746 return -EINVAL; 5747 } 5748 5749 if (is_kill && 5750 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 5751 dev_err(&hdev->pdev->dev, 5752 "Delete port vlan failed, vport %d is not in vlan %d\n", 5753 vport_id, vlan_id); 5754 return -EINVAL; 5755 } 5756 5757 
for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) 5758 vport_num++; 5759 5760 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 5761 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 5762 is_kill); 5763 5764 return ret; 5765 } 5766 5767 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 5768 u16 vlan_id, bool is_kill) 5769 { 5770 struct hclge_vport *vport = hclge_get_vport(handle); 5771 struct hclge_dev *hdev = vport->back; 5772 5773 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, 5774 0, is_kill); 5775 } 5776 5777 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 5778 u16 vlan, u8 qos, __be16 proto) 5779 { 5780 struct hclge_vport *vport = hclge_get_vport(handle); 5781 struct hclge_dev *hdev = vport->back; 5782 5783 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) 5784 return -EINVAL; 5785 if (proto != htons(ETH_P_8021Q)) 5786 return -EPROTONOSUPPORT; 5787 5788 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); 5789 } 5790 5791 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 5792 { 5793 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 5794 struct hclge_vport_vtag_tx_cfg_cmd *req; 5795 struct hclge_dev *hdev = vport->back; 5796 struct hclge_desc desc; 5797 int status; 5798 5799 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 5800 5801 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; 5802 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 5803 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 5804 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 5805 vcfg->accept_tag1 ? 1 : 0); 5806 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 5807 vcfg->accept_untag1 ? 1 : 0); 5808 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 5809 vcfg->accept_tag2 ? 1 : 0); 5810 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 5811 vcfg->accept_untag2 ? 1 : 0); 5812 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 5813 vcfg->insert_tag1_en ? 1 : 0); 5814 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 5815 vcfg->insert_tag2_en ? 1 : 0); 5816 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 5817 5818 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 5819 req->vf_bitmap[req->vf_offset] = 5820 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 5821 5822 status = hclge_cmd_send(&hdev->hw, &desc, 1); 5823 if (status) 5824 dev_err(&hdev->pdev->dev, 5825 "Send port txvlan cfg command fail, ret =%d\n", 5826 status); 5827 5828 return status; 5829 } 5830 5831 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 5832 { 5833 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 5834 struct hclge_vport_vtag_rx_cfg_cmd *req; 5835 struct hclge_dev *hdev = vport->back; 5836 struct hclge_desc desc; 5837 int status; 5838 5839 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 5840 5841 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 5842 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 5843 vcfg->strip_tag1_en ? 1 : 0); 5844 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 5845 vcfg->strip_tag2_en ? 1 : 0); 5846 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 5847 vcfg->vlan1_vlan_prionly ? 1 : 0); 5848 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 5849 vcfg->vlan2_vlan_prionly ? 
1 : 0); 5850 5851 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 5852 req->vf_bitmap[req->vf_offset] = 5853 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 5854 5855 status = hclge_cmd_send(&hdev->hw, &desc, 1); 5856 if (status) 5857 dev_err(&hdev->pdev->dev, 5858 "Send port rxvlan cfg command fail, ret =%d\n", 5859 status); 5860 5861 return status; 5862 } 5863 5864 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 5865 { 5866 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 5867 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 5868 struct hclge_desc desc; 5869 int status; 5870 5871 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 5872 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 5873 rx_req->ot_fst_vlan_type = 5874 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 5875 rx_req->ot_sec_vlan_type = 5876 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 5877 rx_req->in_fst_vlan_type = 5878 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 5879 rx_req->in_sec_vlan_type = 5880 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 5881 5882 status = hclge_cmd_send(&hdev->hw, &desc, 1); 5883 if (status) { 5884 dev_err(&hdev->pdev->dev, 5885 "Send rxvlan protocol type command fail, ret =%d\n", 5886 status); 5887 return status; 5888 } 5889 5890 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 5891 5892 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; 5893 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 5894 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 5895 5896 status = hclge_cmd_send(&hdev->hw, &desc, 1); 5897 if (status) 5898 dev_err(&hdev->pdev->dev, 5899 "Send txvlan protocol type command fail, ret =%d\n", 5900 status); 5901 5902 return status; 5903 } 5904 5905 static int hclge_init_vlan_config(struct hclge_dev *hdev) 5906 { 5907 #define HCLGE_DEF_VLAN_TYPE 0x8100 5908 5909 struct hnae3_handle *handle = &hdev->vport[0].nic; 5910 struct hclge_vport *vport; 5911 int ret; 5912 int i; 5913 5914 if (hdev->pdev->revision >= 0x21) { 5915 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 5916 HCLGE_FILTER_FE_EGRESS, true); 5917 if (ret) 5918 return ret; 5919 5920 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 5921 HCLGE_FILTER_FE_INGRESS, true); 5922 if (ret) 5923 return ret; 5924 } else { 5925 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 5926 HCLGE_FILTER_FE_EGRESS_V1_B, 5927 true); 5928 if (ret) 5929 return ret; 5930 } 5931 5932 handle->netdev_flags |= HNAE3_VLAN_FLTR; 5933 5934 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 5935 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 5936 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 5937 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 5938 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; 5939 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; 5940 5941 ret = hclge_set_vlan_protocol_type(hdev); 5942 if (ret) 5943 return ret; 5944 5945 for (i = 0; i < hdev->num_alloc_vport; i++) { 5946 vport = &hdev->vport[i]; 5947 vport->txvlan_cfg.accept_tag1 = true; 5948 vport->txvlan_cfg.accept_untag1 = true; 5949 5950 /* accept_tag2 and accept_untag2 are not supported on 5951 * pdev revision(0x20); newer revisions support them. On 5952 * revision(0x20) the firmware does not return an error when 5953 * the driver sets these two fields in the command. 5954 * They cannot be configured by the user.
5955 */ 5956 vport->txvlan_cfg.accept_tag2 = true; 5957 vport->txvlan_cfg.accept_untag2 = true; 5958 5959 vport->txvlan_cfg.insert_tag1_en = false; 5960 vport->txvlan_cfg.insert_tag2_en = false; 5961 vport->txvlan_cfg.default_tag1 = 0; 5962 vport->txvlan_cfg.default_tag2 = 0; 5963 5964 ret = hclge_set_vlan_tx_offload_cfg(vport); 5965 if (ret) 5966 return ret; 5967 5968 vport->rxvlan_cfg.strip_tag1_en = false; 5969 vport->rxvlan_cfg.strip_tag2_en = true; 5970 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 5971 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 5972 5973 ret = hclge_set_vlan_rx_offload_cfg(vport); 5974 if (ret) 5975 return ret; 5976 } 5977 5978 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 5979 } 5980 5981 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 5982 { 5983 struct hclge_vport *vport = hclge_get_vport(handle); 5984 5985 vport->rxvlan_cfg.strip_tag1_en = false; 5986 vport->rxvlan_cfg.strip_tag2_en = enable; 5987 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 5988 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 5989 5990 return hclge_set_vlan_rx_offload_cfg(vport); 5991 } 5992 5993 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) 5994 { 5995 struct hclge_config_max_frm_size_cmd *req; 5996 struct hclge_desc desc; 5997 int max_frm_size; 5998 int ret; 5999 6000 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 6001 6002 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 6003 max_frm_size > HCLGE_MAC_MAX_FRAME) 6004 return -EINVAL; 6005 6006 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 6007 6008 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 6009 6010 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 6011 req->max_frm_size = cpu_to_le16(max_frm_size); 6012 req->min_frm_size = HCLGE_MAC_MIN_FRAME; 6013 6014 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6015 if (ret) 6016 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); 6017 else 6018 hdev->mps = max_frm_size; 6019 6020 return ret; 6021 } 6022 6023 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 6024 { 6025 struct hclge_vport *vport = hclge_get_vport(handle); 6026 struct hclge_dev *hdev = vport->back; 6027 int ret; 6028 6029 ret = hclge_set_mac_mtu(hdev, new_mtu); 6030 if (ret) { 6031 dev_err(&hdev->pdev->dev, 6032 "Change mtu fail, ret =%d\n", ret); 6033 return ret; 6034 } 6035 6036 ret = hclge_buffer_alloc(hdev); 6037 if (ret) 6038 dev_err(&hdev->pdev->dev, 6039 "Allocate buffer fail, ret =%d\n", ret); 6040 6041 return ret; 6042 } 6043 6044 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, 6045 bool enable) 6046 { 6047 struct hclge_reset_tqp_queue_cmd *req; 6048 struct hclge_desc desc; 6049 int ret; 6050 6051 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 6052 6053 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 6054 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 6055 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); 6056 6057 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6058 if (ret) { 6059 dev_err(&hdev->pdev->dev, 6060 "Send tqp reset cmd error, status =%d\n", ret); 6061 return ret; 6062 } 6063 6064 return 0; 6065 } 6066 6067 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 6068 { 6069 struct hclge_reset_tqp_queue_cmd *req; 6070 struct hclge_desc desc; 6071 int ret; 6072 6073 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 6074 6075 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 
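	/* HCLGE_OPC_RESET_TQP_QUEUE was set up above as a read command: the
	 * reply carries the queue's ready_to_reset flag, which the reset
	 * helpers below poll until the hardware queue reset completes.
	 */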
6076 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 6077 6078 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 6079 if (ret) { 6080 dev_err(&hdev->pdev->dev, 6081 "Get reset status error, status =%d\n", ret); 6082 return ret; 6083 } 6084 6085 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 6086 } 6087 6088 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, 6089 u16 queue_id) 6090 { 6091 struct hnae3_queue *queue; 6092 struct hclge_tqp *tqp; 6093 6094 queue = handle->kinfo.tqp[queue_id]; 6095 tqp = container_of(queue, struct hclge_tqp, q); 6096 6097 return tqp->index; 6098 } 6099 6100 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 6101 { 6102 struct hclge_vport *vport = hclge_get_vport(handle); 6103 struct hclge_dev *hdev = vport->back; 6104 int reset_try_times = 0; 6105 int reset_status; 6106 u16 queue_gid; 6107 int ret; 6108 6109 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 6110 return; 6111 6112 queue_gid = hclge_covert_handle_qid_global(handle, queue_id); 6113 6114 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 6115 if (ret) { 6116 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); 6117 return; 6118 } 6119 6120 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 6121 if (ret) { 6122 dev_warn(&hdev->pdev->dev, 6123 "Send reset tqp cmd fail, ret = %d\n", ret); 6124 return; 6125 } 6126 6127 reset_try_times = 0; 6128 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 6129 /* Wait for tqp hw reset */ 6130 msleep(20); 6131 reset_status = hclge_get_reset_status(hdev, queue_gid); 6132 if (reset_status) 6133 break; 6134 } 6135 6136 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 6137 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 6138 return; 6139 } 6140 6141 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 6142 if (ret) { 6143 dev_warn(&hdev->pdev->dev, 6144 "Deassert the soft reset fail, ret = %d\n", ret); 6145 return; 6146 } 6147 } 6148 6149 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) 6150 { 6151 struct hclge_dev *hdev = vport->back; 6152 int reset_try_times = 0; 6153 int reset_status; 6154 u16 queue_gid; 6155 int ret; 6156 6157 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); 6158 6159 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 6160 if (ret) { 6161 dev_warn(&hdev->pdev->dev, 6162 "Send reset tqp cmd fail, ret = %d\n", ret); 6163 return; 6164 } 6165 6166 reset_try_times = 0; 6167 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 6168 /* Wait for tqp hw reset */ 6169 msleep(20); 6170 reset_status = hclge_get_reset_status(hdev, queue_gid); 6171 if (reset_status) 6172 break; 6173 } 6174 6175 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 6176 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 6177 return; 6178 } 6179 6180 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 6181 if (ret) 6182 dev_warn(&hdev->pdev->dev, 6183 "Deassert the soft reset fail, ret = %d\n", ret); 6184 } 6185 6186 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 6187 { 6188 struct hclge_vport *vport = hclge_get_vport(handle); 6189 struct hclge_dev *hdev = vport->back; 6190 6191 return hdev->fw_version; 6192 } 6193 6194 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 6195 { 6196 struct phy_device *phydev = hdev->hw.mac.phydev; 6197 6198 if (!phydev) 6199 return; 6200 6201 phy_set_asym_pause(phydev, rx_en, tx_en); 6202 } 6203 6204 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 6205 { 
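	/* Remember the requested pause setting in fc_mode_last_time and
	 * program it into the MAC. When PFC is currently the active flow
	 * control mode, the MAC pause configuration is left untouched and
	 * only the last-requested mode is recorded.
	 */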
6206 int ret; 6207 6208 if (rx_en && tx_en) 6209 hdev->fc_mode_last_time = HCLGE_FC_FULL; 6210 else if (rx_en && !tx_en) 6211 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 6212 else if (!rx_en && tx_en) 6213 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 6214 else 6215 hdev->fc_mode_last_time = HCLGE_FC_NONE; 6216 6217 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 6218 return 0; 6219 6220 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 6221 if (ret) { 6222 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", 6223 ret); 6224 return ret; 6225 } 6226 6227 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 6228 6229 return 0; 6230 } 6231 6232 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 6233 { 6234 struct phy_device *phydev = hdev->hw.mac.phydev; 6235 u16 remote_advertising = 0; 6236 u16 local_advertising = 0; 6237 u32 rx_pause, tx_pause; 6238 u8 flowctl; 6239 6240 if (!phydev->link || !phydev->autoneg) 6241 return 0; 6242 6243 local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising); 6244 6245 if (phydev->pause) 6246 remote_advertising = LPA_PAUSE_CAP; 6247 6248 if (phydev->asym_pause) 6249 remote_advertising |= LPA_PAUSE_ASYM; 6250 6251 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 6252 remote_advertising); 6253 tx_pause = flowctl & FLOW_CTRL_TX; 6254 rx_pause = flowctl & FLOW_CTRL_RX; 6255 6256 if (phydev->duplex == HCLGE_MAC_HALF) { 6257 tx_pause = 0; 6258 rx_pause = 0; 6259 } 6260 6261 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 6262 } 6263 6264 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 6265 u32 *rx_en, u32 *tx_en) 6266 { 6267 struct hclge_vport *vport = hclge_get_vport(handle); 6268 struct hclge_dev *hdev = vport->back; 6269 6270 *auto_neg = hclge_get_autoneg(handle); 6271 6272 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 6273 *rx_en = 0; 6274 *tx_en = 0; 6275 return; 6276 } 6277 6278 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 6279 *rx_en = 1; 6280 *tx_en = 0; 6281 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 6282 *tx_en = 1; 6283 *rx_en = 0; 6284 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 6285 *rx_en = 1; 6286 *tx_en = 1; 6287 } else { 6288 *rx_en = 0; 6289 *tx_en = 0; 6290 } 6291 } 6292 6293 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 6294 u32 rx_en, u32 tx_en) 6295 { 6296 struct hclge_vport *vport = hclge_get_vport(handle); 6297 struct hclge_dev *hdev = vport->back; 6298 struct phy_device *phydev = hdev->hw.mac.phydev; 6299 u32 fc_autoneg; 6300 6301 fc_autoneg = hclge_get_autoneg(handle); 6302 if (auto_neg != fc_autoneg) { 6303 dev_info(&hdev->pdev->dev, 6304 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 6305 return -EOPNOTSUPP; 6306 } 6307 6308 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 6309 dev_info(&hdev->pdev->dev, 6310 "Priority flow control enabled. Cannot set link flow control.\n"); 6311 return -EOPNOTSUPP; 6312 } 6313 6314 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 6315 6316 if (!fc_autoneg) 6317 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 6318 6319 /* Only support flow control negotiation for netdev with 6320 * phy attached for now. 
6321 */ 6322 if (!phydev) 6323 return -EOPNOTSUPP; 6324 6325 return phy_start_aneg(phydev); 6326 } 6327 6328 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 6329 u8 *auto_neg, u32 *speed, u8 *duplex) 6330 { 6331 struct hclge_vport *vport = hclge_get_vport(handle); 6332 struct hclge_dev *hdev = vport->back; 6333 6334 if (speed) 6335 *speed = hdev->hw.mac.speed; 6336 if (duplex) 6337 *duplex = hdev->hw.mac.duplex; 6338 if (auto_neg) 6339 *auto_neg = hdev->hw.mac.autoneg; 6340 } 6341 6342 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) 6343 { 6344 struct hclge_vport *vport = hclge_get_vport(handle); 6345 struct hclge_dev *hdev = vport->back; 6346 6347 if (media_type) 6348 *media_type = hdev->hw.mac.media_type; 6349 } 6350 6351 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 6352 u8 *tp_mdix_ctrl, u8 *tp_mdix) 6353 { 6354 struct hclge_vport *vport = hclge_get_vport(handle); 6355 struct hclge_dev *hdev = vport->back; 6356 struct phy_device *phydev = hdev->hw.mac.phydev; 6357 int mdix_ctrl, mdix, retval, is_resolved; 6358 6359 if (!phydev) { 6360 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 6361 *tp_mdix = ETH_TP_MDI_INVALID; 6362 return; 6363 } 6364 6365 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 6366 6367 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 6368 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 6369 HCLGE_PHY_MDIX_CTRL_S); 6370 6371 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 6372 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 6373 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 6374 6375 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 6376 6377 switch (mdix_ctrl) { 6378 case 0x0: 6379 *tp_mdix_ctrl = ETH_TP_MDI; 6380 break; 6381 case 0x1: 6382 *tp_mdix_ctrl = ETH_TP_MDI_X; 6383 break; 6384 case 0x3: 6385 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 6386 break; 6387 default: 6388 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 6389 break; 6390 } 6391 6392 if (!is_resolved) 6393 *tp_mdix = ETH_TP_MDI_INVALID; 6394 else if (mdix) 6395 *tp_mdix = ETH_TP_MDI_X; 6396 else 6397 *tp_mdix = ETH_TP_MDI; 6398 } 6399 6400 static int hclge_init_instance_hw(struct hclge_dev *hdev) 6401 { 6402 return hclge_mac_connect_phy(hdev); 6403 } 6404 6405 static void hclge_uninit_instance_hw(struct hclge_dev *hdev) 6406 { 6407 hclge_mac_disconnect_phy(hdev); 6408 } 6409 6410 static int hclge_init_client_instance(struct hnae3_client *client, 6411 struct hnae3_ae_dev *ae_dev) 6412 { 6413 struct hclge_dev *hdev = ae_dev->priv; 6414 struct hclge_vport *vport; 6415 int i, ret; 6416 6417 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 6418 vport = &hdev->vport[i]; 6419 6420 switch (client->type) { 6421 case HNAE3_CLIENT_KNIC: 6422 6423 hdev->nic_client = client; 6424 vport->nic.client = client; 6425 ret = client->ops->init_instance(&vport->nic); 6426 if (ret) 6427 goto clear_nic; 6428 6429 ret = hclge_init_instance_hw(hdev); 6430 if (ret) { 6431 client->ops->uninit_instance(&vport->nic, 6432 0); 6433 goto clear_nic; 6434 } 6435 6436 hnae3_set_client_init_flag(client, ae_dev, 1); 6437 6438 if (hdev->roce_client && 6439 hnae3_dev_roce_supported(hdev)) { 6440 struct hnae3_client *rc = hdev->roce_client; 6441 6442 ret = hclge_init_roce_base_info(vport); 6443 if (ret) 6444 goto clear_roce; 6445 6446 ret = rc->ops->init_instance(&vport->roce); 6447 if (ret) 6448 goto clear_roce; 6449 6450 hnae3_set_client_init_flag(hdev->roce_client, 6451 ae_dev, 1); 6452 } 6453 6454 break; 6455 case HNAE3_CLIENT_UNIC: 6456 
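			/* UNIC client: same NIC instance init as the KNIC
			 * case above, but without the PHY connect or the
			 * RoCE client bring-up.
			 */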
hdev->nic_client = client; 6457 vport->nic.client = client; 6458 6459 ret = client->ops->init_instance(&vport->nic); 6460 if (ret) 6461 goto clear_nic; 6462 6463 hnae3_set_client_init_flag(client, ae_dev, 1); 6464 6465 break; 6466 case HNAE3_CLIENT_ROCE: 6467 if (hnae3_dev_roce_supported(hdev)) { 6468 hdev->roce_client = client; 6469 vport->roce.client = client; 6470 } 6471 6472 if (hdev->roce_client && hdev->nic_client) { 6473 ret = hclge_init_roce_base_info(vport); 6474 if (ret) 6475 goto clear_roce; 6476 6477 ret = client->ops->init_instance(&vport->roce); 6478 if (ret) 6479 goto clear_roce; 6480 6481 hnae3_set_client_init_flag(client, ae_dev, 1); 6482 } 6483 6484 break; 6485 default: 6486 return -EINVAL; 6487 } 6488 } 6489 6490 return 0; 6491 6492 clear_nic: 6493 hdev->nic_client = NULL; 6494 vport->nic.client = NULL; 6495 return ret; 6496 clear_roce: 6497 hdev->roce_client = NULL; 6498 vport->roce.client = NULL; 6499 return ret; 6500 } 6501 6502 static void hclge_uninit_client_instance(struct hnae3_client *client, 6503 struct hnae3_ae_dev *ae_dev) 6504 { 6505 struct hclge_dev *hdev = ae_dev->priv; 6506 struct hclge_vport *vport; 6507 int i; 6508 6509 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 6510 vport = &hdev->vport[i]; 6511 if (hdev->roce_client) { 6512 hdev->roce_client->ops->uninit_instance(&vport->roce, 6513 0); 6514 hdev->roce_client = NULL; 6515 vport->roce.client = NULL; 6516 } 6517 if (client->type == HNAE3_CLIENT_ROCE) 6518 return; 6519 if (hdev->nic_client && client->ops->uninit_instance) { 6520 hclge_uninit_instance_hw(hdev); 6521 client->ops->uninit_instance(&vport->nic, 0); 6522 hdev->nic_client = NULL; 6523 vport->nic.client = NULL; 6524 } 6525 } 6526 } 6527 6528 static int hclge_pci_init(struct hclge_dev *hdev) 6529 { 6530 struct pci_dev *pdev = hdev->pdev; 6531 struct hclge_hw *hw; 6532 int ret; 6533 6534 ret = pci_enable_device(pdev); 6535 if (ret) { 6536 dev_err(&pdev->dev, "failed to enable PCI device\n"); 6537 return ret; 6538 } 6539 6540 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 6541 if (ret) { 6542 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 6543 if (ret) { 6544 dev_err(&pdev->dev, 6545 "can't set consistent PCI DMA"); 6546 goto err_disable_device; 6547 } 6548 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 6549 } 6550 6551 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 6552 if (ret) { 6553 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 6554 goto err_disable_device; 6555 } 6556 6557 pci_set_master(pdev); 6558 hw = &hdev->hw; 6559 hw->io_base = pcim_iomap(pdev, 2, 0); 6560 if (!hw->io_base) { 6561 dev_err(&pdev->dev, "Can't map configuration register space\n"); 6562 ret = -ENOMEM; 6563 goto err_clr_master; 6564 } 6565 6566 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 6567 6568 return 0; 6569 err_clr_master: 6570 pci_clear_master(pdev); 6571 pci_release_regions(pdev); 6572 err_disable_device: 6573 pci_disable_device(pdev); 6574 6575 return ret; 6576 } 6577 6578 static void hclge_pci_uninit(struct hclge_dev *hdev) 6579 { 6580 struct pci_dev *pdev = hdev->pdev; 6581 6582 pcim_iounmap(pdev, hdev->hw.io_base); 6583 pci_free_irq_vectors(pdev); 6584 pci_clear_master(pdev); 6585 pci_release_mem_regions(pdev); 6586 pci_disable_device(pdev); 6587 } 6588 6589 static void hclge_state_init(struct hclge_dev *hdev) 6590 { 6591 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 6592 set_bit(HCLGE_STATE_DOWN, &hdev->state); 6593 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 6594 
clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 6595 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 6596 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 6597 } 6598 6599 static void hclge_state_uninit(struct hclge_dev *hdev) 6600 { 6601 set_bit(HCLGE_STATE_DOWN, &hdev->state); 6602 6603 if (hdev->service_timer.function) 6604 del_timer_sync(&hdev->service_timer); 6605 if (hdev->service_task.func) 6606 cancel_work_sync(&hdev->service_task); 6607 if (hdev->rst_service_task.func) 6608 cancel_work_sync(&hdev->rst_service_task); 6609 if (hdev->mbx_service_task.func) 6610 cancel_work_sync(&hdev->mbx_service_task); 6611 } 6612 6613 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 6614 { 6615 struct pci_dev *pdev = ae_dev->pdev; 6616 struct hclge_dev *hdev; 6617 int ret; 6618 6619 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 6620 if (!hdev) { 6621 ret = -ENOMEM; 6622 goto out; 6623 } 6624 6625 hdev->pdev = pdev; 6626 hdev->ae_dev = ae_dev; 6627 hdev->reset_type = HNAE3_NONE_RESET; 6628 ae_dev->priv = hdev; 6629 6630 ret = hclge_pci_init(hdev); 6631 if (ret) { 6632 dev_err(&pdev->dev, "PCI init failed\n"); 6633 goto out; 6634 } 6635 6636 /* Firmware command queue initialize */ 6637 ret = hclge_cmd_queue_init(hdev); 6638 if (ret) { 6639 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 6640 goto err_pci_uninit; 6641 } 6642 6643 /* Firmware command initialize */ 6644 ret = hclge_cmd_init(hdev); 6645 if (ret) 6646 goto err_cmd_uninit; 6647 6648 ret = hclge_get_cap(hdev); 6649 if (ret) { 6650 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 6651 ret); 6652 goto err_cmd_uninit; 6653 } 6654 6655 ret = hclge_configure(hdev); 6656 if (ret) { 6657 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 6658 goto err_cmd_uninit; 6659 } 6660 6661 ret = hclge_init_msi(hdev); 6662 if (ret) { 6663 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 6664 goto err_cmd_uninit; 6665 } 6666 6667 ret = hclge_misc_irq_init(hdev); 6668 if (ret) { 6669 dev_err(&pdev->dev, 6670 "Misc IRQ(vector0) init error, ret = %d.\n", 6671 ret); 6672 goto err_msi_uninit; 6673 } 6674 6675 ret = hclge_alloc_tqps(hdev); 6676 if (ret) { 6677 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 6678 goto err_msi_irq_uninit; 6679 } 6680 6681 ret = hclge_alloc_vport(hdev); 6682 if (ret) { 6683 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 6684 goto err_msi_irq_uninit; 6685 } 6686 6687 ret = hclge_map_tqp(hdev); 6688 if (ret) { 6689 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 6690 goto err_msi_irq_uninit; 6691 } 6692 6693 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 6694 ret = hclge_mac_mdio_config(hdev); 6695 if (ret) { 6696 dev_err(&hdev->pdev->dev, 6697 "mdio config fail ret=%d\n", ret); 6698 goto err_msi_irq_uninit; 6699 } 6700 } 6701 6702 ret = hclge_init_umv_space(hdev); 6703 if (ret) { 6704 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); 6705 goto err_msi_irq_uninit; 6706 } 6707 6708 ret = hclge_mac_init(hdev); 6709 if (ret) { 6710 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 6711 goto err_mdiobus_unreg; 6712 } 6713 6714 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 6715 if (ret) { 6716 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 6717 goto err_mdiobus_unreg; 6718 } 6719 6720 ret = hclge_init_vlan_config(hdev); 6721 if (ret) { 6722 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 6723 goto err_mdiobus_unreg; 6724 } 6725 6726 ret = 
hclge_tm_schd_init(hdev); 6727 if (ret) { 6728 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 6729 goto err_mdiobus_unreg; 6730 } 6731 6732 hclge_rss_init_cfg(hdev); 6733 ret = hclge_rss_init_hw(hdev); 6734 if (ret) { 6735 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 6736 goto err_mdiobus_unreg; 6737 } 6738 6739 ret = init_mgr_tbl(hdev); 6740 if (ret) { 6741 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 6742 goto err_mdiobus_unreg; 6743 } 6744 6745 ret = hclge_init_fd_config(hdev); 6746 if (ret) { 6747 dev_err(&pdev->dev, 6748 "fd table init fail, ret=%d\n", ret); 6749 goto err_mdiobus_unreg; 6750 } 6751 6752 hclge_dcb_ops_set(hdev); 6753 6754 timer_setup(&hdev->service_timer, hclge_service_timer, 0); 6755 INIT_WORK(&hdev->service_task, hclge_service_task); 6756 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); 6757 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); 6758 6759 hclge_clear_all_event_cause(hdev); 6760 6761 /* Enable MISC vector(vector0) */ 6762 hclge_enable_vector(&hdev->misc_vector, true); 6763 6764 hclge_state_init(hdev); 6765 6766 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); 6767 return 0; 6768 6769 err_mdiobus_unreg: 6770 if (hdev->hw.mac.phydev) 6771 mdiobus_unregister(hdev->hw.mac.mdio_bus); 6772 err_msi_irq_uninit: 6773 hclge_misc_irq_uninit(hdev); 6774 err_msi_uninit: 6775 pci_free_irq_vectors(pdev); 6776 err_cmd_uninit: 6777 hclge_destroy_cmd_queue(&hdev->hw); 6778 err_pci_uninit: 6779 pcim_iounmap(pdev, hdev->hw.io_base); 6780 pci_clear_master(pdev); 6781 pci_release_regions(pdev); 6782 pci_disable_device(pdev); 6783 out: 6784 return ret; 6785 } 6786 6787 static void hclge_stats_clear(struct hclge_dev *hdev) 6788 { 6789 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); 6790 } 6791 6792 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 6793 { 6794 struct hclge_dev *hdev = ae_dev->priv; 6795 struct pci_dev *pdev = ae_dev->pdev; 6796 int ret; 6797 6798 set_bit(HCLGE_STATE_DOWN, &hdev->state); 6799 6800 hclge_stats_clear(hdev); 6801 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 6802 6803 ret = hclge_cmd_init(hdev); 6804 if (ret) { 6805 dev_err(&pdev->dev, "Cmd queue init failed\n"); 6806 return ret; 6807 } 6808 6809 ret = hclge_get_cap(hdev); 6810 if (ret) { 6811 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 6812 ret); 6813 return ret; 6814 } 6815 6816 ret = hclge_configure(hdev); 6817 if (ret) { 6818 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 6819 return ret; 6820 } 6821 6822 ret = hclge_map_tqp(hdev); 6823 if (ret) { 6824 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 6825 return ret; 6826 } 6827 6828 hclge_reset_umv_space(hdev); 6829 6830 ret = hclge_mac_init(hdev); 6831 if (ret) { 6832 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 6833 return ret; 6834 } 6835 6836 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 6837 if (ret) { 6838 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 6839 return ret; 6840 } 6841 6842 ret = hclge_init_vlan_config(hdev); 6843 if (ret) { 6844 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 6845 return ret; 6846 } 6847 6848 ret = hclge_tm_init_hw(hdev); 6849 if (ret) { 6850 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 6851 return ret; 6852 } 6853 6854 ret = hclge_rss_init_hw(hdev); 6855 if (ret) { 6856 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 6857 return ret; 6858 } 6859 6860 ret = hclge_init_fd_config(hdev); 6861 if (ret) { 
6862 dev_err(&pdev->dev, 6863 "fd table init fail, ret=%d\n", ret); 6864 return ret; 6865 } 6866 6867 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 6868 HCLGE_DRIVER_NAME); 6869 6870 return 0; 6871 } 6872 6873 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 6874 { 6875 struct hclge_dev *hdev = ae_dev->priv; 6876 struct hclge_mac *mac = &hdev->hw.mac; 6877 6878 hclge_state_uninit(hdev); 6879 6880 if (mac->phydev) 6881 mdiobus_unregister(mac->mdio_bus); 6882 6883 hclge_uninit_umv_space(hdev); 6884 6885 /* Disable MISC vector(vector0) */ 6886 hclge_enable_vector(&hdev->misc_vector, false); 6887 synchronize_irq(hdev->misc_vector.vector_irq); 6888 6889 hclge_destroy_cmd_queue(&hdev->hw); 6890 hclge_misc_irq_uninit(hdev); 6891 hclge_pci_uninit(hdev); 6892 ae_dev->priv = NULL; 6893 } 6894 6895 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 6896 { 6897 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 6898 struct hclge_vport *vport = hclge_get_vport(handle); 6899 struct hclge_dev *hdev = vport->back; 6900 6901 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 6902 } 6903 6904 static void hclge_get_channels(struct hnae3_handle *handle, 6905 struct ethtool_channels *ch) 6906 { 6907 struct hclge_vport *vport = hclge_get_vport(handle); 6908 6909 ch->max_combined = hclge_get_max_channels(handle); 6910 ch->other_count = 1; 6911 ch->max_other = 1; 6912 ch->combined_count = vport->alloc_tqps; 6913 } 6914 6915 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 6916 u16 *alloc_tqps, u16 *max_rss_size) 6917 { 6918 struct hclge_vport *vport = hclge_get_vport(handle); 6919 struct hclge_dev *hdev = vport->back; 6920 6921 *alloc_tqps = vport->alloc_tqps; 6922 *max_rss_size = hdev->rss_size_max; 6923 } 6924 6925 static void hclge_release_tqp(struct hclge_vport *vport) 6926 { 6927 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 6928 struct hclge_dev *hdev = vport->back; 6929 int i; 6930 6931 for (i = 0; i < kinfo->num_tqps; i++) { 6932 struct hclge_tqp *tqp = 6933 container_of(kinfo->tqp[i], struct hclge_tqp, q); 6934 6935 tqp->q.handle = NULL; 6936 tqp->q.tqp_index = 0; 6937 tqp->alloced = false; 6938 } 6939 6940 devm_kfree(&hdev->pdev->dev, kinfo->tqp); 6941 kinfo->tqp = NULL; 6942 } 6943 6944 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) 6945 { 6946 struct hclge_vport *vport = hclge_get_vport(handle); 6947 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 6948 struct hclge_dev *hdev = vport->back; 6949 int cur_rss_size = kinfo->rss_size; 6950 int cur_tqps = kinfo->num_tqps; 6951 u16 tc_offset[HCLGE_MAX_TC_NUM]; 6952 u16 tc_valid[HCLGE_MAX_TC_NUM]; 6953 u16 tc_size[HCLGE_MAX_TC_NUM]; 6954 u16 roundup_size; 6955 u32 *rss_indir; 6956 int ret, i; 6957 6958 /* Free old tqps, and reallocate with new tqp number when nic setup */ 6959 hclge_release_tqp(vport); 6960 6961 ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); 6962 if (ret) { 6963 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); 6964 return ret; 6965 } 6966 6967 ret = hclge_map_tqp_to_vport(hdev, vport); 6968 if (ret) { 6969 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); 6970 return ret; 6971 } 6972 6973 ret = hclge_tm_schd_init(hdev); 6974 if (ret) { 6975 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); 6976 return ret; 6977 } 6978 6979 roundup_size = roundup_pow_of_two(kinfo->rss_size); 6980 roundup_size = ilog2(roundup_size); 6981 /* Set the RSS TC 
mode according to the new RSS size */ 6982 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 6983 tc_valid[i] = 0; 6984 6985 if (!(hdev->hw_tc_map & BIT(i))) 6986 continue; 6987 6988 tc_valid[i] = 1; 6989 tc_size[i] = roundup_size; 6990 tc_offset[i] = kinfo->rss_size * i; 6991 } 6992 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 6993 if (ret) 6994 return ret; 6995 6996 /* Reinitializes the rss indirect table according to the new RSS size */ 6997 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 6998 if (!rss_indir) 6999 return -ENOMEM; 7000 7001 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 7002 rss_indir[i] = i % kinfo->rss_size; 7003 7004 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 7005 if (ret) 7006 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 7007 ret); 7008 7009 kfree(rss_indir); 7010 7011 if (!ret) 7012 dev_info(&hdev->pdev->dev, 7013 "Channels changed, rss_size from %d to %d, tqps from %d to %d", 7014 cur_rss_size, kinfo->rss_size, 7015 cur_tqps, kinfo->rss_size * kinfo->num_tc); 7016 7017 return ret; 7018 } 7019 7020 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, 7021 u32 *regs_num_64_bit) 7022 { 7023 struct hclge_desc desc; 7024 u32 total_num; 7025 int ret; 7026 7027 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); 7028 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7029 if (ret) { 7030 dev_err(&hdev->pdev->dev, 7031 "Query register number cmd failed, ret = %d.\n", ret); 7032 return ret; 7033 } 7034 7035 *regs_num_32_bit = le32_to_cpu(desc.data[0]); 7036 *regs_num_64_bit = le32_to_cpu(desc.data[1]); 7037 7038 total_num = *regs_num_32_bit + *regs_num_64_bit; 7039 if (!total_num) 7040 return -EINVAL; 7041 7042 return 0; 7043 } 7044 7045 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num, 7046 void *data) 7047 { 7048 #define HCLGE_32_BIT_REG_RTN_DATANUM 8 7049 7050 struct hclge_desc *desc; 7051 u32 *reg_val = data; 7052 __le32 *desc_data; 7053 int cmd_num; 7054 int i, k, n; 7055 int ret; 7056 7057 if (regs_num == 0) 7058 return 0; 7059 7060 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM); 7061 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 7062 if (!desc) 7063 return -ENOMEM; 7064 7065 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true); 7066 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num); 7067 if (ret) { 7068 dev_err(&hdev->pdev->dev, 7069 "Query 32 bit register cmd failed, ret = %d.\n", ret); 7070 kfree(desc); 7071 return ret; 7072 } 7073 7074 for (i = 0; i < cmd_num; i++) { 7075 if (i == 0) { 7076 desc_data = (__le32 *)(&desc[i].data[0]); 7077 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2; 7078 } else { 7079 desc_data = (__le32 *)(&desc[i]); 7080 n = HCLGE_32_BIT_REG_RTN_DATANUM; 7081 } 7082 for (k = 0; k < n; k++) { 7083 *reg_val++ = le32_to_cpu(*desc_data++); 7084 7085 regs_num--; 7086 if (!regs_num) 7087 break; 7088 } 7089 } 7090 7091 kfree(desc); 7092 return 0; 7093 } 7094 7095 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, 7096 void *data) 7097 { 7098 #define HCLGE_64_BIT_REG_RTN_DATANUM 4 7099 7100 struct hclge_desc *desc; 7101 u64 *reg_val = data; 7102 __le64 *desc_data; 7103 int cmd_num; 7104 int i, k, n; 7105 int ret; 7106 7107 if (regs_num == 0) 7108 return 0; 7109 7110 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM); 7111 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL); 7112 if (!desc) 7113 return -ENOMEM; 7114 7115 hclge_cmd_setup_basic_desc(&desc[0], 
				   HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
				    data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport =
hclge_get_vport(handle); 7242 struct hclge_dev *hdev = vport->back; 7243 unsigned int idx = 0; 7244 7245 for (; idx < size; idx++) { 7246 supported[idx] = hdev->hw.mac.supported[idx]; 7247 advertising[idx] = hdev->hw.mac.advertising[idx]; 7248 } 7249 } 7250 7251 static const struct hnae3_ae_ops hclge_ops = { 7252 .init_ae_dev = hclge_init_ae_dev, 7253 .uninit_ae_dev = hclge_uninit_ae_dev, 7254 .init_client_instance = hclge_init_client_instance, 7255 .uninit_client_instance = hclge_uninit_client_instance, 7256 .map_ring_to_vector = hclge_map_ring_to_vector, 7257 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, 7258 .get_vector = hclge_get_vector, 7259 .put_vector = hclge_put_vector, 7260 .set_promisc_mode = hclge_set_promisc_mode, 7261 .set_loopback = hclge_set_loopback, 7262 .start = hclge_ae_start, 7263 .stop = hclge_ae_stop, 7264 .get_status = hclge_get_status, 7265 .get_ksettings_an_result = hclge_get_ksettings_an_result, 7266 .update_speed_duplex_h = hclge_update_speed_duplex_h, 7267 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, 7268 .get_media_type = hclge_get_media_type, 7269 .get_rss_key_size = hclge_get_rss_key_size, 7270 .get_rss_indir_size = hclge_get_rss_indir_size, 7271 .get_rss = hclge_get_rss, 7272 .set_rss = hclge_set_rss, 7273 .set_rss_tuple = hclge_set_rss_tuple, 7274 .get_rss_tuple = hclge_get_rss_tuple, 7275 .get_tc_size = hclge_get_tc_size, 7276 .get_mac_addr = hclge_get_mac_addr, 7277 .set_mac_addr = hclge_set_mac_addr, 7278 .do_ioctl = hclge_do_ioctl, 7279 .add_uc_addr = hclge_add_uc_addr, 7280 .rm_uc_addr = hclge_rm_uc_addr, 7281 .add_mc_addr = hclge_add_mc_addr, 7282 .rm_mc_addr = hclge_rm_mc_addr, 7283 .set_autoneg = hclge_set_autoneg, 7284 .get_autoneg = hclge_get_autoneg, 7285 .get_pauseparam = hclge_get_pauseparam, 7286 .set_pauseparam = hclge_set_pauseparam, 7287 .set_mtu = hclge_set_mtu, 7288 .reset_queue = hclge_reset_tqp, 7289 .get_stats = hclge_get_stats, 7290 .update_stats = hclge_update_stats, 7291 .get_strings = hclge_get_strings, 7292 .get_sset_count = hclge_get_sset_count, 7293 .get_fw_version = hclge_get_fw_version, 7294 .get_mdix_mode = hclge_get_mdix_mode, 7295 .enable_vlan_filter = hclge_enable_vlan_filter, 7296 .set_vlan_filter = hclge_set_vlan_filter, 7297 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 7298 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 7299 .reset_event = hclge_reset_event, 7300 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, 7301 .set_channels = hclge_set_channels, 7302 .get_channels = hclge_get_channels, 7303 .get_regs_len = hclge_get_regs_len, 7304 .get_regs = hclge_get_regs, 7305 .set_led_id = hclge_set_led_id, 7306 .get_link_mode = hclge_get_link_mode, 7307 .add_fd_entry = hclge_add_fd_entry, 7308 .del_fd_entry = hclge_del_fd_entry, 7309 .del_all_fd_entries = hclge_del_all_fd_entries, 7310 .get_fd_rule_cnt = hclge_get_fd_rule_cnt, 7311 .get_fd_rule_info = hclge_get_fd_rule_info, 7312 .get_fd_all_rules = hclge_get_all_rules, 7313 .restore_fd_rules = hclge_restore_fd_entries, 7314 .enable_fd = hclge_enable_fd, 7315 }; 7316 7317 static struct hnae3_ae_algo ae_algo = { 7318 .ops = &hclge_ops, 7319 .pdev_id_table = ae_algo_pci_tbl, 7320 }; 7321 7322 static int hclge_init(void) 7323 { 7324 pr_info("%s is initializing\n", HCLGE_NAME); 7325 7326 hnae3_register_ae_algo(&ae_algo); 7327 7328 return 0; 7329 } 7330 7331 static void hclge_exit(void) 7332 { 7333 hnae3_unregister_ae_algo(&ae_algo); 7334 } 7335 module_init(hclge_init); 7336 module_exit(hclge_exit); 7337 7338 MODULE_LICENSE("GPL"); 7339 
MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 7340 MODULE_DESCRIPTION("HCLGE Driver"); 7341 MODULE_VERSION(HCLGE_MOD_VERSION); 7342