1 /* 2 * Copyright (c) 2016-2017 Hisilicon Limited. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 */ 9 10 #include <linux/acpi.h> 11 #include <linux/device.h> 12 #include <linux/etherdevice.h> 13 #include <linux/init.h> 14 #include <linux/interrupt.h> 15 #include <linux/kernel.h> 16 #include <linux/module.h> 17 #include <linux/netdevice.h> 18 #include <linux/pci.h> 19 #include <linux/platform_device.h> 20 21 #include "hclge_cmd.h" 22 #include "hclge_dcb.h" 23 #include "hclge_main.h" 24 #include "hclge_mdio.h" 25 #include "hclge_tm.h" 26 #include "hnae3.h" 27 28 #define HCLGE_NAME "hclge" 29 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) 30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) 31 #define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) 32 #define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) 33 34 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 35 enum hclge_mta_dmac_sel_type mta_mac_sel, 36 bool enable); 37 static int hclge_init_vlan_config(struct hclge_dev *hdev); 38 39 static struct hnae3_ae_algo ae_algo; 40 41 static const struct pci_device_id ae_algo_pci_tbl[] = { 42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, 45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, 46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 49 /* required last entry */ 50 {0, } 51 }; 52 53 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { 54 "Mac Loopback test", 55 "Serdes Loopback test", 56 "Phy Loopback test" 57 }; 58 59 static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { 60 {"igu_rx_oversize_pkt", 61 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, 62 {"igu_rx_undersize_pkt", 63 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, 64 {"igu_rx_out_all_pkt", 65 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, 66 {"igu_rx_uni_pkt", 67 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, 68 {"igu_rx_multi_pkt", 69 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, 70 {"igu_rx_broad_pkt", 71 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, 72 {"egu_tx_out_all_pkt", 73 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, 74 {"egu_tx_uni_pkt", 75 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, 76 {"egu_tx_multi_pkt", 77 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, 78 {"egu_tx_broad_pkt", 79 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, 80 {"ssu_ppp_mac_key_num", 81 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, 82 {"ssu_ppp_host_key_num", 83 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, 84 {"ppp_ssu_mac_rlt_num", 85 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, 86 {"ppp_ssu_host_rlt_num", 87 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, 88 {"ssu_tx_in_num", 89 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, 90 {"ssu_tx_out_num", 91 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, 92 {"ssu_rx_in_num", 93 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, 94 {"ssu_rx_out_num", 95 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} 96 }; 97 98 static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { 99 {"igu_rx_err_pkt", 100 
HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, 101 {"igu_rx_no_eof_pkt", 102 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, 103 {"igu_rx_no_sof_pkt", 104 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, 105 {"egu_tx_1588_pkt", 106 HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, 107 {"ssu_full_drop_num", 108 HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, 109 {"ssu_part_drop_num", 110 HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, 111 {"ppp_key_drop_num", 112 HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, 113 {"ppp_rlt_drop_num", 114 HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, 115 {"ssu_key_drop_num", 116 HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, 117 {"pkt_curr_buf_cnt", 118 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, 119 {"qcn_fb_rcv_cnt", 120 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, 121 {"qcn_fb_drop_cnt", 122 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, 123 {"qcn_fb_invaild_cnt", 124 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, 125 {"rx_packet_tc0_in_cnt", 126 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, 127 {"rx_packet_tc1_in_cnt", 128 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, 129 {"rx_packet_tc2_in_cnt", 130 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, 131 {"rx_packet_tc3_in_cnt", 132 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, 133 {"rx_packet_tc4_in_cnt", 134 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, 135 {"rx_packet_tc5_in_cnt", 136 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, 137 {"rx_packet_tc6_in_cnt", 138 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, 139 {"rx_packet_tc7_in_cnt", 140 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, 141 {"rx_packet_tc0_out_cnt", 142 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, 143 {"rx_packet_tc1_out_cnt", 144 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, 145 {"rx_packet_tc2_out_cnt", 146 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, 147 {"rx_packet_tc3_out_cnt", 148 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, 149 {"rx_packet_tc4_out_cnt", 150 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, 151 {"rx_packet_tc5_out_cnt", 152 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, 153 {"rx_packet_tc6_out_cnt", 154 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, 155 {"rx_packet_tc7_out_cnt", 156 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, 157 {"tx_packet_tc0_in_cnt", 158 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, 159 {"tx_packet_tc1_in_cnt", 160 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, 161 {"tx_packet_tc2_in_cnt", 162 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, 163 {"tx_packet_tc3_in_cnt", 164 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, 165 {"tx_packet_tc4_in_cnt", 166 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, 167 {"tx_packet_tc5_in_cnt", 168 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, 169 {"tx_packet_tc6_in_cnt", 170 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, 171 {"tx_packet_tc7_in_cnt", 172 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, 173 {"tx_packet_tc0_out_cnt", 174 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, 175 {"tx_packet_tc1_out_cnt", 176 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, 177 {"tx_packet_tc2_out_cnt", 178 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, 179 {"tx_packet_tc3_out_cnt", 180 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, 181 {"tx_packet_tc4_out_cnt", 182 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, 183 {"tx_packet_tc5_out_cnt", 184 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, 
185 {"tx_packet_tc6_out_cnt", 186 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, 187 {"tx_packet_tc7_out_cnt", 188 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, 189 {"pkt_curr_buf_tc0_cnt", 190 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, 191 {"pkt_curr_buf_tc1_cnt", 192 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, 193 {"pkt_curr_buf_tc2_cnt", 194 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, 195 {"pkt_curr_buf_tc3_cnt", 196 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, 197 {"pkt_curr_buf_tc4_cnt", 198 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, 199 {"pkt_curr_buf_tc5_cnt", 200 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, 201 {"pkt_curr_buf_tc6_cnt", 202 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, 203 {"pkt_curr_buf_tc7_cnt", 204 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, 205 {"mb_uncopy_num", 206 HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, 207 {"lo_pri_unicast_rlt_drop_num", 208 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, 209 {"hi_pri_multicast_rlt_drop_num", 210 HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, 211 {"lo_pri_multicast_rlt_drop_num", 212 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, 213 {"rx_oq_drop_pkt_cnt", 214 HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, 215 {"tx_oq_drop_pkt_cnt", 216 HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, 217 {"nic_l2_err_drop_pkt_cnt", 218 HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, 219 {"roc_l2_err_drop_pkt_cnt", 220 HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} 221 }; 222 223 static const struct hclge_comm_stats_str g_mac_stats_string[] = { 224 {"mac_tx_mac_pause_num", 225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, 226 {"mac_rx_mac_pause_num", 227 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, 228 {"mac_tx_pfc_pri0_pkt_num", 229 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 230 {"mac_tx_pfc_pri1_pkt_num", 231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 232 {"mac_tx_pfc_pri2_pkt_num", 233 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 234 {"mac_tx_pfc_pri3_pkt_num", 235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 236 {"mac_tx_pfc_pri4_pkt_num", 237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 238 {"mac_tx_pfc_pri5_pkt_num", 239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 240 {"mac_tx_pfc_pri6_pkt_num", 241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 242 {"mac_tx_pfc_pri7_pkt_num", 243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 244 {"mac_rx_pfc_pri0_pkt_num", 245 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 246 {"mac_rx_pfc_pri1_pkt_num", 247 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 248 {"mac_rx_pfc_pri2_pkt_num", 249 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 250 {"mac_rx_pfc_pri3_pkt_num", 251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 252 {"mac_rx_pfc_pri4_pkt_num", 253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 254 {"mac_rx_pfc_pri5_pkt_num", 255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 256 {"mac_rx_pfc_pri6_pkt_num", 257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 258 {"mac_rx_pfc_pri7_pkt_num", 259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 260 {"mac_tx_total_pkt_num", 261 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 262 {"mac_tx_total_oct_num", 263 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 264 {"mac_tx_good_pkt_num", 265 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 266 {"mac_tx_bad_pkt_num", 267 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 268 {"mac_tx_good_oct_num", 269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 270 {"mac_tx_bad_oct_num", 271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 272 {"mac_tx_uni_pkt_num", 273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 274 {"mac_tx_multi_pkt_num", 275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 276 {"mac_tx_broad_pkt_num", 277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 278 {"mac_tx_undersize_pkt_num", 279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 280 {"mac_tx_overrsize_pkt_num", 281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)}, 282 {"mac_tx_64_oct_pkt_num", 283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 284 {"mac_tx_65_127_oct_pkt_num", 285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 286 {"mac_tx_128_255_oct_pkt_num", 287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 288 {"mac_tx_256_511_oct_pkt_num", 289 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 290 {"mac_tx_512_1023_oct_pkt_num", 291 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 292 {"mac_tx_1024_1518_oct_pkt_num", 293 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 294 {"mac_tx_1519_max_oct_pkt_num", 295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)}, 296 {"mac_rx_total_pkt_num", 297 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 298 {"mac_rx_total_oct_num", 299 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 300 {"mac_rx_good_pkt_num", 301 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 302 {"mac_rx_bad_pkt_num", 303 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 304 {"mac_rx_good_oct_num", 305 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 306 {"mac_rx_bad_oct_num", 307 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 308 {"mac_rx_uni_pkt_num", 309 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 310 {"mac_rx_multi_pkt_num", 311 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 312 {"mac_rx_broad_pkt_num", 313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 314 {"mac_rx_undersize_pkt_num", 315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 316 {"mac_rx_overrsize_pkt_num", 317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)}, 318 {"mac_rx_64_oct_pkt_num", 319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 320 {"mac_rx_65_127_oct_pkt_num", 321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 322 {"mac_rx_128_255_oct_pkt_num", 323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 324 {"mac_rx_256_511_oct_pkt_num", 325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 326 {"mac_rx_512_1023_oct_pkt_num", 327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 328 {"mac_rx_1024_1518_oct_pkt_num", 329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 330 {"mac_rx_1519_max_oct_pkt_num", 331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)}, 332 333 {"mac_trans_fragment_pkt_num", 334 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)}, 335 {"mac_trans_undermin_pkt_num", 336 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)}, 337 {"mac_trans_jabber_pkt_num", 338 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)}, 339 {"mac_trans_err_all_pkt_num", 340 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)}, 341 {"mac_trans_from_app_good_pkt_num", 342 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)}, 343 {"mac_trans_from_app_bad_pkt_num", 344 HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)}, 345 {"mac_rcv_fragment_pkt_num", 346 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)}, 347 
{"mac_rcv_undermin_pkt_num", 348 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)}, 349 {"mac_rcv_jabber_pkt_num", 350 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)}, 351 {"mac_rcv_fcs_err_pkt_num", 352 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)}, 353 {"mac_rcv_send_app_good_pkt_num", 354 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)}, 355 {"mac_rcv_send_app_bad_pkt_num", 356 HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)} 357 }; 358 359 static int hclge_64_bit_update_stats(struct hclge_dev *hdev) 360 { 361 #define HCLGE_64_BIT_CMD_NUM 5 362 #define HCLGE_64_BIT_RTN_DATANUM 4 363 u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); 364 struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; 365 __le64 *desc_data; 366 int i, k, n; 367 int ret; 368 369 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); 370 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); 371 if (ret) { 372 dev_err(&hdev->pdev->dev, 373 "Get 64 bit pkt stats fail, status = %d.\n", ret); 374 return ret; 375 } 376 377 for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { 378 if (unlikely(i == 0)) { 379 desc_data = (__le64 *)(&desc[i].data[0]); 380 n = HCLGE_64_BIT_RTN_DATANUM - 1; 381 } else { 382 desc_data = (__le64 *)(&desc[i]); 383 n = HCLGE_64_BIT_RTN_DATANUM; 384 } 385 for (k = 0; k < n; k++) { 386 *data++ += le64_to_cpu(*desc_data); 387 desc_data++; 388 } 389 } 390 391 return 0; 392 } 393 394 static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) 395 { 396 stats->pkt_curr_buf_cnt = 0; 397 stats->pkt_curr_buf_tc0_cnt = 0; 398 stats->pkt_curr_buf_tc1_cnt = 0; 399 stats->pkt_curr_buf_tc2_cnt = 0; 400 stats->pkt_curr_buf_tc3_cnt = 0; 401 stats->pkt_curr_buf_tc4_cnt = 0; 402 stats->pkt_curr_buf_tc5_cnt = 0; 403 stats->pkt_curr_buf_tc6_cnt = 0; 404 stats->pkt_curr_buf_tc7_cnt = 0; 405 } 406 407 static int hclge_32_bit_update_stats(struct hclge_dev *hdev) 408 { 409 #define HCLGE_32_BIT_CMD_NUM 8 410 #define HCLGE_32_BIT_RTN_DATANUM 8 411 412 struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; 413 struct hclge_32_bit_stats *all_32_bit_stats; 414 __le32 *desc_data; 415 int i, k, n; 416 u64 *data; 417 int ret; 418 419 all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; 420 data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt); 421 422 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); 423 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); 424 if (ret) { 425 dev_err(&hdev->pdev->dev, 426 "Get 32 bit pkt stats fail, status = %d.\n", ret); 427 428 return ret; 429 } 430 431 hclge_reset_partial_32bit_counter(all_32_bit_stats); 432 for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { 433 if (unlikely(i == 0)) { 434 __le16 *desc_data_16bit; 435 436 all_32_bit_stats->igu_rx_err_pkt += 437 le32_to_cpu(desc[i].data[0]); 438 439 desc_data_16bit = (__le16 *)&desc[i].data[1]; 440 all_32_bit_stats->igu_rx_no_eof_pkt += 441 le16_to_cpu(*desc_data_16bit); 442 443 desc_data_16bit++; 444 all_32_bit_stats->igu_rx_no_sof_pkt += 445 le16_to_cpu(*desc_data_16bit); 446 447 desc_data = &desc[i].data[2]; 448 n = HCLGE_32_BIT_RTN_DATANUM - 4; 449 } else { 450 desc_data = (__le32 *)&desc[i]; 451 n = HCLGE_32_BIT_RTN_DATANUM; 452 } 453 for (k = 0; k < n; k++) { 454 *data++ += le32_to_cpu(*desc_data); 455 desc_data++; 456 } 457 } 458 459 return 0; 460 } 461 462 static int hclge_mac_update_stats(struct hclge_dev *hdev) 463 { 464 #define HCLGE_MAC_CMD_NUM 17 465 #define HCLGE_RTN_DATA_NUM 4 466 467 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); 468 struct hclge_desc 
desc[HCLGE_MAC_CMD_NUM]; 469 __le64 *desc_data; 470 int i, k, n; 471 int ret; 472 473 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); 474 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); 475 if (ret) { 476 dev_err(&hdev->pdev->dev, 477 "Get MAC pkt stats fail, status = %d.\n", ret); 478 479 return ret; 480 } 481 482 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { 483 if (unlikely(i == 0)) { 484 desc_data = (__le64 *)(&desc[i].data[0]); 485 n = HCLGE_RTN_DATA_NUM - 2; 486 } else { 487 desc_data = (__le64 *)(&desc[i]); 488 n = HCLGE_RTN_DATA_NUM; 489 } 490 for (k = 0; k < n; k++) { 491 *data++ += le64_to_cpu(*desc_data); 492 desc_data++; 493 } 494 } 495 496 return 0; 497 } 498 499 static int hclge_tqps_update_stats(struct hnae3_handle *handle) 500 { 501 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 502 struct hclge_vport *vport = hclge_get_vport(handle); 503 struct hclge_dev *hdev = vport->back; 504 struct hnae3_queue *queue; 505 struct hclge_desc desc[1]; 506 struct hclge_tqp *tqp; 507 int ret, i; 508 509 for (i = 0; i < kinfo->num_tqps; i++) { 510 queue = handle->kinfo.tqp[i]; 511 tqp = container_of(queue, struct hclge_tqp, q); 512 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 513 hclge_cmd_setup_basic_desc(&desc[0], 514 HCLGE_OPC_QUERY_RX_STATUS, 515 true); 516 517 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 518 ret = hclge_cmd_send(&hdev->hw, desc, 1); 519 if (ret) { 520 dev_err(&hdev->pdev->dev, 521 "Query tqp stat fail, status = %d,queue = %d\n", 522 ret, i); 523 return ret; 524 } 525 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 526 le32_to_cpu(desc[0].data[4]); 527 } 528 529 for (i = 0; i < kinfo->num_tqps; i++) { 530 queue = handle->kinfo.tqp[i]; 531 tqp = container_of(queue, struct hclge_tqp, q); 532 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 533 hclge_cmd_setup_basic_desc(&desc[0], 534 HCLGE_OPC_QUERY_TX_STATUS, 535 true); 536 537 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 538 ret = hclge_cmd_send(&hdev->hw, desc, 1); 539 if (ret) { 540 dev_err(&hdev->pdev->dev, 541 "Query tqp stat fail, status = %d,queue = %d\n", 542 ret, i); 543 return ret; 544 } 545 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 546 le32_to_cpu(desc[0].data[4]); 547 } 548 549 return 0; 550 } 551 552 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 553 { 554 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 555 struct hclge_tqp *tqp; 556 u64 *buff = data; 557 int i; 558 559 for (i = 0; i < kinfo->num_tqps; i++) { 560 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 561 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 562 } 563 564 for (i = 0; i < kinfo->num_tqps; i++) { 565 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 566 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 567 } 568 569 return buff; 570 } 571 572 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) 573 { 574 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 575 576 return kinfo->num_tqps * (2); 577 } 578 579 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 580 { 581 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 582 u8 *buff = data; 583 int i = 0; 584 585 for (i = 0; i < kinfo->num_tqps; i++) { 586 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], 587 struct hclge_tqp, q); 588 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", 589 tqp->index); 590 buff = buff + ETH_GSTRING_LEN; 591 } 592 593 for (i = 0; i < kinfo->num_tqps; i++) { 594 struct hclge_tqp *tqp = container_of(kinfo->tqp[i], 595 
struct hclge_tqp, q); 596 snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", 597 tqp->index); 598 buff = buff + ETH_GSTRING_LEN; 599 } 600 601 return buff; 602 } 603 604 static u64 *hclge_comm_get_stats(void *comm_stats, 605 const struct hclge_comm_stats_str strs[], 606 int size, u64 *data) 607 { 608 u64 *buf = data; 609 u32 i; 610 611 for (i = 0; i < size; i++) 612 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); 613 614 return buf + size; 615 } 616 617 static u8 *hclge_comm_get_strings(u32 stringset, 618 const struct hclge_comm_stats_str strs[], 619 int size, u8 *data) 620 { 621 char *buff = (char *)data; 622 u32 i; 623 624 if (stringset != ETH_SS_STATS) 625 return buff; 626 627 for (i = 0; i < size; i++) { 628 snprintf(buff, ETH_GSTRING_LEN, 629 strs[i].desc); 630 buff = buff + ETH_GSTRING_LEN; 631 } 632 633 return (u8 *)buff; 634 } 635 636 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, 637 struct net_device_stats *net_stats) 638 { 639 net_stats->tx_dropped = 0; 640 net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; 641 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; 642 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; 643 644 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num; 645 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; 646 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt; 647 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; 648 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; 649 net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; 650 651 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; 652 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; 653 654 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; 655 net_stats->rx_length_errors = 656 hw_stats->mac_stats.mac_rx_undersize_pkt_num; 657 net_stats->rx_length_errors += 658 hw_stats->mac_stats.mac_rx_overrsize_pkt_num; 659 net_stats->rx_over_errors = 660 hw_stats->mac_stats.mac_rx_overrsize_pkt_num; 661 } 662 663 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 664 { 665 struct hnae3_handle *handle; 666 int status; 667 668 handle = &hdev->vport[0].nic; 669 if (handle->client) { 670 status = hclge_tqps_update_stats(handle); 671 if (status) { 672 dev_err(&hdev->pdev->dev, 673 "Update TQPS stats fail, status = %d.\n", 674 status); 675 } 676 } 677 678 status = hclge_mac_update_stats(hdev); 679 if (status) 680 dev_err(&hdev->pdev->dev, 681 "Update MAC stats fail, status = %d.\n", status); 682 683 status = hclge_32_bit_update_stats(hdev); 684 if (status) 685 dev_err(&hdev->pdev->dev, 686 "Update 32 bit stats fail, status = %d.\n", 687 status); 688 689 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); 690 } 691 692 static void hclge_update_stats(struct hnae3_handle *handle, 693 struct net_device_stats *net_stats) 694 { 695 struct hclge_vport *vport = hclge_get_vport(handle); 696 struct hclge_dev *hdev = vport->back; 697 struct hclge_hw_stats *hw_stats = &hdev->hw_stats; 698 int status; 699 700 status = hclge_mac_update_stats(hdev); 701 if (status) 702 dev_err(&hdev->pdev->dev, 703 "Update MAC stats fail, status = %d.\n", 704 status); 705 706 status = hclge_32_bit_update_stats(hdev); 707 if (status) 708 dev_err(&hdev->pdev->dev, 709 "Update 32 bit stats fail, status = %d.\n", 710 status); 711 712 status = hclge_64_bit_update_stats(hdev); 713 if 
(status) 714 dev_err(&hdev->pdev->dev, 715 "Update 64 bit stats fail, status = %d.\n", 716 status); 717 718 status = hclge_tqps_update_stats(handle); 719 if (status) 720 dev_err(&hdev->pdev->dev, 721 "Update TQPS stats fail, status = %d.\n", 722 status); 723 724 hclge_update_netstat(hw_stats, net_stats); 725 } 726 727 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 728 { 729 #define HCLGE_LOOPBACK_TEST_FLAGS 0x7 730 731 struct hclge_vport *vport = hclge_get_vport(handle); 732 struct hclge_dev *hdev = vport->back; 733 int count = 0; 734 735 /* Loopback test support rules: 736 * mac: only GE mode support 737 * serdes: all mac mode will support include GE/XGE/LGE/CGE 738 * phy: only support when phy device exist on board 739 */ 740 if (stringset == ETH_SS_TEST) { 741 /* clear loopback bit flags at first */ 742 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 743 if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 744 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 745 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 746 count += 1; 747 handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; 748 } else { 749 count = -EOPNOTSUPP; 750 } 751 } else if (stringset == ETH_SS_STATS) { 752 count = ARRAY_SIZE(g_mac_stats_string) + 753 ARRAY_SIZE(g_all_32bit_stats_string) + 754 ARRAY_SIZE(g_all_64bit_stats_string) + 755 hclge_tqps_get_sset_count(handle, stringset); 756 } 757 758 return count; 759 } 760 761 static void hclge_get_strings(struct hnae3_handle *handle, 762 u32 stringset, 763 u8 *data) 764 { 765 u8 *p = (char *)data; 766 int size; 767 768 if (stringset == ETH_SS_STATS) { 769 size = ARRAY_SIZE(g_mac_stats_string); 770 p = hclge_comm_get_strings(stringset, 771 g_mac_stats_string, 772 size, 773 p); 774 size = ARRAY_SIZE(g_all_32bit_stats_string); 775 p = hclge_comm_get_strings(stringset, 776 g_all_32bit_stats_string, 777 size, 778 p); 779 size = ARRAY_SIZE(g_all_64bit_stats_string); 780 p = hclge_comm_get_strings(stringset, 781 g_all_64bit_stats_string, 782 size, 783 p); 784 p = hclge_tqps_get_strings(handle, p); 785 } else if (stringset == ETH_SS_TEST) { 786 if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { 787 memcpy(p, 788 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], 789 ETH_GSTRING_LEN); 790 p += ETH_GSTRING_LEN; 791 } 792 if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { 793 memcpy(p, 794 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], 795 ETH_GSTRING_LEN); 796 p += ETH_GSTRING_LEN; 797 } 798 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 799 memcpy(p, 800 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], 801 ETH_GSTRING_LEN); 802 p += ETH_GSTRING_LEN; 803 } 804 } 805 } 806 807 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) 808 { 809 struct hclge_vport *vport = hclge_get_vport(handle); 810 struct hclge_dev *hdev = vport->back; 811 u64 *p; 812 813 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, 814 g_mac_stats_string, 815 ARRAY_SIZE(g_mac_stats_string), 816 data); 817 p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, 818 g_all_32bit_stats_string, 819 ARRAY_SIZE(g_all_32bit_stats_string), 820 p); 821 p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, 822 g_all_64bit_stats_string, 823 ARRAY_SIZE(g_all_64bit_stats_string), 824 p); 825 p = hclge_tqps_get_stats(handle, p); 826 } 827 828 static int hclge_parse_func_status(struct hclge_dev *hdev, 829 struct hclge_func_status_cmd *status) 830 { 831 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 832 return -EINVAL; 833 834 /* Set the pf to main pf */ 835 if (status->pf_state & 
HCLGE_PF_STATE_MAIN) 836 hdev->flag |= HCLGE_FLAG_MAIN; 837 else 838 hdev->flag &= ~HCLGE_FLAG_MAIN; 839 840 hdev->num_req_vfs = status->vf_num / status->pf_num; 841 return 0; 842 } 843 844 static int hclge_query_function_status(struct hclge_dev *hdev) 845 { 846 struct hclge_func_status_cmd *req; 847 struct hclge_desc desc; 848 int timeout = 0; 849 int ret; 850 851 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 852 req = (struct hclge_func_status_cmd *)desc.data; 853 854 do { 855 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 856 if (ret) { 857 dev_err(&hdev->pdev->dev, 858 "query function status failed %d.\n", 859 ret); 860 861 return ret; 862 } 863 864 /* Check pf reset is done */ 865 if (req->pf_state) 866 break; 867 usleep_range(1000, 2000); 868 } while (timeout++ < 5); 869 870 ret = hclge_parse_func_status(hdev, req); 871 872 return ret; 873 } 874 875 static int hclge_query_pf_resource(struct hclge_dev *hdev) 876 { 877 struct hclge_pf_res_cmd *req; 878 struct hclge_desc desc; 879 int ret; 880 881 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 882 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 883 if (ret) { 884 dev_err(&hdev->pdev->dev, 885 "query pf resource failed %d.\n", ret); 886 return ret; 887 } 888 889 req = (struct hclge_pf_res_cmd *)desc.data; 890 hdev->num_tqps = __le16_to_cpu(req->tqp_num); 891 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 892 893 if (hnae3_dev_roce_supported(hdev)) { 894 hdev->num_roce_msix = 895 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), 896 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 897 898 /* PF should have NIC vectors and Roce vectors, 899 * NIC vectors are queued before Roce vectors. 900 */ 901 hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET; 902 } else { 903 hdev->num_msi = 904 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), 905 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 906 } 907 908 return 0; 909 } 910 911 static int hclge_parse_speed(int speed_cmd, int *speed) 912 { 913 switch (speed_cmd) { 914 case 6: 915 *speed = HCLGE_MAC_SPEED_10M; 916 break; 917 case 7: 918 *speed = HCLGE_MAC_SPEED_100M; 919 break; 920 case 0: 921 *speed = HCLGE_MAC_SPEED_1G; 922 break; 923 case 1: 924 *speed = HCLGE_MAC_SPEED_10G; 925 break; 926 case 2: 927 *speed = HCLGE_MAC_SPEED_25G; 928 break; 929 case 3: 930 *speed = HCLGE_MAC_SPEED_40G; 931 break; 932 case 4: 933 *speed = HCLGE_MAC_SPEED_50G; 934 break; 935 case 5: 936 *speed = HCLGE_MAC_SPEED_100G; 937 break; 938 default: 939 return -EINVAL; 940 } 941 942 return 0; 943 } 944 945 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) 946 { 947 struct hclge_cfg_param_cmd *req; 948 u64 mac_addr_tmp_high; 949 u64 mac_addr_tmp; 950 int i; 951 952 req = (struct hclge_cfg_param_cmd *)desc[0].data; 953 954 /* get the configuration */ 955 cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), 956 HCLGE_CFG_VMDQ_M, 957 HCLGE_CFG_VMDQ_S); 958 cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), 959 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); 960 cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), 961 HCLGE_CFG_TQP_DESC_N_M, 962 HCLGE_CFG_TQP_DESC_N_S); 963 964 cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), 965 HCLGE_CFG_PHY_ADDR_M, 966 HCLGE_CFG_PHY_ADDR_S); 967 cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), 968 HCLGE_CFG_MEDIA_TP_M, 969 HCLGE_CFG_MEDIA_TP_S); 970 cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), 971 HCLGE_CFG_RX_BUF_LEN_M, 972 
HCLGE_CFG_RX_BUF_LEN_S); 973 /* get mac_address */ 974 mac_addr_tmp = __le32_to_cpu(req->param[2]); 975 mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), 976 HCLGE_CFG_MAC_ADDR_H_M, 977 HCLGE_CFG_MAC_ADDR_H_S); 978 979 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 980 981 cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), 982 HCLGE_CFG_DEFAULT_SPEED_M, 983 HCLGE_CFG_DEFAULT_SPEED_S); 984 for (i = 0; i < ETH_ALEN; i++) 985 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 986 987 req = (struct hclge_cfg_param_cmd *)desc[1].data; 988 cfg->numa_node_map = __le32_to_cpu(req->param[0]); 989 } 990 991 /* hclge_get_cfg: query the static parameter from flash 992 * @hdev: pointer to struct hclge_dev 993 * @hcfg: the config structure to be getted 994 */ 995 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) 996 { 997 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; 998 struct hclge_cfg_param_cmd *req; 999 int i, ret; 1000 1001 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { 1002 u32 offset = 0; 1003 1004 req = (struct hclge_cfg_param_cmd *)desc[i].data; 1005 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, 1006 true); 1007 hnae_set_field(offset, HCLGE_CFG_OFFSET_M, 1008 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); 1009 /* Len should be united by 4 bytes when send to hardware */ 1010 hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, 1011 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); 1012 req->offset = cpu_to_le32(offset); 1013 } 1014 1015 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); 1016 if (ret) { 1017 dev_err(&hdev->pdev->dev, 1018 "get config failed %d.\n", ret); 1019 return ret; 1020 } 1021 1022 hclge_parse_cfg(hcfg, desc); 1023 return 0; 1024 } 1025 1026 static int hclge_get_cap(struct hclge_dev *hdev) 1027 { 1028 int ret; 1029 1030 ret = hclge_query_function_status(hdev); 1031 if (ret) { 1032 dev_err(&hdev->pdev->dev, 1033 "query function status error %d.\n", ret); 1034 return ret; 1035 } 1036 1037 /* get pf resource */ 1038 ret = hclge_query_pf_resource(hdev); 1039 if (ret) { 1040 dev_err(&hdev->pdev->dev, 1041 "query pf resource error %d.\n", ret); 1042 return ret; 1043 } 1044 1045 return 0; 1046 } 1047 1048 static int hclge_configure(struct hclge_dev *hdev) 1049 { 1050 struct hclge_cfg cfg; 1051 int ret, i; 1052 1053 ret = hclge_get_cfg(hdev, &cfg); 1054 if (ret) { 1055 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); 1056 return ret; 1057 } 1058 1059 hdev->num_vmdq_vport = cfg.vmdq_vport_num; 1060 hdev->base_tqp_pid = 0; 1061 hdev->rss_size_max = 1; 1062 hdev->rx_buf_len = cfg.rx_buf_len; 1063 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); 1064 hdev->hw.mac.media_type = cfg.media_type; 1065 hdev->hw.mac.phy_addr = cfg.phy_addr; 1066 hdev->num_desc = cfg.tqp_desc_num; 1067 hdev->tm_info.num_pg = 1; 1068 hdev->tc_max = cfg.tc_num; 1069 hdev->tm_info.hw_pfc_map = 0; 1070 1071 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); 1072 if (ret) { 1073 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); 1074 return ret; 1075 } 1076 1077 if ((hdev->tc_max > HNAE3_MAX_TC) || 1078 (hdev->tc_max < 1)) { 1079 dev_warn(&hdev->pdev->dev, "TC num = %d.\n", 1080 hdev->tc_max); 1081 hdev->tc_max = 1; 1082 } 1083 1084 /* Dev does not support DCB */ 1085 if (!hnae3_dev_dcb_supported(hdev)) { 1086 hdev->tc_max = 1; 1087 hdev->pfc_max = 0; 1088 } else { 1089 hdev->pfc_max = hdev->tc_max; 1090 } 1091 1092 hdev->tm_info.num_tc = hdev->tc_max; 1093 1094 /* Currently not 
support uncontiuous tc */ 1095 for (i = 0; i < hdev->tm_info.num_tc; i++) 1096 hnae_set_bit(hdev->hw_tc_map, i, 1); 1097 1098 if (!hdev->num_vmdq_vport && !hdev->num_req_vfs) 1099 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; 1100 else 1101 hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE; 1102 1103 return ret; 1104 } 1105 1106 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, 1107 int tso_mss_max) 1108 { 1109 struct hclge_cfg_tso_status_cmd *req; 1110 struct hclge_desc desc; 1111 u16 tso_mss; 1112 1113 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); 1114 1115 req = (struct hclge_cfg_tso_status_cmd *)desc.data; 1116 1117 tso_mss = 0; 1118 hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 1119 HCLGE_TSO_MSS_MIN_S, tso_mss_min); 1120 req->tso_mss_min = cpu_to_le16(tso_mss); 1121 1122 tso_mss = 0; 1123 hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 1124 HCLGE_TSO_MSS_MIN_S, tso_mss_max); 1125 req->tso_mss_max = cpu_to_le16(tso_mss); 1126 1127 return hclge_cmd_send(&hdev->hw, &desc, 1); 1128 } 1129 1130 static int hclge_alloc_tqps(struct hclge_dev *hdev) 1131 { 1132 struct hclge_tqp *tqp; 1133 int i; 1134 1135 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 1136 sizeof(struct hclge_tqp), GFP_KERNEL); 1137 if (!hdev->htqp) 1138 return -ENOMEM; 1139 1140 tqp = hdev->htqp; 1141 1142 for (i = 0; i < hdev->num_tqps; i++) { 1143 tqp->dev = &hdev->pdev->dev; 1144 tqp->index = i; 1145 1146 tqp->q.ae_algo = &ae_algo; 1147 tqp->q.buf_size = hdev->rx_buf_len; 1148 tqp->q.desc_num = hdev->num_desc; 1149 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + 1150 i * HCLGE_TQP_REG_SIZE; 1151 1152 tqp++; 1153 } 1154 1155 return 0; 1156 } 1157 1158 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, 1159 u16 tqp_pid, u16 tqp_vid, bool is_pf) 1160 { 1161 struct hclge_tqp_map_cmd *req; 1162 struct hclge_desc desc; 1163 int ret; 1164 1165 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); 1166 1167 req = (struct hclge_tqp_map_cmd *)desc.data; 1168 req->tqp_id = cpu_to_le16(tqp_pid); 1169 req->tqp_vf = func_id; 1170 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | 1171 1 << HCLGE_TQP_MAP_EN_B; 1172 req->tqp_vid = cpu_to_le16(tqp_vid); 1173 1174 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1175 if (ret) { 1176 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", 1177 ret); 1178 return ret; 1179 } 1180 1181 return 0; 1182 } 1183 1184 static int hclge_assign_tqp(struct hclge_vport *vport, 1185 struct hnae3_queue **tqp, u16 num_tqps) 1186 { 1187 struct hclge_dev *hdev = vport->back; 1188 int i, alloced, func_id, ret; 1189 bool is_pf; 1190 1191 func_id = vport->vport_id; 1192 is_pf = (vport->vport_id == 0) ? 
true : false; 1193 1194 for (i = 0, alloced = 0; i < hdev->num_tqps && 1195 alloced < num_tqps; i++) { 1196 if (!hdev->htqp[i].alloced) { 1197 hdev->htqp[i].q.handle = &vport->nic; 1198 hdev->htqp[i].q.tqp_index = alloced; 1199 tqp[alloced] = &hdev->htqp[i].q; 1200 hdev->htqp[i].alloced = true; 1201 ret = hclge_map_tqps_to_func(hdev, func_id, 1202 hdev->htqp[i].index, 1203 alloced, is_pf); 1204 if (ret) 1205 return ret; 1206 1207 alloced++; 1208 } 1209 } 1210 vport->alloc_tqps = num_tqps; 1211 1212 return 0; 1213 } 1214 1215 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) 1216 { 1217 struct hnae3_handle *nic = &vport->nic; 1218 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1219 struct hclge_dev *hdev = vport->back; 1220 int i, ret; 1221 1222 kinfo->num_desc = hdev->num_desc; 1223 kinfo->rx_buf_len = hdev->rx_buf_len; 1224 kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); 1225 kinfo->rss_size 1226 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); 1227 kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; 1228 1229 for (i = 0; i < HNAE3_MAX_TC; i++) { 1230 if (hdev->hw_tc_map & BIT(i)) { 1231 kinfo->tc_info[i].enable = true; 1232 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; 1233 kinfo->tc_info[i].tqp_count = kinfo->rss_size; 1234 kinfo->tc_info[i].tc = i; 1235 } else { 1236 /* Set to default queue if TC is disable */ 1237 kinfo->tc_info[i].enable = false; 1238 kinfo->tc_info[i].tqp_offset = 0; 1239 kinfo->tc_info[i].tqp_count = 1; 1240 kinfo->tc_info[i].tc = 0; 1241 } 1242 } 1243 1244 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 1245 sizeof(struct hnae3_queue *), GFP_KERNEL); 1246 if (!kinfo->tqp) 1247 return -ENOMEM; 1248 1249 ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); 1250 if (ret) { 1251 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1252 return -EINVAL; 1253 } 1254 1255 return 0; 1256 } 1257 1258 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) 1259 { 1260 /* this would be initialized later */ 1261 } 1262 1263 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1264 { 1265 struct hnae3_handle *nic = &vport->nic; 1266 struct hclge_dev *hdev = vport->back; 1267 int ret; 1268 1269 nic->pdev = hdev->pdev; 1270 nic->ae_algo = &ae_algo; 1271 nic->numa_node_mask = hdev->numa_node_mask; 1272 1273 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { 1274 ret = hclge_knic_setup(vport, num_tqps); 1275 if (ret) { 1276 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", 1277 ret); 1278 return ret; 1279 } 1280 } else { 1281 hclge_unic_setup(vport, num_tqps); 1282 } 1283 1284 return 0; 1285 } 1286 1287 static int hclge_alloc_vport(struct hclge_dev *hdev) 1288 { 1289 struct pci_dev *pdev = hdev->pdev; 1290 struct hclge_vport *vport; 1291 u32 tqp_main_vport; 1292 u32 tqp_per_vport; 1293 int num_vport, i; 1294 int ret; 1295 1296 /* We need to alloc a vport for main NIC of PF */ 1297 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1298 1299 if (hdev->num_tqps < num_vport) 1300 num_vport = hdev->num_tqps; 1301 1302 /* Alloc the same number of TQPs for every vport */ 1303 tqp_per_vport = hdev->num_tqps / num_vport; 1304 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1305 1306 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1307 GFP_KERNEL); 1308 if (!vport) 1309 return -ENOMEM; 1310 1311 hdev->vport = vport; 1312 hdev->num_alloc_vport = num_vport; 1313 1314 #ifdef CONFIG_PCI_IOV 1315 /* Enable SRIOV */ 1316 if (hdev->num_req_vfs) 
{ 1317 dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n", 1318 hdev->num_req_vfs); 1319 ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs); 1320 if (ret) { 1321 hdev->num_alloc_vfs = 0; 1322 dev_err(&pdev->dev, "SRIOV enable failed %d\n", 1323 ret); 1324 return ret; 1325 } 1326 } 1327 hdev->num_alloc_vfs = hdev->num_req_vfs; 1328 #endif 1329 1330 for (i = 0; i < num_vport; i++) { 1331 vport->back = hdev; 1332 vport->vport_id = i; 1333 1334 if (i == 0) 1335 ret = hclge_vport_setup(vport, tqp_main_vport); 1336 else 1337 ret = hclge_vport_setup(vport, tqp_per_vport); 1338 if (ret) { 1339 dev_err(&pdev->dev, 1340 "vport setup failed for vport %d, %d\n", 1341 i, ret); 1342 return ret; 1343 } 1344 1345 vport++; 1346 } 1347 1348 return 0; 1349 } 1350 1351 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1352 struct hclge_pkt_buf_alloc *buf_alloc) 1353 { 1354 /* TX buffer size is unit by 128 byte */ 1355 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1356 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1357 struct hclge_tx_buff_alloc_cmd *req; 1358 struct hclge_desc desc; 1359 int ret; 1360 u8 i; 1361 1362 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1363 1364 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1365 for (i = 0; i < HCLGE_TC_NUM; i++) { 1366 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1367 1368 req->tx_pkt_buff[i] = 1369 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1370 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1371 } 1372 1373 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1374 if (ret) { 1375 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1376 ret); 1377 return ret; 1378 } 1379 1380 return 0; 1381 } 1382 1383 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1384 struct hclge_pkt_buf_alloc *buf_alloc) 1385 { 1386 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1387 1388 if (ret) { 1389 dev_err(&hdev->pdev->dev, 1390 "tx buffer alloc failed %d\n", ret); 1391 return ret; 1392 } 1393 1394 return 0; 1395 } 1396 1397 static int hclge_get_tc_num(struct hclge_dev *hdev) 1398 { 1399 int i, cnt = 0; 1400 1401 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1402 if (hdev->hw_tc_map & BIT(i)) 1403 cnt++; 1404 return cnt; 1405 } 1406 1407 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) 1408 { 1409 int i, cnt = 0; 1410 1411 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1412 if (hdev->hw_tc_map & BIT(i) && 1413 hdev->tm_info.hw_pfc_map & BIT(i)) 1414 cnt++; 1415 return cnt; 1416 } 1417 1418 /* Get the number of pfc enabled TCs, which have private buffer */ 1419 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1420 struct hclge_pkt_buf_alloc *buf_alloc) 1421 { 1422 struct hclge_priv_buf *priv; 1423 int i, cnt = 0; 1424 1425 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1426 priv = &buf_alloc->priv_buf[i]; 1427 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1428 priv->enable) 1429 cnt++; 1430 } 1431 1432 return cnt; 1433 } 1434 1435 /* Get the number of pfc disabled TCs, which have private buffer */ 1436 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1437 struct hclge_pkt_buf_alloc *buf_alloc) 1438 { 1439 struct hclge_priv_buf *priv; 1440 int i, cnt = 0; 1441 1442 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1443 priv = &buf_alloc->priv_buf[i]; 1444 if (hdev->hw_tc_map & BIT(i) && 1445 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1446 priv->enable) 1447 cnt++; 1448 } 1449 1450 return cnt; 1451 } 1452 1453 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1454 { 1455 struct hclge_priv_buf *priv; 1456 u32 
rx_priv = 0; 1457 int i; 1458 1459 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1460 priv = &buf_alloc->priv_buf[i]; 1461 if (priv->enable) 1462 rx_priv += priv->buf_size; 1463 } 1464 return rx_priv; 1465 } 1466 1467 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1468 { 1469 u32 i, total_tx_size = 0; 1470 1471 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1472 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 1473 1474 return total_tx_size; 1475 } 1476 1477 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, 1478 struct hclge_pkt_buf_alloc *buf_alloc, 1479 u32 rx_all) 1480 { 1481 u32 shared_buf_min, shared_buf_tc, shared_std; 1482 int tc_num, pfc_enable_num; 1483 u32 shared_buf; 1484 u32 rx_priv; 1485 int i; 1486 1487 tc_num = hclge_get_tc_num(hdev); 1488 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); 1489 1490 if (hnae3_dev_dcb_supported(hdev)) 1491 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; 1492 else 1493 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; 1494 1495 shared_buf_tc = pfc_enable_num * hdev->mps + 1496 (tc_num - pfc_enable_num) * hdev->mps / 2 + 1497 hdev->mps; 1498 shared_std = max_t(u32, shared_buf_min, shared_buf_tc); 1499 1500 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); 1501 if (rx_all <= rx_priv + shared_std) 1502 return false; 1503 1504 shared_buf = rx_all - rx_priv; 1505 buf_alloc->s_buf.buf_size = shared_buf; 1506 buf_alloc->s_buf.self.high = shared_buf; 1507 buf_alloc->s_buf.self.low = 2 * hdev->mps; 1508 1509 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1510 if ((hdev->hw_tc_map & BIT(i)) && 1511 (hdev->tm_info.hw_pfc_map & BIT(i))) { 1512 buf_alloc->s_buf.tc_thrd[i].low = hdev->mps; 1513 buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps; 1514 } else { 1515 buf_alloc->s_buf.tc_thrd[i].low = 0; 1516 buf_alloc->s_buf.tc_thrd[i].high = hdev->mps; 1517 } 1518 } 1519 1520 return true; 1521 } 1522 1523 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, 1524 struct hclge_pkt_buf_alloc *buf_alloc) 1525 { 1526 u32 i, total_size; 1527 1528 total_size = hdev->pkt_buf_size; 1529 1530 /* alloc tx buffer for all enabled tc */ 1531 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1532 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1533 1534 if (total_size < HCLGE_DEFAULT_TX_BUF) 1535 return -ENOMEM; 1536 1537 if (hdev->hw_tc_map & BIT(i)) 1538 priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF; 1539 else 1540 priv->tx_buf_size = 0; 1541 1542 total_size -= priv->tx_buf_size; 1543 } 1544 1545 return 0; 1546 } 1547 1548 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs 1549 * @hdev: pointer to struct hclge_dev 1550 * @buf_alloc: pointer to buffer calculation data 1551 * @return: 0: calculate sucessful, negative: fail 1552 */ 1553 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, 1554 struct hclge_pkt_buf_alloc *buf_alloc) 1555 { 1556 u32 rx_all = hdev->pkt_buf_size; 1557 int no_pfc_priv_num, pfc_priv_num; 1558 struct hclge_priv_buf *priv; 1559 int i; 1560 1561 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); 1562 1563 /* When DCB is not supported, rx private 1564 * buffer is not allocated. 
1565 */ 1566 if (!hnae3_dev_dcb_supported(hdev)) { 1567 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1568 return -ENOMEM; 1569 1570 return 0; 1571 } 1572 1573 /* step 1, try to alloc private buffer for all enabled tc */ 1574 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1575 priv = &buf_alloc->priv_buf[i]; 1576 if (hdev->hw_tc_map & BIT(i)) { 1577 priv->enable = 1; 1578 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1579 priv->wl.low = hdev->mps; 1580 priv->wl.high = priv->wl.low + hdev->mps; 1581 priv->buf_size = priv->wl.high + 1582 HCLGE_DEFAULT_DV; 1583 } else { 1584 priv->wl.low = 0; 1585 priv->wl.high = 2 * hdev->mps; 1586 priv->buf_size = priv->wl.high; 1587 } 1588 } else { 1589 priv->enable = 0; 1590 priv->wl.low = 0; 1591 priv->wl.high = 0; 1592 priv->buf_size = 0; 1593 } 1594 } 1595 1596 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1597 return 0; 1598 1599 /* step 2, try to decrease the buffer size of 1600 * no pfc TC's private buffer 1601 */ 1602 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1603 priv = &buf_alloc->priv_buf[i]; 1604 1605 priv->enable = 0; 1606 priv->wl.low = 0; 1607 priv->wl.high = 0; 1608 priv->buf_size = 0; 1609 1610 if (!(hdev->hw_tc_map & BIT(i))) 1611 continue; 1612 1613 priv->enable = 1; 1614 1615 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1616 priv->wl.low = 128; 1617 priv->wl.high = priv->wl.low + hdev->mps; 1618 priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; 1619 } else { 1620 priv->wl.low = 0; 1621 priv->wl.high = hdev->mps; 1622 priv->buf_size = priv->wl.high; 1623 } 1624 } 1625 1626 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1627 return 0; 1628 1629 /* step 3, try to reduce the number of pfc disabled TCs, 1630 * which have private buffer 1631 */ 1632 /* get the total no pfc enable TC number, which have private buffer */ 1633 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 1634 1635 /* let the last to be cleared first */ 1636 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1637 priv = &buf_alloc->priv_buf[i]; 1638 1639 if (hdev->hw_tc_map & BIT(i) && 1640 !(hdev->tm_info.hw_pfc_map & BIT(i))) { 1641 /* Clear the no pfc TC private buffer */ 1642 priv->wl.low = 0; 1643 priv->wl.high = 0; 1644 priv->buf_size = 0; 1645 priv->enable = 0; 1646 no_pfc_priv_num--; 1647 } 1648 1649 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1650 no_pfc_priv_num == 0) 1651 break; 1652 } 1653 1654 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1655 return 0; 1656 1657 /* step 4, try to reduce the number of pfc enabled TCs 1658 * which have private buffer. 
1659 */ 1660 pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 1661 1662 /* let the last to be cleared first */ 1663 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1664 priv = &buf_alloc->priv_buf[i]; 1665 1666 if (hdev->hw_tc_map & BIT(i) && 1667 hdev->tm_info.hw_pfc_map & BIT(i)) { 1668 /* Reduce the number of pfc TC with private buffer */ 1669 priv->wl.low = 0; 1670 priv->enable = 0; 1671 priv->wl.high = 0; 1672 priv->buf_size = 0; 1673 pfc_priv_num--; 1674 } 1675 1676 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1677 pfc_priv_num == 0) 1678 break; 1679 } 1680 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1681 return 0; 1682 1683 return -ENOMEM; 1684 } 1685 1686 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 1687 struct hclge_pkt_buf_alloc *buf_alloc) 1688 { 1689 struct hclge_rx_priv_buff_cmd *req; 1690 struct hclge_desc desc; 1691 int ret; 1692 int i; 1693 1694 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 1695 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 1696 1697 /* Alloc private buffer TCs */ 1698 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1699 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1700 1701 req->buf_num[i] = 1702 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 1703 req->buf_num[i] |= 1704 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 1705 } 1706 1707 req->shared_buf = 1708 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 1709 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 1710 1711 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1712 if (ret) { 1713 dev_err(&hdev->pdev->dev, 1714 "rx private buffer alloc cmd failed %d\n", ret); 1715 return ret; 1716 } 1717 1718 return 0; 1719 } 1720 1721 #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) 1722 1723 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 1724 struct hclge_pkt_buf_alloc *buf_alloc) 1725 { 1726 struct hclge_rx_priv_wl_buf *req; 1727 struct hclge_priv_buf *priv; 1728 struct hclge_desc desc[2]; 1729 int i, j; 1730 int ret; 1731 1732 for (i = 0; i < 2; i++) { 1733 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 1734 false); 1735 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 1736 1737 /* The first descriptor set the NEXT bit to 1 */ 1738 if (i == 0) 1739 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1740 else 1741 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1742 1743 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1744 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 1745 1746 priv = &buf_alloc->priv_buf[idx]; 1747 req->tc_wl[j].high = 1748 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 1749 req->tc_wl[j].high |= 1750 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << 1751 HCLGE_RX_PRIV_EN_B); 1752 req->tc_wl[j].low = 1753 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 1754 req->tc_wl[j].low |= 1755 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << 1756 HCLGE_RX_PRIV_EN_B); 1757 } 1758 } 1759 1760 /* Send 2 descriptor at one time */ 1761 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1762 if (ret) { 1763 dev_err(&hdev->pdev->dev, 1764 "rx private waterline config cmd failed %d\n", 1765 ret); 1766 return ret; 1767 } 1768 return 0; 1769 } 1770 1771 static int hclge_common_thrd_config(struct hclge_dev *hdev, 1772 struct hclge_pkt_buf_alloc *buf_alloc) 1773 { 1774 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 1775 struct hclge_rx_com_thrd *req; 1776 struct hclge_desc desc[2]; 1777 struct hclge_tc_thrd *tc; 1778 int i, j; 1779 int ret; 1780 1781 for (i = 0; i < 2; i++) { 1782 hclge_cmd_setup_basic_desc(&desc[i], 1783 HCLGE_OPC_RX_COM_THRD_ALLOC, 
false); 1784 req = (struct hclge_rx_com_thrd *)&desc[i].data; 1785 1786 /* The first descriptor set the NEXT bit to 1 */ 1787 if (i == 0) 1788 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1789 else 1790 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1791 1792 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1793 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 1794 1795 req->com_thrd[j].high = 1796 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 1797 req->com_thrd[j].high |= 1798 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << 1799 HCLGE_RX_PRIV_EN_B); 1800 req->com_thrd[j].low = 1801 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 1802 req->com_thrd[j].low |= 1803 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << 1804 HCLGE_RX_PRIV_EN_B); 1805 } 1806 } 1807 1808 /* Send 2 descriptors at one time */ 1809 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1810 if (ret) { 1811 dev_err(&hdev->pdev->dev, 1812 "common threshold config cmd failed %d\n", ret); 1813 return ret; 1814 } 1815 return 0; 1816 } 1817 1818 static int hclge_common_wl_config(struct hclge_dev *hdev, 1819 struct hclge_pkt_buf_alloc *buf_alloc) 1820 { 1821 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 1822 struct hclge_rx_com_wl *req; 1823 struct hclge_desc desc; 1824 int ret; 1825 1826 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 1827 1828 req = (struct hclge_rx_com_wl *)desc.data; 1829 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 1830 req->com_wl.high |= 1831 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << 1832 HCLGE_RX_PRIV_EN_B); 1833 1834 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 1835 req->com_wl.low |= 1836 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << 1837 HCLGE_RX_PRIV_EN_B); 1838 1839 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1840 if (ret) { 1841 dev_err(&hdev->pdev->dev, 1842 "common waterline config cmd failed %d\n", ret); 1843 return ret; 1844 } 1845 1846 return 0; 1847 } 1848 1849 int hclge_buffer_alloc(struct hclge_dev *hdev) 1850 { 1851 struct hclge_pkt_buf_alloc *pkt_buf; 1852 int ret; 1853 1854 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 1855 if (!pkt_buf) 1856 return -ENOMEM; 1857 1858 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 1859 if (ret) { 1860 dev_err(&hdev->pdev->dev, 1861 "could not calc tx buffer size for all TCs %d\n", ret); 1862 goto out; 1863 } 1864 1865 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 1866 if (ret) { 1867 dev_err(&hdev->pdev->dev, 1868 "could not alloc tx buffers %d\n", ret); 1869 goto out; 1870 } 1871 1872 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 1873 if (ret) { 1874 dev_err(&hdev->pdev->dev, 1875 "could not calc rx priv buffer size for all TCs %d\n", 1876 ret); 1877 goto out; 1878 } 1879 1880 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 1881 if (ret) { 1882 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 1883 ret); 1884 goto out; 1885 } 1886 1887 if (hnae3_dev_dcb_supported(hdev)) { 1888 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 1889 if (ret) { 1890 dev_err(&hdev->pdev->dev, 1891 "could not configure rx private waterline %d\n", 1892 ret); 1893 goto out; 1894 } 1895 1896 ret = hclge_common_thrd_config(hdev, pkt_buf); 1897 if (ret) { 1898 dev_err(&hdev->pdev->dev, 1899 "could not configure common threshold %d\n", 1900 ret); 1901 goto out; 1902 } 1903 } 1904 1905 ret = hclge_common_wl_config(hdev, pkt_buf); 1906 if (ret) 1907 dev_err(&hdev->pdev->dev, 1908 "could not configure common waterline %d\n", ret); 1909 1910 out: 1911 kfree(pkt_buf); 1912 return ret; 1913 } 1914 1915 static int 
hclge_init_roce_base_info(struct hclge_vport *vport) 1916 { 1917 struct hnae3_handle *roce = &vport->roce; 1918 struct hnae3_handle *nic = &vport->nic; 1919 1920 roce->rinfo.num_vectors = vport->back->num_roce_msix; 1921 1922 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || 1923 vport->back->num_msi_left == 0) 1924 return -EINVAL; 1925 1926 roce->rinfo.base_vector = vport->back->roce_base_vector; 1927 1928 roce->rinfo.netdev = nic->kinfo.netdev; 1929 roce->rinfo.roce_io_base = vport->back->hw.io_base; 1930 1931 roce->pdev = nic->pdev; 1932 roce->ae_algo = nic->ae_algo; 1933 roce->numa_node_mask = nic->numa_node_mask; 1934 1935 return 0; 1936 } 1937 1938 static int hclge_init_msix(struct hclge_dev *hdev) 1939 { 1940 struct pci_dev *pdev = hdev->pdev; 1941 int ret, i; 1942 1943 hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi, 1944 sizeof(struct msix_entry), 1945 GFP_KERNEL); 1946 if (!hdev->msix_entries) 1947 return -ENOMEM; 1948 1949 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 1950 sizeof(u16), GFP_KERNEL); 1951 if (!hdev->vector_status) 1952 return -ENOMEM; 1953 1954 for (i = 0; i < hdev->num_msi; i++) { 1955 hdev->msix_entries[i].entry = i; 1956 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 1957 } 1958 1959 hdev->num_msi_left = hdev->num_msi; 1960 hdev->base_msi_vector = hdev->pdev->irq; 1961 hdev->roce_base_vector = hdev->base_msi_vector + 1962 HCLGE_ROCE_VECTOR_OFFSET; 1963 1964 ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries, 1965 hdev->num_msi, hdev->num_msi); 1966 if (ret < 0) { 1967 dev_info(&hdev->pdev->dev, 1968 "MSI-X vector alloc failed: %d\n", ret); 1969 return ret; 1970 } 1971 1972 return 0; 1973 } 1974 1975 static int hclge_init_msi(struct hclge_dev *hdev) 1976 { 1977 struct pci_dev *pdev = hdev->pdev; 1978 int vectors; 1979 int i; 1980 1981 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 1982 sizeof(u16), GFP_KERNEL); 1983 if (!hdev->vector_status) 1984 return -ENOMEM; 1985 1986 for (i = 0; i < hdev->num_msi; i++) 1987 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 1988 1989 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI); 1990 if (vectors < 0) { 1991 dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors); 1992 return -EINVAL; 1993 } 1994 hdev->num_msi = vectors; 1995 hdev->num_msi_left = vectors; 1996 hdev->base_msi_vector = pdev->irq; 1997 hdev->roce_base_vector = hdev->base_msi_vector + 1998 HCLGE_ROCE_VECTOR_OFFSET; 1999 2000 return 0; 2001 } 2002 2003 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) 2004 { 2005 struct hclge_mac *mac = &hdev->hw.mac; 2006 2007 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) 2008 mac->duplex = (u8)duplex; 2009 else 2010 mac->duplex = HCLGE_MAC_FULL; 2011 2012 mac->speed = speed; 2013 } 2014 2015 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) 2016 { 2017 struct hclge_config_mac_speed_dup_cmd *req; 2018 struct hclge_desc desc; 2019 int ret; 2020 2021 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2022 2023 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2024 2025 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); 2026 2027 switch (speed) { 2028 case HCLGE_MAC_SPEED_10M: 2029 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2030 HCLGE_CFG_SPEED_S, 6); 2031 break; 2032 case HCLGE_MAC_SPEED_100M: 2033 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2034 HCLGE_CFG_SPEED_S, 7); 2035 break; 2036 case HCLGE_MAC_SPEED_1G: 2037 
hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2038 HCLGE_CFG_SPEED_S, 0); 2039 break; 2040 case HCLGE_MAC_SPEED_10G: 2041 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2042 HCLGE_CFG_SPEED_S, 1); 2043 break; 2044 case HCLGE_MAC_SPEED_25G: 2045 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2046 HCLGE_CFG_SPEED_S, 2); 2047 break; 2048 case HCLGE_MAC_SPEED_40G: 2049 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2050 HCLGE_CFG_SPEED_S, 3); 2051 break; 2052 case HCLGE_MAC_SPEED_50G: 2053 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2054 HCLGE_CFG_SPEED_S, 4); 2055 break; 2056 case HCLGE_MAC_SPEED_100G: 2057 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2058 HCLGE_CFG_SPEED_S, 5); 2059 break; 2060 default: 2061 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2062 return -EINVAL; 2063 } 2064 2065 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2066 1); 2067 2068 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2069 if (ret) { 2070 dev_err(&hdev->pdev->dev, 2071 "mac speed/duplex config cmd failed %d.\n", ret); 2072 return ret; 2073 } 2074 2075 hclge_check_speed_dup(hdev, duplex, speed); 2076 2077 return 0; 2078 } 2079 2080 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2081 u8 duplex) 2082 { 2083 struct hclge_vport *vport = hclge_get_vport(handle); 2084 struct hclge_dev *hdev = vport->back; 2085 2086 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2087 } 2088 2089 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, 2090 u8 *duplex) 2091 { 2092 struct hclge_query_an_speed_dup_cmd *req; 2093 struct hclge_desc desc; 2094 int speed_tmp; 2095 int ret; 2096 2097 req = (struct hclge_query_an_speed_dup_cmd *)desc.data; 2098 2099 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); 2100 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2101 if (ret) { 2102 dev_err(&hdev->pdev->dev, 2103 "mac speed/autoneg/duplex query cmd failed %d\n", 2104 ret); 2105 return ret; 2106 } 2107 2108 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); 2109 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, 2110 HCLGE_QUERY_SPEED_S); 2111 2112 ret = hclge_parse_speed(speed_tmp, speed); 2113 if (ret) { 2114 dev_err(&hdev->pdev->dev, 2115 "could not parse speed(=%d), %d\n", speed_tmp, ret); 2116 return -EIO; 2117 } 2118 2119 return 0; 2120 } 2121 2122 static int hclge_query_autoneg_result(struct hclge_dev *hdev) 2123 { 2124 struct hclge_mac *mac = &hdev->hw.mac; 2125 struct hclge_query_an_speed_dup_cmd *req; 2126 struct hclge_desc desc; 2127 int ret; 2128 2129 req = (struct hclge_query_an_speed_dup_cmd *)desc.data; 2130 2131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); 2132 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2133 if (ret) { 2134 dev_err(&hdev->pdev->dev, 2135 "autoneg result query cmd failed %d.\n", ret); 2136 return ret; 2137 } 2138 2139 mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B); 2140 2141 return 0; 2142 } 2143 2144 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2145 { 2146 struct hclge_config_auto_neg_cmd *req; 2147 struct hclge_desc desc; 2148 u32 flag = 0; 2149 int ret; 2150 2151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2152 2153 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2154 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); 2155 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2156 2157 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2158 if (ret) { 2159 
dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2160 ret); 2161 return ret; 2162 } 2163 2164 return 0; 2165 } 2166 2167 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2168 { 2169 struct hclge_vport *vport = hclge_get_vport(handle); 2170 struct hclge_dev *hdev = vport->back; 2171 2172 return hclge_set_autoneg_en(hdev, enable); 2173 } 2174 2175 static int hclge_get_autoneg(struct hnae3_handle *handle) 2176 { 2177 struct hclge_vport *vport = hclge_get_vport(handle); 2178 struct hclge_dev *hdev = vport->back; 2179 2180 hclge_query_autoneg_result(hdev); 2181 2182 return hdev->hw.mac.autoneg; 2183 } 2184 2185 static int hclge_mac_init(struct hclge_dev *hdev) 2186 { 2187 struct hclge_mac *mac = &hdev->hw.mac; 2188 int ret; 2189 2190 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); 2191 if (ret) { 2192 dev_err(&hdev->pdev->dev, 2193 "Config mac speed dup fail ret=%d\n", ret); 2194 return ret; 2195 } 2196 2197 mac->link = 0; 2198 2199 ret = hclge_mac_mdio_config(hdev); 2200 if (ret) { 2201 dev_warn(&hdev->pdev->dev, 2202 "mdio config fail ret=%d\n", ret); 2203 return ret; 2204 } 2205 2206 /* Initialize the MTA table work mode */ 2207 hdev->accept_mta_mc = true; 2208 hdev->enable_mta = true; 2209 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; 2210 2211 ret = hclge_set_mta_filter_mode(hdev, 2212 hdev->mta_mac_sel_type, 2213 hdev->enable_mta); 2214 if (ret) { 2215 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", 2216 ret); 2217 return ret; 2218 } 2219 2220 return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); 2221 } 2222 2223 static void hclge_task_schedule(struct hclge_dev *hdev) 2224 { 2225 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && 2226 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2227 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) 2228 (void)schedule_work(&hdev->service_task); 2229 } 2230 2231 static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2232 { 2233 struct hclge_link_status_cmd *req; 2234 struct hclge_desc desc; 2235 int link_status; 2236 int ret; 2237 2238 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2239 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2240 if (ret) { 2241 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2242 ret); 2243 return ret; 2244 } 2245 2246 req = (struct hclge_link_status_cmd *)desc.data; 2247 link_status = req->status & HCLGE_LINK_STATUS; 2248 2249 return !!link_status; 2250 } 2251 2252 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2253 { 2254 int mac_state; 2255 int link_stat; 2256 2257 mac_state = hclge_get_mac_link_status(hdev); 2258 2259 if (hdev->hw.mac.phydev) { 2260 if (!genphy_read_status(hdev->hw.mac.phydev)) 2261 link_stat = mac_state & 2262 hdev->hw.mac.phydev->link; 2263 else 2264 link_stat = 0; 2265 2266 } else { 2267 link_stat = mac_state; 2268 } 2269 2270 return !!link_stat; 2271 } 2272 2273 static void hclge_update_link_status(struct hclge_dev *hdev) 2274 { 2275 struct hnae3_client *client = hdev->nic_client; 2276 struct hnae3_handle *handle; 2277 int state; 2278 int i; 2279 2280 if (!client) 2281 return; 2282 state = hclge_get_mac_phy_link(hdev); 2283 if (state != hdev->hw.mac.link) { 2284 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2285 handle = &hdev->vport[i].nic; 2286 client->ops->link_status_change(handle, state); 2287 } 2288 hdev->hw.mac.link = state; 2289 } 2290 } 2291 2292 static int hclge_update_speed_duplex(struct hclge_dev *hdev) 2293 { 2294 struct hclge_mac mac = hdev->hw.mac; 
2295 u8 duplex; 2296 int speed; 2297 int ret; 2298 2299 /* get the speed and duplex as the autoneg result from the mac cmd when 2300 * the phy doesn't exist. 2301 */ 2302 if (mac.phydev) 2303 return 0; 2304 2305 /* update mac->autoneg. */ 2306 ret = hclge_query_autoneg_result(hdev); 2307 if (ret) { 2308 dev_err(&hdev->pdev->dev, 2309 "autoneg result query failed %d\n", ret); 2310 return ret; 2311 } 2312 2313 if (!mac.autoneg) 2314 return 0; 2315 2316 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); 2317 if (ret) { 2318 dev_err(&hdev->pdev->dev, 2319 "mac autoneg/speed/duplex query failed %d\n", ret); 2320 return ret; 2321 } 2322 2323 if ((mac.speed != speed) || (mac.duplex != duplex)) { 2324 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2325 if (ret) { 2326 dev_err(&hdev->pdev->dev, 2327 "mac speed/duplex config failed %d\n", ret); 2328 return ret; 2329 } 2330 } 2331 2332 return 0; 2333 } 2334 2335 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) 2336 { 2337 struct hclge_vport *vport = hclge_get_vport(handle); 2338 struct hclge_dev *hdev = vport->back; 2339 2340 return hclge_update_speed_duplex(hdev); 2341 } 2342 2343 static int hclge_get_status(struct hnae3_handle *handle) 2344 { 2345 struct hclge_vport *vport = hclge_get_vport(handle); 2346 struct hclge_dev *hdev = vport->back; 2347 2348 hclge_update_link_status(hdev); 2349 2350 return hdev->hw.mac.link; 2351 } 2352 2353 static void hclge_service_timer(unsigned long data) 2354 { 2355 struct hclge_dev *hdev = (struct hclge_dev *)data; 2356 (void)mod_timer(&hdev->service_timer, jiffies + HZ); 2357 2358 hclge_task_schedule(hdev); 2359 } 2360 2361 static void hclge_service_complete(struct hclge_dev *hdev) 2362 { 2363 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); 2364 2365 /* Flush memory before next watchdog */ 2366 smp_mb__before_atomic(); 2367 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 2368 } 2369 2370 static void hclge_service_task(struct work_struct *work) 2371 { 2372 struct hclge_dev *hdev = 2373 container_of(work, struct hclge_dev, service_task); 2374 2375 hclge_update_speed_duplex(hdev); 2376 hclge_update_link_status(hdev); 2377 hclge_update_stats_for_all(hdev); 2378 hclge_service_complete(hdev); 2379 } 2380 2381 static void hclge_disable_sriov(struct hclge_dev *hdev) 2382 { 2383 /* If our VFs are assigned we cannot shut down SR-IOV 2384 * without causing issues, so just leave the hardware 2385 * available but disabled 2386 */ 2387 if (pci_vfs_assigned(hdev->pdev)) { 2388 dev_warn(&hdev->pdev->dev, 2389 "disabling driver while VFs are assigned\n"); 2390 return; 2391 } 2392 2393 pci_disable_sriov(hdev->pdev); 2394 } 2395 2396 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 2397 { 2398 /* VF handle has no client */ 2399 if (!handle->client) 2400 return container_of(handle, struct hclge_vport, nic); 2401 else if (handle->client->type == HNAE3_CLIENT_ROCE) 2402 return container_of(handle, struct hclge_vport, roce); 2403 else 2404 return container_of(handle, struct hclge_vport, nic); 2405 } 2406 2407 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 2408 struct hnae3_vector_info *vector_info) 2409 { 2410 struct hclge_vport *vport = hclge_get_vport(handle); 2411 struct hnae3_vector_info *vector = vector_info; 2412 struct hclge_dev *hdev = vport->back; 2413 int alloc = 0; 2414 int i, j; 2415 2416 vector_num = min(hdev->num_msi_left, vector_num); 2417 2418 for (j = 0; j < vector_num; j++) { 2419 for (i = 1; i < hdev->num_msi; i++) { 2420 if
(hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 2421 vector->vector = pci_irq_vector(hdev->pdev, i); 2422 vector->io_addr = hdev->hw.io_base + 2423 HCLGE_VECTOR_REG_BASE + 2424 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 2425 vport->vport_id * 2426 HCLGE_VECTOR_VF_OFFSET; 2427 hdev->vector_status[i] = vport->vport_id; 2428 2429 vector++; 2430 alloc++; 2431 2432 break; 2433 } 2434 } 2435 } 2436 hdev->num_msi_left -= alloc; 2437 hdev->num_msi_used += alloc; 2438 2439 return alloc; 2440 } 2441 2442 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 2443 { 2444 int i; 2445 2446 for (i = 0; i < hdev->num_msi; i++) { 2447 if (hdev->msix_entries) { 2448 if (vector == hdev->msix_entries[i].vector) 2449 return i; 2450 } else { 2451 if (vector == (hdev->base_msi_vector + i)) 2452 return i; 2453 } 2454 } 2455 return -EINVAL; 2456 } 2457 2458 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 2459 { 2460 return HCLGE_RSS_KEY_SIZE; 2461 } 2462 2463 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 2464 { 2465 return HCLGE_RSS_IND_TBL_SIZE; 2466 } 2467 2468 static int hclge_get_rss_algo(struct hclge_dev *hdev) 2469 { 2470 struct hclge_rss_config_cmd *req; 2471 struct hclge_desc desc; 2472 int rss_hash_algo; 2473 int ret; 2474 2475 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true); 2476 2477 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2478 if (ret) { 2479 dev_err(&hdev->pdev->dev, 2480 "Get link status error, status =%d\n", ret); 2481 return ret; 2482 } 2483 2484 req = (struct hclge_rss_config_cmd *)desc.data; 2485 rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK); 2486 2487 if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ) 2488 return ETH_RSS_HASH_TOP; 2489 2490 return -EINVAL; 2491 } 2492 2493 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 2494 const u8 hfunc, const u8 *key) 2495 { 2496 struct hclge_rss_config_cmd *req; 2497 struct hclge_desc desc; 2498 int key_offset; 2499 int key_size; 2500 int ret; 2501 2502 req = (struct hclge_rss_config_cmd *)desc.data; 2503 2504 for (key_offset = 0; key_offset < 3; key_offset++) { 2505 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 2506 false); 2507 2508 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 2509 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 2510 2511 if (key_offset == 2) 2512 key_size = 2513 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; 2514 else 2515 key_size = HCLGE_RSS_HASH_KEY_NUM; 2516 2517 memcpy(req->hash_key, 2518 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 2519 2520 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2521 if (ret) { 2522 dev_err(&hdev->pdev->dev, 2523 "Configure RSS config fail, status = %d\n", 2524 ret); 2525 return ret; 2526 } 2527 } 2528 return 0; 2529 } 2530 2531 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) 2532 { 2533 struct hclge_rss_indirection_table_cmd *req; 2534 struct hclge_desc desc; 2535 int i, j; 2536 int ret; 2537 2538 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 2539 2540 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 2541 hclge_cmd_setup_basic_desc 2542 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 2543 2544 req->start_table_index = 2545 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 2546 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 2547 2548 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 2549 req->rss_result[j] = 2550 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 2551 2552 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2553 if (ret) { 2554 
dev_err(&hdev->pdev->dev, 2555 "Configure rss indir table fail, status = %d\n", 2556 ret); 2557 return ret; 2558 } 2559 } 2560 return 0; 2561 } 2562 2563 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 2564 u16 *tc_size, u16 *tc_offset) 2565 { 2566 struct hclge_rss_tc_mode_cmd *req; 2567 struct hclge_desc desc; 2568 int ret; 2569 int i; 2570 2571 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 2572 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 2573 2574 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2575 u16 mode = 0; 2576 2577 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 2578 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, 2579 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 2580 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 2581 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 2582 2583 req->rss_tc_mode[i] = cpu_to_le16(mode); 2584 } 2585 2586 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2587 if (ret) { 2588 dev_err(&hdev->pdev->dev, 2589 "Configure rss tc mode fail, status = %d\n", ret); 2590 return ret; 2591 } 2592 2593 return 0; 2594 } 2595 2596 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 2597 { 2598 struct hclge_rss_input_tuple_cmd *req; 2599 struct hclge_desc desc; 2600 int ret; 2601 2602 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 2603 2604 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 2605 req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2606 req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2607 req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; 2608 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2609 req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2610 req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2611 req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; 2612 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2613 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2614 if (ret) { 2615 dev_err(&hdev->pdev->dev, 2616 "Configure rss input fail, status = %d\n", ret); 2617 return ret; 2618 } 2619 2620 return 0; 2621 } 2622 2623 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 2624 u8 *key, u8 *hfunc) 2625 { 2626 struct hclge_vport *vport = hclge_get_vport(handle); 2627 struct hclge_dev *hdev = vport->back; 2628 int i; 2629 2630 /* Get hash algorithm */ 2631 if (hfunc) 2632 *hfunc = hclge_get_rss_algo(hdev); 2633 2634 /* Get the RSS Key required by the user */ 2635 if (key) 2636 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 2637 2638 /* Get indirect table */ 2639 if (indir) 2640 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 2641 indir[i] = vport->rss_indirection_tbl[i]; 2642 2643 return 0; 2644 } 2645 2646 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 2647 const u8 *key, const u8 hfunc) 2648 { 2649 struct hclge_vport *vport = hclge_get_vport(handle); 2650 struct hclge_dev *hdev = vport->back; 2651 u8 hash_algo; 2652 int ret, i; 2653 2654 /* Set the RSS Hash Key if specified by the user */ 2655 if (key) { 2656 /* Update the shadow RSS key with the user specified key */ 2657 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 2658 2659 if (hfunc == ETH_RSS_HASH_TOP || 2660 hfunc == ETH_RSS_HASH_NO_CHANGE) 2661 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 2662 else 2663 return -EINVAL; 2664 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 2665 if (ret) 2666 return ret; 2667 } 2668 2669 /* Update the shadow RSS table with user specified qids */ 2670 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 2671 vport->rss_indirection_tbl[i] = indir[i]; 2672 2673 /* Update the hardware */ 2674
ret = hclge_set_rss_indir_table(hdev, indir); 2675 return ret; 2676 } 2677 2678 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 2679 { 2680 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; 2681 2682 if (nfc->data & RXH_L4_B_2_3) 2683 hash_sets |= HCLGE_D_PORT_BIT; 2684 else 2685 hash_sets &= ~HCLGE_D_PORT_BIT; 2686 2687 if (nfc->data & RXH_IP_SRC) 2688 hash_sets |= HCLGE_S_IP_BIT; 2689 else 2690 hash_sets &= ~HCLGE_S_IP_BIT; 2691 2692 if (nfc->data & RXH_IP_DST) 2693 hash_sets |= HCLGE_D_IP_BIT; 2694 else 2695 hash_sets &= ~HCLGE_D_IP_BIT; 2696 2697 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 2698 hash_sets |= HCLGE_V_TAG_BIT; 2699 2700 return hash_sets; 2701 } 2702 2703 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 2704 struct ethtool_rxnfc *nfc) 2705 { 2706 struct hclge_vport *vport = hclge_get_vport(handle); 2707 struct hclge_dev *hdev = vport->back; 2708 struct hclge_rss_input_tuple_cmd *req; 2709 struct hclge_desc desc; 2710 u8 tuple_sets; 2711 int ret; 2712 2713 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 2714 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 2715 return -EINVAL; 2716 2717 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 2718 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); 2719 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2720 if (ret) { 2721 dev_err(&hdev->pdev->dev, 2722 "Read rss tuple fail, status = %d\n", ret); 2723 return ret; 2724 } 2725 2726 hclge_cmd_reuse_desc(&desc, false); 2727 2728 tuple_sets = hclge_get_rss_hash_bits(nfc); 2729 switch (nfc->flow_type) { 2730 case TCP_V4_FLOW: 2731 req->ipv4_tcp_en = tuple_sets; 2732 break; 2733 case TCP_V6_FLOW: 2734 req->ipv6_tcp_en = tuple_sets; 2735 break; 2736 case UDP_V4_FLOW: 2737 req->ipv4_udp_en = tuple_sets; 2738 break; 2739 case UDP_V6_FLOW: 2740 req->ipv6_udp_en = tuple_sets; 2741 break; 2742 case SCTP_V4_FLOW: 2743 req->ipv4_sctp_en = tuple_sets; 2744 break; 2745 case SCTP_V6_FLOW: 2746 if ((nfc->data & RXH_L4_B_0_1) || 2747 (nfc->data & RXH_L4_B_2_3)) 2748 return -EINVAL; 2749 2750 req->ipv6_sctp_en = tuple_sets; 2751 break; 2752 case IPV4_FLOW: 2753 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2754 break; 2755 case IPV6_FLOW: 2756 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 2757 break; 2758 default: 2759 return -EINVAL; 2760 } 2761 2762 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2763 if (ret) 2764 dev_err(&hdev->pdev->dev, 2765 "Set rss tuple fail, status = %d\n", ret); 2766 2767 return ret; 2768 } 2769 2770 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 2771 struct ethtool_rxnfc *nfc) 2772 { 2773 struct hclge_vport *vport = hclge_get_vport(handle); 2774 struct hclge_dev *hdev = vport->back; 2775 struct hclge_rss_input_tuple_cmd *req; 2776 struct hclge_desc desc; 2777 u8 tuple_sets; 2778 int ret; 2779 2780 nfc->data = 0; 2781 2782 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 2783 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); 2784 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2785 if (ret) { 2786 dev_err(&hdev->pdev->dev, 2787 "Read rss tuple fail, status = %d\n", ret); 2788 return ret; 2789 } 2790 2791 switch (nfc->flow_type) { 2792 case TCP_V4_FLOW: 2793 tuple_sets = req->ipv4_tcp_en; 2794 break; 2795 case UDP_V4_FLOW: 2796 tuple_sets = req->ipv4_udp_en; 2797 break; 2798 case TCP_V6_FLOW: 2799 tuple_sets = req->ipv6_tcp_en; 2800 break; 2801 case UDP_V6_FLOW: 2802 tuple_sets = req->ipv6_udp_en; 2803 break; 2804 case SCTP_V4_FLOW: 2805 tuple_sets = req->ipv4_sctp_en; 2806 break; 
2807 case SCTP_V6_FLOW: 2808 tuple_sets = req->ipv6_sctp_en; 2809 break; 2810 case IPV4_FLOW: 2811 case IPV6_FLOW: 2812 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 2813 break; 2814 default: 2815 return -EINVAL; 2816 } 2817 2818 if (!tuple_sets) 2819 return 0; 2820 2821 if (tuple_sets & HCLGE_D_PORT_BIT) 2822 nfc->data |= RXH_L4_B_2_3; 2823 if (tuple_sets & HCLGE_S_PORT_BIT) 2824 nfc->data |= RXH_L4_B_0_1; 2825 if (tuple_sets & HCLGE_D_IP_BIT) 2826 nfc->data |= RXH_IP_DST; 2827 if (tuple_sets & HCLGE_S_IP_BIT) 2828 nfc->data |= RXH_IP_SRC; 2829 2830 return 0; 2831 } 2832 2833 static int hclge_get_tc_size(struct hnae3_handle *handle) 2834 { 2835 struct hclge_vport *vport = hclge_get_vport(handle); 2836 struct hclge_dev *hdev = vport->back; 2837 2838 return hdev->rss_size_max; 2839 } 2840 2841 int hclge_rss_init_hw(struct hclge_dev *hdev) 2842 { 2843 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 2844 struct hclge_vport *vport = hdev->vport; 2845 u16 tc_offset[HCLGE_MAX_TC_NUM]; 2846 u8 rss_key[HCLGE_RSS_KEY_SIZE]; 2847 u16 tc_valid[HCLGE_MAX_TC_NUM]; 2848 u16 tc_size[HCLGE_MAX_TC_NUM]; 2849 u32 *rss_indir = NULL; 2850 u16 rss_size = 0, roundup_size; 2851 const u8 *key; 2852 int i, ret, j; 2853 2854 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 2855 if (!rss_indir) 2856 return -ENOMEM; 2857 2858 /* Get default RSS key */ 2859 netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE); 2860 2861 /* Initialize RSS indirect table for each vport */ 2862 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 2863 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { 2864 vport[j].rss_indirection_tbl[i] = 2865 i % vport[j].alloc_rss_size; 2866 2867 /* vport 0 is for PF */ 2868 if (j != 0) 2869 continue; 2870 2871 rss_size = vport[j].alloc_rss_size; 2872 rss_indir[i] = vport[j].rss_indirection_tbl[i]; 2873 } 2874 } 2875 ret = hclge_set_rss_indir_table(hdev, rss_indir); 2876 if (ret) 2877 goto err; 2878 2879 key = rss_key; 2880 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 2881 if (ret) 2882 goto err; 2883 2884 ret = hclge_set_rss_input_tuple(hdev); 2885 if (ret) 2886 goto err; 2887 2888 /* Each TC has the same queue size, and the tc_size set to hardware is 2889 * the log2 of the roundup power of two of rss_size; the actual queue 2890 * size is limited by the indirection table.
2891 */ 2892 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 2893 dev_err(&hdev->pdev->dev, 2894 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 2895 rss_size); 2896 ret = -EINVAL; 2897 goto err; 2898 } 2899 2900 roundup_size = roundup_pow_of_two(rss_size); 2901 roundup_size = ilog2(roundup_size); 2902 2903 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2904 tc_valid[i] = 0; 2905 2906 if (!(hdev->hw_tc_map & BIT(i))) 2907 continue; 2908 2909 tc_valid[i] = 1; 2910 tc_size[i] = roundup_size; 2911 tc_offset[i] = rss_size * i; 2912 } 2913 2914 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 2915 2916 err: 2917 kfree(rss_indir); 2918 2919 return ret; 2920 } 2921 2922 int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, 2923 struct hnae3_ring_chain_node *ring_chain) 2924 { 2925 struct hclge_dev *hdev = vport->back; 2926 struct hclge_ctrl_vector_chain_cmd *req; 2927 struct hnae3_ring_chain_node *node; 2928 struct hclge_desc desc; 2929 int ret; 2930 int i; 2931 2932 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false); 2933 2934 req = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 2935 req->int_vector_id = vector_id; 2936 2937 i = 0; 2938 for (node = ring_chain; node; node = node->next) { 2939 u16 type_and_id = 0; 2940 2941 hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, 2942 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); 2943 hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, 2944 node->tqp_index); 2945 hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M, 2946 HCLGE_INT_GL_IDX_S, 2947 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); 2948 req->tqp_type_and_id[i] = cpu_to_le16(type_and_id); 2949 req->vfid = vport->vport_id; 2950 2951 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 2952 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 2953 2954 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2955 if (ret) { 2956 dev_err(&hdev->pdev->dev, 2957 "Map TQP fail, status is %d.\n", 2958 ret); 2959 return ret; 2960 } 2961 i = 0; 2962 2963 hclge_cmd_setup_basic_desc(&desc, 2964 HCLGE_OPC_ADD_RING_TO_VECTOR, 2965 false); 2966 req->int_vector_id = vector_id; 2967 } 2968 } 2969 2970 if (i > 0) { 2971 req->int_cause_num = i; 2972 2973 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2974 if (ret) { 2975 dev_err(&hdev->pdev->dev, 2976 "Map TQP fail, status is %d.\n", ret); 2977 return ret; 2978 } 2979 } 2980 2981 return 0; 2982 } 2983 2984 static int hclge_map_handle_ring_to_vector( 2985 struct hnae3_handle *handle, int vector, 2986 struct hnae3_ring_chain_node *ring_chain) 2987 { 2988 struct hclge_vport *vport = hclge_get_vport(handle); 2989 struct hclge_dev *hdev = vport->back; 2990 int vector_id; 2991 2992 vector_id = hclge_get_vector_index(hdev, vector); 2993 if (vector_id < 0) { 2994 dev_err(&hdev->pdev->dev, 2995 "Get vector index fail. ret =%d\n", vector_id); 2996 return vector_id; 2997 } 2998 2999 return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain); 3000 } 3001 3002 static int hclge_unmap_ring_from_vector( 3003 struct hnae3_handle *handle, int vector, 3004 struct hnae3_ring_chain_node *ring_chain) 3005 { 3006 struct hclge_vport *vport = hclge_get_vport(handle); 3007 struct hclge_dev *hdev = vport->back; 3008 struct hclge_ctrl_vector_chain_cmd *req; 3009 struct hnae3_ring_chain_node *node; 3010 struct hclge_desc desc; 3011 int i, vector_id; 3012 int ret; 3013 3014 vector_id = hclge_get_vector_index(hdev, vector); 3015 if (vector_id < 0) { 3016 dev_err(&handle->pdev->dev, 3017 "Get vector index fail. 
ret =%d\n", vector_id); 3018 return vector_id; 3019 } 3020 3021 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false); 3022 3023 req = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 3024 req->int_vector_id = vector_id; 3025 3026 i = 0; 3027 for (node = ring_chain; node; node = node->next) { 3028 u16 type_and_id = 0; 3029 3030 hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, 3031 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3032 hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, 3033 node->tqp_index); 3034 hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M, 3035 HCLGE_INT_GL_IDX_S, 3036 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3037 3038 req->tqp_type_and_id[i] = cpu_to_le16(type_and_id); 3039 req->vfid = vport->vport_id; 3040 3041 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 3042 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 3043 3044 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3045 if (ret) { 3046 dev_err(&hdev->pdev->dev, 3047 "Unmap TQP fail, status is %d.\n", 3048 ret); 3049 return ret; 3050 } 3051 i = 0; 3052 hclge_cmd_setup_basic_desc(&desc, 3053 HCLGE_OPC_DEL_RING_TO_VECTOR, 3054 false); 3055 req->int_vector_id = vector_id; 3056 } 3057 } 3058 3059 if (i > 0) { 3060 req->int_cause_num = i; 3061 3062 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3063 if (ret) { 3064 dev_err(&hdev->pdev->dev, 3065 "Unmap TQP fail, status is %d.\n", ret); 3066 return ret; 3067 } 3068 } 3069 3070 return 0; 3071 } 3072 3073 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 3074 struct hclge_promisc_param *param) 3075 { 3076 struct hclge_promisc_cfg_cmd *req; 3077 struct hclge_desc desc; 3078 int ret; 3079 3080 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 3081 3082 req = (struct hclge_promisc_cfg_cmd *)desc.data; 3083 req->vf_id = param->vf_id; 3084 req->flag = (param->enable << HCLGE_PROMISC_EN_B); 3085 3086 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3087 if (ret) { 3088 dev_err(&hdev->pdev->dev, 3089 "Set promisc mode fail, status is %d.\n", ret); 3090 return ret; 3091 } 3092 return 0; 3093 } 3094 3095 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, 3096 bool en_mc, bool en_bc, int vport_id) 3097 { 3098 if (!param) 3099 return; 3100 3101 memset(param, 0, sizeof(struct hclge_promisc_param)); 3102 if (en_uc) 3103 param->enable = HCLGE_PROMISC_EN_UC; 3104 if (en_mc) 3105 param->enable |= HCLGE_PROMISC_EN_MC; 3106 if (en_bc) 3107 param->enable |= HCLGE_PROMISC_EN_BC; 3108 param->vf_id = vport_id; 3109 } 3110 3111 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) 3112 { 3113 struct hclge_vport *vport = hclge_get_vport(handle); 3114 struct hclge_dev *hdev = vport->back; 3115 struct hclge_promisc_param param; 3116 3117 hclge_promisc_param_init(&param, en, en, true, vport->vport_id); 3118 hclge_cmd_set_promisc_mode(hdev, &param); 3119 } 3120 3121 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 3122 { 3123 struct hclge_desc desc; 3124 struct hclge_config_mac_mode_cmd *req = 3125 (struct hclge_config_mac_mode_cmd *)desc.data; 3126 u32 loop_en = 0; 3127 int ret; 3128 3129 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 3130 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); 3131 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); 3132 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); 3133 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); 3134 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); 3135 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); 3136
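/* 1588 (PTP) timestamping and both MAC loopback modes stay disabled here; the FCS insert/check/strip and over/undersize truncation bits below follow the requested enable state, like the TX/RX and padding enables above. */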
hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 3137 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); 3138 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); 3139 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); 3140 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); 3141 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); 3142 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); 3143 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); 3144 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3145 3146 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3147 if (ret) 3148 dev_err(&hdev->pdev->dev, 3149 "mac enable fail, ret =%d.\n", ret); 3150 } 3151 3152 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, 3153 int stream_id, bool enable) 3154 { 3155 struct hclge_desc desc; 3156 struct hclge_cfg_com_tqp_queue_cmd *req = 3157 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 3158 int ret; 3159 3160 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 3161 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); 3162 req->stream_id = cpu_to_le16(stream_id); 3163 req->enable |= enable << HCLGE_TQP_ENABLE_B; 3164 3165 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3166 if (ret) 3167 dev_err(&hdev->pdev->dev, 3168 "Tqp enable fail, status =%d.\n", ret); 3169 return ret; 3170 } 3171 3172 static void hclge_reset_tqp_stats(struct hnae3_handle *handle) 3173 { 3174 struct hclge_vport *vport = hclge_get_vport(handle); 3175 struct hnae3_queue *queue; 3176 struct hclge_tqp *tqp; 3177 int i; 3178 3179 for (i = 0; i < vport->alloc_tqps; i++) { 3180 queue = handle->kinfo.tqp[i]; 3181 tqp = container_of(queue, struct hclge_tqp, q); 3182 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 3183 } 3184 } 3185 3186 static int hclge_ae_start(struct hnae3_handle *handle) 3187 { 3188 struct hclge_vport *vport = hclge_get_vport(handle); 3189 struct hclge_dev *hdev = vport->back; 3190 int i, queue_id, ret; 3191 3192 for (i = 0; i < vport->alloc_tqps; i++) { 3193 /* todo clear interrupt */ 3194 /* ring enable */ 3195 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); 3196 if (queue_id < 0) { 3197 dev_warn(&hdev->pdev->dev, 3198 "Get invalid queue id, ignore it\n"); 3199 continue; 3200 } 3201 3202 hclge_tqp_enable(hdev, queue_id, 0, true); 3203 } 3204 /* mac enable */ 3205 hclge_cfg_mac_mode(hdev, true); 3206 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 3207 (void)mod_timer(&hdev->service_timer, jiffies + HZ); 3208 3209 ret = hclge_mac_start_phy(hdev); 3210 if (ret) 3211 return ret; 3212 3213 /* reset tqp stats */ 3214 hclge_reset_tqp_stats(handle); 3215 3216 return 0; 3217 } 3218 3219 static void hclge_ae_stop(struct hnae3_handle *handle) 3220 { 3221 struct hclge_vport *vport = hclge_get_vport(handle); 3222 struct hclge_dev *hdev = vport->back; 3223 int i, queue_id; 3224 3225 for (i = 0; i < vport->alloc_tqps; i++) { 3226 /* Ring disable */ 3227 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); 3228 if (queue_id < 0) { 3229 dev_warn(&hdev->pdev->dev, 3230 "Get invalid queue id, ignore it\n"); 3231 continue; 3232 } 3233 3234 hclge_tqp_enable(hdev, queue_id, 0, false); 3235 } 3236 /* Mac disable */ 3237 hclge_cfg_mac_mode(hdev, false); 3238 3239 hclge_mac_stop_phy(hdev); 3240 3241 /* reset tqp stats */ 3242 hclge_reset_tqp_stats(handle); 3243 } 3244 3245 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 3246 u16 cmdq_resp, u8 resp_code, 3247 enum hclge_mac_vlan_tbl_opcode op) 3248 { 3249 struct hclge_dev *hdev = vport->back; 3250 int return_status 
= -EIO; 3251 3252 if (cmdq_resp) { 3253 dev_err(&hdev->pdev->dev, 3254 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", 3255 cmdq_resp); 3256 return -EIO; 3257 } 3258 3259 if (op == HCLGE_MAC_VLAN_ADD) { 3260 if ((!resp_code) || (resp_code == 1)) { 3261 return_status = 0; 3262 } else if (resp_code == 2) { 3263 return_status = -EIO; 3264 dev_err(&hdev->pdev->dev, 3265 "add mac addr failed for uc_overflow.\n"); 3266 } else if (resp_code == 3) { 3267 return_status = -EIO; 3268 dev_err(&hdev->pdev->dev, 3269 "add mac addr failed for mc_overflow.\n"); 3270 } else { 3271 dev_err(&hdev->pdev->dev, 3272 "add mac addr failed for undefined, code=%d.\n", 3273 resp_code); 3274 } 3275 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 3276 if (!resp_code) { 3277 return_status = 0; 3278 } else if (resp_code == 1) { 3279 return_status = -EIO; 3280 dev_dbg(&hdev->pdev->dev, 3281 "remove mac addr failed for miss.\n"); 3282 } else { 3283 dev_err(&hdev->pdev->dev, 3284 "remove mac addr failed for undefined, code=%d.\n", 3285 resp_code); 3286 } 3287 } else if (op == HCLGE_MAC_VLAN_LKUP) { 3288 if (!resp_code) { 3289 return_status = 0; 3290 } else if (resp_code == 1) { 3291 return_status = -EIO; 3292 dev_dbg(&hdev->pdev->dev, 3293 "lookup mac addr failed for miss.\n"); 3294 } else { 3295 dev_err(&hdev->pdev->dev, 3296 "lookup mac addr failed for undefined, code=%d.\n", 3297 resp_code); 3298 } 3299 } else { 3300 return_status = -EIO; 3301 dev_err(&hdev->pdev->dev, 3302 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", 3303 op); 3304 } 3305 3306 return return_status; 3307 } 3308 3309 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 3310 { 3311 int word_num; 3312 int bit_num; 3313 3314 if (vfid > 255 || vfid < 0) 3315 return -EIO; 3316 3317 if (vfid >= 0 && vfid <= 191) { 3318 word_num = vfid / 32; 3319 bit_num = vfid % 32; 3320 if (clr) 3321 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3322 else 3323 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 3324 } else { 3325 word_num = (vfid - 192) / 32; 3326 bit_num = vfid % 32; 3327 if (clr) 3328 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3329 else 3330 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 3331 } 3332 3333 return 0; 3334 } 3335 3336 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 3337 { 3338 #define HCLGE_DESC_NUMBER 3 3339 #define HCLGE_FUNC_NUMBER_PER_DESC 6 3340 int i, j; 3341 3342 for (i = 0; i < HCLGE_DESC_NUMBER; i++) 3343 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 3344 if (desc[i].data[j]) 3345 return false; 3346 3347 return true; 3348 } 3349 3350 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 3351 const u8 *addr) 3352 { 3353 const unsigned char *mac_addr = addr; 3354 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 3355 (mac_addr[0]) | (mac_addr[1] << 8); 3356 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 3357 3358 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 3359 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 3360 } 3361 3362 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, 3363 const u8 *addr) 3364 { 3365 u16 high_val = addr[1] | (addr[0] << 8); 3366 struct hclge_dev *hdev = vport->back; 3367 u32 rsh = 4 - hdev->mta_mac_sel_type; 3368 u16 ret_val = (high_val >> rsh) & 0xfff; 3369 3370 return ret_val; 3371 } 3372 3373 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 3374 enum hclge_mta_dmac_sel_type mta_mac_sel, 3375 bool enable) 3376 { 3377 struct 
hclge_mta_filter_mode_cmd *req; 3378 struct hclge_desc desc; 3379 int ret; 3380 3381 req = (struct hclge_mta_filter_mode_cmd *)desc.data; 3382 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); 3383 3384 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, 3385 enable); 3386 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, 3387 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); 3388 3389 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3390 if (ret) { 3391 dev_err(&hdev->pdev->dev, 3392 "Config mat filter mode failed for cmd_send, ret =%d.\n", 3393 ret); 3394 return ret; 3395 } 3396 3397 return 0; 3398 } 3399 3400 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, 3401 u8 func_id, 3402 bool enable) 3403 { 3404 struct hclge_cfg_func_mta_filter_cmd *req; 3405 struct hclge_desc desc; 3406 int ret; 3407 3408 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; 3409 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); 3410 3411 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, 3412 enable); 3413 req->function_id = func_id; 3414 3415 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3416 if (ret) { 3417 dev_err(&hdev->pdev->dev, 3418 "Config func_id enable failed for cmd_send, ret =%d.\n", 3419 ret); 3420 return ret; 3421 } 3422 3423 return 0; 3424 } 3425 3426 static int hclge_set_mta_table_item(struct hclge_vport *vport, 3427 u16 idx, 3428 bool enable) 3429 { 3430 struct hclge_dev *hdev = vport->back; 3431 struct hclge_cfg_func_mta_item_cmd *req; 3432 struct hclge_desc desc; 3433 u16 item_idx = 0; 3434 int ret; 3435 3436 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; 3437 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); 3438 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); 3439 3440 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, 3441 HCLGE_CFG_MTA_ITEM_IDX_S, idx); 3442 req->item_idx = cpu_to_le16(item_idx); 3443 3444 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3445 if (ret) { 3446 dev_err(&hdev->pdev->dev, 3447 "Config mta table item failed for cmd_send, ret =%d.\n", 3448 ret); 3449 return ret; 3450 } 3451 3452 return 0; 3453 } 3454 3455 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 3456 struct hclge_mac_vlan_tbl_entry_cmd *req) 3457 { 3458 struct hclge_dev *hdev = vport->back; 3459 struct hclge_desc desc; 3460 u8 resp_code; 3461 u16 retval; 3462 int ret; 3463 3464 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 3465 3466 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3467 3468 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3469 if (ret) { 3470 dev_err(&hdev->pdev->dev, 3471 "del mac addr failed for cmd_send, ret =%d.\n", 3472 ret); 3473 return ret; 3474 } 3475 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 3476 retval = le16_to_cpu(desc.retval); 3477 3478 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 3479 HCLGE_MAC_VLAN_REMOVE); 3480 } 3481 3482 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 3483 struct hclge_mac_vlan_tbl_entry_cmd *req, 3484 struct hclge_desc *desc, 3485 bool is_mc) 3486 { 3487 struct hclge_dev *hdev = vport->back; 3488 u8 resp_code; 3489 u16 retval; 3490 int ret; 3491 3492 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 3493 if (is_mc) { 3494 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3495 memcpy(desc[0].data, 3496 req, 3497 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3498 hclge_cmd_setup_basic_desc(&desc[1], 3499 HCLGE_OPC_MAC_VLAN_ADD, 3500 true); 3501 desc[1].flag |= 
cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3502 hclge_cmd_setup_basic_desc(&desc[2], 3503 HCLGE_OPC_MAC_VLAN_ADD, 3504 true); 3505 ret = hclge_cmd_send(&hdev->hw, desc, 3); 3506 } else { 3507 memcpy(desc[0].data, 3508 req, 3509 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3510 ret = hclge_cmd_send(&hdev->hw, desc, 1); 3511 } 3512 if (ret) { 3513 dev_err(&hdev->pdev->dev, 3514 "lookup mac addr failed for cmd_send, ret =%d.\n", 3515 ret); 3516 return ret; 3517 } 3518 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 3519 retval = le16_to_cpu(desc[0].retval); 3520 3521 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 3522 HCLGE_MAC_VLAN_LKUP); 3523 } 3524 3525 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 3526 struct hclge_mac_vlan_tbl_entry_cmd *req, 3527 struct hclge_desc *mc_desc) 3528 { 3529 struct hclge_dev *hdev = vport->back; 3530 int cfg_status; 3531 u8 resp_code; 3532 u16 retval; 3533 int ret; 3534 3535 if (!mc_desc) { 3536 struct hclge_desc desc; 3537 3538 hclge_cmd_setup_basic_desc(&desc, 3539 HCLGE_OPC_MAC_VLAN_ADD, 3540 false); 3541 memcpy(desc.data, req, 3542 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3543 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3544 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 3545 retval = le16_to_cpu(desc.retval); 3546 3547 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 3548 resp_code, 3549 HCLGE_MAC_VLAN_ADD); 3550 } else { 3551 mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); 3552 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3553 mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); 3554 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3555 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); 3556 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 3557 memcpy(mc_desc[0].data, req, 3558 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3559 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 3560 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 3561 retval = le16_to_cpu(mc_desc[0].retval); 3562 3563 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 3564 resp_code, 3565 HCLGE_MAC_VLAN_ADD); 3566 } 3567 3568 if (ret) { 3569 dev_err(&hdev->pdev->dev, 3570 "add mac addr failed for cmd_send, ret =%d.\n", 3571 ret); 3572 return ret; 3573 } 3574 3575 return cfg_status; 3576 } 3577 3578 static int hclge_add_uc_addr(struct hnae3_handle *handle, 3579 const unsigned char *addr) 3580 { 3581 struct hclge_vport *vport = hclge_get_vport(handle); 3582 3583 return hclge_add_uc_addr_common(vport, addr); 3584 } 3585 3586 int hclge_add_uc_addr_common(struct hclge_vport *vport, 3587 const unsigned char *addr) 3588 { 3589 struct hclge_dev *hdev = vport->back; 3590 struct hclge_mac_vlan_tbl_entry_cmd req; 3591 enum hclge_cmd_status status; 3592 u16 egress_port = 0; 3593 3594 /* mac addr check */ 3595 if (is_zero_ether_addr(addr) || 3596 is_broadcast_ether_addr(addr) || 3597 is_multicast_ether_addr(addr)) { 3598 dev_err(&hdev->pdev->dev, 3599 "Set_uc mac err! invalid mac:%pM. 
is_zero:%d,is_br=%d,is_mul=%d\n", 3600 addr, 3601 is_zero_ether_addr(addr), 3602 is_broadcast_ether_addr(addr), 3603 is_multicast_ether_addr(addr)); 3604 return -EINVAL; 3605 } 3606 3607 memset(&req, 0, sizeof(req)); 3608 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 3609 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 3610 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); 3611 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 3612 3613 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); 3614 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); 3615 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 3616 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 3617 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, 3618 HCLGE_MAC_EPORT_PFID_S, 0); 3619 3620 req.egress_port = cpu_to_le16(egress_port); 3621 3622 hclge_prepare_mac_addr(&req, addr); 3623 3624 status = hclge_add_mac_vlan_tbl(vport, &req, NULL); 3625 3626 return status; 3627 } 3628 3629 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 3630 const unsigned char *addr) 3631 { 3632 struct hclge_vport *vport = hclge_get_vport(handle); 3633 3634 return hclge_rm_uc_addr_common(vport, addr); 3635 } 3636 3637 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 3638 const unsigned char *addr) 3639 { 3640 struct hclge_dev *hdev = vport->back; 3641 struct hclge_mac_vlan_tbl_entry_cmd req; 3642 enum hclge_cmd_status status; 3643 3644 /* mac addr check */ 3645 if (is_zero_ether_addr(addr) || 3646 is_broadcast_ether_addr(addr) || 3647 is_multicast_ether_addr(addr)) { 3648 dev_dbg(&hdev->pdev->dev, 3649 "Remove mac err! invalid mac:%pM.\n", 3650 addr); 3651 return -EINVAL; 3652 } 3653 3654 memset(&req, 0, sizeof(req)); 3655 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 3656 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 3657 hclge_prepare_mac_addr(&req, addr); 3658 status = hclge_remove_mac_vlan_tbl(vport, &req); 3659 3660 return status; 3661 } 3662 3663 static int hclge_add_mc_addr(struct hnae3_handle *handle, 3664 const unsigned char *addr) 3665 { 3666 struct hclge_vport *vport = hclge_get_vport(handle); 3667 3668 return hclge_add_mc_addr_common(vport, addr); 3669 } 3670 3671 int hclge_add_mc_addr_common(struct hclge_vport *vport, 3672 const unsigned char *addr) 3673 { 3674 struct hclge_dev *hdev = vport->back; 3675 struct hclge_mac_vlan_tbl_entry_cmd req; 3676 struct hclge_desc desc[3]; 3677 u16 tbl_idx; 3678 int status; 3679 3680 /* mac addr check */ 3681 if (!is_multicast_ether_addr(addr)) { 3682 dev_err(&hdev->pdev->dev, 3683 "Add mc mac err! 
invalid mac:%pM.\n", 3684 addr); 3685 return -EINVAL; 3686 } 3687 memset(&req, 0, sizeof(req)); 3688 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 3689 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 3690 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 3691 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 3692 hclge_prepare_mac_addr(&req, addr); 3693 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 3694 if (!status) { 3695 /* This mac addr exist, update VFID for it */ 3696 hclge_update_desc_vfid(desc, vport->vport_id, false); 3697 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 3698 } else { 3699 /* This mac addr do not exist, add new entry for it */ 3700 memset(desc[0].data, 0, sizeof(desc[0].data)); 3701 memset(desc[1].data, 0, sizeof(desc[0].data)); 3702 memset(desc[2].data, 0, sizeof(desc[0].data)); 3703 hclge_update_desc_vfid(desc, vport->vport_id, false); 3704 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 3705 } 3706 3707 /* Set MTA table for this MAC address */ 3708 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 3709 status = hclge_set_mta_table_item(vport, tbl_idx, true); 3710 3711 return status; 3712 } 3713 3714 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 3715 const unsigned char *addr) 3716 { 3717 struct hclge_vport *vport = hclge_get_vport(handle); 3718 3719 return hclge_rm_mc_addr_common(vport, addr); 3720 } 3721 3722 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 3723 const unsigned char *addr) 3724 { 3725 struct hclge_dev *hdev = vport->back; 3726 struct hclge_mac_vlan_tbl_entry_cmd req; 3727 enum hclge_cmd_status status; 3728 struct hclge_desc desc[3]; 3729 u16 tbl_idx; 3730 3731 /* mac addr check */ 3732 if (!is_multicast_ether_addr(addr)) { 3733 dev_dbg(&hdev->pdev->dev, 3734 "Remove mc mac err! 
invalid mac:%pM.\n", 3735 addr); 3736 return -EINVAL; 3737 } 3738 3739 memset(&req, 0, sizeof(req)); 3740 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 3741 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 3742 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 3743 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 3744 hclge_prepare_mac_addr(&req, addr); 3745 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 3746 if (!status) { 3747 /* This mac addr exists, remove this handle's VFID for it */ 3748 hclge_update_desc_vfid(desc, vport->vport_id, true); 3749 3750 if (hclge_is_all_function_id_zero(desc)) 3751 /* All the vfids are zero, so delete this entry */ 3752 status = hclge_remove_mac_vlan_tbl(vport, &req); 3753 else 3754 /* Not all the vfids are zero, update the vfid */ 3755 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 3756 3757 } else { 3758 /* This mac addr does not exist, can't delete it */ 3759 dev_err(&hdev->pdev->dev, 3760 "Rm multicast mac addr failed, ret = %d.\n", 3761 status); 3762 return -EIO; 3763 } 3764 3765 /* Set MTA table for this MAC address */ 3766 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 3767 status = hclge_set_mta_table_item(vport, tbl_idx, false); 3768 3769 return status; 3770 } 3771 3772 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 3773 { 3774 struct hclge_vport *vport = hclge_get_vport(handle); 3775 struct hclge_dev *hdev = vport->back; 3776 3777 ether_addr_copy(p, hdev->hw.mac.mac_addr); 3778 } 3779 3780 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) 3781 { 3782 const unsigned char *new_addr = (const unsigned char *)p; 3783 struct hclge_vport *vport = hclge_get_vport(handle); 3784 struct hclge_dev *hdev = vport->back; 3785 3786 /* mac addr check */ 3787 if (is_zero_ether_addr(new_addr) || 3788 is_broadcast_ether_addr(new_addr) || 3789 is_multicast_ether_addr(new_addr)) { 3790 dev_err(&hdev->pdev->dev, 3791 "Change uc mac err!
invalid mac:%p.\n", 3792 new_addr); 3793 return -EINVAL; 3794 } 3795 3796 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); 3797 3798 if (!hclge_add_uc_addr(handle, new_addr)) { 3799 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 3800 return 0; 3801 } 3802 3803 return -EIO; 3804 } 3805 3806 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 3807 bool filter_en) 3808 { 3809 struct hclge_vlan_filter_ctrl_cmd *req; 3810 struct hclge_desc desc; 3811 int ret; 3812 3813 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 3814 3815 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 3816 req->vlan_type = vlan_type; 3817 req->vlan_fe = filter_en; 3818 3819 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3820 if (ret) { 3821 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 3822 ret); 3823 return ret; 3824 } 3825 3826 return 0; 3827 } 3828 3829 int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 3830 bool is_kill, u16 vlan, u8 qos, __be16 proto) 3831 { 3832 #define HCLGE_MAX_VF_BYTES 16 3833 struct hclge_vlan_filter_vf_cfg_cmd *req0; 3834 struct hclge_vlan_filter_vf_cfg_cmd *req1; 3835 struct hclge_desc desc[2]; 3836 u8 vf_byte_val; 3837 u8 vf_byte_off; 3838 int ret; 3839 3840 hclge_cmd_setup_basic_desc(&desc[0], 3841 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 3842 hclge_cmd_setup_basic_desc(&desc[1], 3843 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 3844 3845 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3846 3847 vf_byte_off = vfid / 8; 3848 vf_byte_val = 1 << (vfid % 8); 3849 3850 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 3851 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 3852 3853 req0->vlan_id = cpu_to_le16(vlan); 3854 req0->vlan_cfg = is_kill; 3855 3856 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 3857 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 3858 else 3859 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 3860 3861 ret = hclge_cmd_send(&hdev->hw, desc, 2); 3862 if (ret) { 3863 dev_err(&hdev->pdev->dev, 3864 "Send vf vlan command fail, ret =%d.\n", 3865 ret); 3866 return ret; 3867 } 3868 3869 if (!is_kill) { 3870 if (!req0->resp_code || req0->resp_code == 1) 3871 return 0; 3872 3873 dev_err(&hdev->pdev->dev, 3874 "Add vf vlan filter fail, ret =%d.\n", 3875 req0->resp_code); 3876 } else { 3877 if (!req0->resp_code) 3878 return 0; 3879 3880 dev_err(&hdev->pdev->dev, 3881 "Kill vf vlan filter fail, ret =%d.\n", 3882 req0->resp_code); 3883 } 3884 3885 return -EIO; 3886 } 3887 3888 static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, 3889 __be16 proto, u16 vlan_id, 3890 bool is_kill) 3891 { 3892 struct hclge_vport *vport = hclge_get_vport(handle); 3893 struct hclge_dev *hdev = vport->back; 3894 struct hclge_vlan_filter_pf_cfg_cmd *req; 3895 struct hclge_desc desc; 3896 u8 vlan_offset_byte_val; 3897 u8 vlan_offset_byte; 3898 u8 vlan_offset_160; 3899 int ret; 3900 3901 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 3902 3903 vlan_offset_160 = vlan_id / 160; 3904 vlan_offset_byte = (vlan_id % 160) / 8; 3905 vlan_offset_byte_val = 1 << (vlan_id % 8); 3906 3907 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 3908 req->vlan_offset = vlan_offset_160; 3909 req->vlan_cfg = is_kill; 3910 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 3911 3912 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3913 if (ret) { 3914 dev_err(&hdev->pdev->dev, 3915 "port vlan command, send fail, ret =%d.\n", 3916 ret); 3917 return ret; 3918 } 3919 3920 ret = 
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set pf vlan filter config fail, ret = %d.\n",
			ret);
		return -EIO;
	}

	return 0;
}

static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_VLAN_TYPE_VF_TABLE   0
#define HCLGE_VLAN_TYPE_PORT_TABLE 1
	struct hnae3_handle *handle;
	int ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
					 true);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
					 true);
	if (ret)
		return ret;

	handle = &hdev->vport[0].nic;
	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
		return -EINVAL;

	hdev->mps = new_mtu;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mtu);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu fail, ret = %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
	hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status = %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status = %d\n", ret);
		return ret;
	}

	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}

static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int reset_try_times = 0;
	int reset_status;
	int ret;

	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Send reset tqp cmd fail, ret = %d\n", ret);
		return;
	}

	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
		/* Wait for tqp hw reset */
		msleep(20);
		reset_status = hclge_get_reset_status(hdev, queue_id);
		if (reset_status)
			break;
	}

	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
		return;
	}

	ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false);
	if (ret) {
		dev_warn(&hdev->pdev->dev,
			 "Deassert the soft reset fail, ret = %d\n", ret);
		return;
	}
}

static u32 hclge_get_fw_version(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->fw_version;
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*auto_neg = hclge_get_autoneg(handle);

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, retval, is_resolved;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				   HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i, ret;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];

		switch (client->type) {
		case HNAE3_CLIENT_KNIC:
			hdev->nic_client = client;
			vport->nic.client = client;
			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			if (hdev->roce_client &&
			    hnae3_dev_roce_supported(hdev)) {
				struct hnae3_client *rc = hdev->roce_client;

				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = rc->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}

			break;
		case HNAE3_CLIENT_UNIC:
			hdev->nic_client = client;
			vport->nic.client = client;

			ret = client->ops->init_instance(&vport->nic);
			if (ret)
				goto err;

			break;
		case HNAE3_CLIENT_ROCE:
			if (hnae3_dev_roce_supported(hdev)) {
				hdev->roce_client = client;
				vport->roce.client = client;
			}

			if (hdev->roce_client) {
				ret = hclge_init_roce_base_info(vport);
				if (ret)
					goto err;

				ret = client->ops->init_instance(&vport->roce);
				if (ret)
					goto err;
			}
		}
	}

	return 0;
err:
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
		vport = &hdev->vport[i];
		if (hdev->roce_client)
			hdev->roce_client->ops->uninit_instance(&vport->roce,
								0);
		if (client->type == HNAE3_CLIENT_ROCE)
			return;
		if (client->ops->uninit_instance)
			client->ops->uninit_instance(&vport->nic, 0);
	}
}

static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		goto err_no_drvdata;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA\n");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->back = hdev;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_no_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
		pci_disable_msix(pdev);
		devm_kfree(&pdev->dev, hdev->msix_entries);
		hdev->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev) {
		ret = -ENOMEM;
		goto err_hclge_dev;
	}

	hdev->flag |= HCLGE_FLAG_USE_MSIX;
	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	ret = hclge_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI init failed\n");
		goto err_pci_init;
	}

	/* Command queue initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	ret = hclge_get_cap(hdev);
	if (ret) {
		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		return ret;
	}

	if (hdev->flag & HCLGE_FLAG_USE_MSIX)
		ret = hclge_init_msix(hdev);
	else
		ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_buffer_alloc(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Buffer allocate fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
		return ret;
	}

	hclge_dcb_ops_set(hdev);

	setup_timer(&hdev->service_timer, hclge_service_timer,
		    (unsigned long)hdev);
	INIT_WORK(&hdev->service_task, hclge_service_task);

	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
	return 0;

err_cmd_init:
	pci_release_regions(pdev);
err_pci_init:
	pci_set_drvdata(pdev, NULL);
err_hclge_dev:
	return ret;
}

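/* hclge_uninit_ae_dev - reverse of hclge_init_ae_dev: stop the service
 * timer and task, tear down SRIOV and the MDIO bus, then destroy the
 * command queue and release the PCI resources held by this ae_dev.
 */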
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hclge_disable_sriov(hdev);

	if (hdev->service_timer.data)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	hclge_destroy_cmd_queue(&hdev->hw);
	hclge_pci_uninit(hdev);
	ae_dev->priv = NULL;
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_handle_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
	.get_vector = hclge_get_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.set_vlan_filter = hclge_set_port_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	return hnae3_register_ae_algo(&ae_algo);
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);