1 /* 2 * Copyright (c) 2016-2017 Hisilicon Limited. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 */ 9 10 #include <linux/acpi.h> 11 #include <linux/device.h> 12 #include <linux/etherdevice.h> 13 #include <linux/init.h> 14 #include <linux/interrupt.h> 15 #include <linux/kernel.h> 16 #include <linux/module.h> 17 #include <linux/netdevice.h> 18 #include <linux/pci.h> 19 #include <linux/platform_device.h> 20 #include <linux/if_vlan.h> 21 #include <net/rtnetlink.h> 22 #include "hclge_cmd.h" 23 #include "hclge_dcb.h" 24 #include "hclge_main.h" 25 #include "hclge_mbx.h" 26 #include "hclge_mdio.h" 27 #include "hclge_tm.h" 28 #include "hnae3.h" 29 30 #define HCLGE_NAME "hclge" 31 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) 32 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) 33 #define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) 34 #define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) 35 36 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 37 enum hclge_mta_dmac_sel_type mta_mac_sel, 38 bool enable); 39 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); 40 static int hclge_init_vlan_config(struct hclge_dev *hdev); 41 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); 42 static int hclge_update_led_status(struct hclge_dev *hdev); 43 44 static struct hnae3_ae_algo ae_algo; 45 46 static const struct pci_device_id ae_algo_pci_tbl[] = { 47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, 50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, 51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 54 /* required last entry */ 55 {0, } 56 }; 57 58 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); 59 60 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { 61 "Mac Loopback test", 62 "Serdes Loopback test", 63 "Phy Loopback test" 64 }; 65 66 static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { 67 {"igu_rx_oversize_pkt", 68 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, 69 {"igu_rx_undersize_pkt", 70 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, 71 {"igu_rx_out_all_pkt", 72 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, 73 {"igu_rx_uni_pkt", 74 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, 75 {"igu_rx_multi_pkt", 76 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, 77 {"igu_rx_broad_pkt", 78 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, 79 {"egu_tx_out_all_pkt", 80 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, 81 {"egu_tx_uni_pkt", 82 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, 83 {"egu_tx_multi_pkt", 84 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, 85 {"egu_tx_broad_pkt", 86 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, 87 {"ssu_ppp_mac_key_num", 88 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, 89 {"ssu_ppp_host_key_num", 90 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, 91 {"ppp_ssu_mac_rlt_num", 92 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, 93 {"ppp_ssu_host_rlt_num", 94 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, 95 {"ssu_tx_in_num", 96 
HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, 97 {"ssu_tx_out_num", 98 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, 99 {"ssu_rx_in_num", 100 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, 101 {"ssu_rx_out_num", 102 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} 103 }; 104 105 static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { 106 {"igu_rx_err_pkt", 107 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, 108 {"igu_rx_no_eof_pkt", 109 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, 110 {"igu_rx_no_sof_pkt", 111 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, 112 {"egu_tx_1588_pkt", 113 HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, 114 {"ssu_full_drop_num", 115 HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, 116 {"ssu_part_drop_num", 117 HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, 118 {"ppp_key_drop_num", 119 HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, 120 {"ppp_rlt_drop_num", 121 HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, 122 {"ssu_key_drop_num", 123 HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, 124 {"pkt_curr_buf_cnt", 125 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, 126 {"qcn_fb_rcv_cnt", 127 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, 128 {"qcn_fb_drop_cnt", 129 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, 130 {"qcn_fb_invaild_cnt", 131 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, 132 {"rx_packet_tc0_in_cnt", 133 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, 134 {"rx_packet_tc1_in_cnt", 135 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, 136 {"rx_packet_tc2_in_cnt", 137 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, 138 {"rx_packet_tc3_in_cnt", 139 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, 140 {"rx_packet_tc4_in_cnt", 141 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, 142 {"rx_packet_tc5_in_cnt", 143 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, 144 {"rx_packet_tc6_in_cnt", 145 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, 146 {"rx_packet_tc7_in_cnt", 147 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, 148 {"rx_packet_tc0_out_cnt", 149 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, 150 {"rx_packet_tc1_out_cnt", 151 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, 152 {"rx_packet_tc2_out_cnt", 153 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, 154 {"rx_packet_tc3_out_cnt", 155 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, 156 {"rx_packet_tc4_out_cnt", 157 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, 158 {"rx_packet_tc5_out_cnt", 159 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, 160 {"rx_packet_tc6_out_cnt", 161 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, 162 {"rx_packet_tc7_out_cnt", 163 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, 164 {"tx_packet_tc0_in_cnt", 165 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, 166 {"tx_packet_tc1_in_cnt", 167 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, 168 {"tx_packet_tc2_in_cnt", 169 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, 170 {"tx_packet_tc3_in_cnt", 171 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, 172 {"tx_packet_tc4_in_cnt", 173 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, 174 {"tx_packet_tc5_in_cnt", 175 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, 176 {"tx_packet_tc6_in_cnt", 177 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, 178 {"tx_packet_tc7_in_cnt", 179 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, 180 {"tx_packet_tc0_out_cnt", 181 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, 182 {"tx_packet_tc1_out_cnt", 183 
HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, 184 {"tx_packet_tc2_out_cnt", 185 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, 186 {"tx_packet_tc3_out_cnt", 187 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, 188 {"tx_packet_tc4_out_cnt", 189 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, 190 {"tx_packet_tc5_out_cnt", 191 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, 192 {"tx_packet_tc6_out_cnt", 193 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, 194 {"tx_packet_tc7_out_cnt", 195 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, 196 {"pkt_curr_buf_tc0_cnt", 197 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, 198 {"pkt_curr_buf_tc1_cnt", 199 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, 200 {"pkt_curr_buf_tc2_cnt", 201 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, 202 {"pkt_curr_buf_tc3_cnt", 203 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, 204 {"pkt_curr_buf_tc4_cnt", 205 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, 206 {"pkt_curr_buf_tc5_cnt", 207 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, 208 {"pkt_curr_buf_tc6_cnt", 209 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, 210 {"pkt_curr_buf_tc7_cnt", 211 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, 212 {"mb_uncopy_num", 213 HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, 214 {"lo_pri_unicast_rlt_drop_num", 215 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, 216 {"hi_pri_multicast_rlt_drop_num", 217 HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, 218 {"lo_pri_multicast_rlt_drop_num", 219 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, 220 {"rx_oq_drop_pkt_cnt", 221 HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, 222 {"tx_oq_drop_pkt_cnt", 223 HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, 224 {"nic_l2_err_drop_pkt_cnt", 225 HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, 226 {"roc_l2_err_drop_pkt_cnt", 227 HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} 228 }; 229 230 static const struct hclge_comm_stats_str g_mac_stats_string[] = { 231 {"mac_tx_mac_pause_num", 232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, 233 {"mac_rx_mac_pause_num", 234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, 235 {"mac_tx_pfc_pri0_pkt_num", 236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 237 {"mac_tx_pfc_pri1_pkt_num", 238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 239 {"mac_tx_pfc_pri2_pkt_num", 240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 241 {"mac_tx_pfc_pri3_pkt_num", 242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 243 {"mac_tx_pfc_pri4_pkt_num", 244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 245 {"mac_tx_pfc_pri5_pkt_num", 246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 247 {"mac_tx_pfc_pri6_pkt_num", 248 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 249 {"mac_tx_pfc_pri7_pkt_num", 250 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 251 {"mac_rx_pfc_pri0_pkt_num", 252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 253 {"mac_rx_pfc_pri1_pkt_num", 254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 255 {"mac_rx_pfc_pri2_pkt_num", 256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 257 {"mac_rx_pfc_pri3_pkt_num", 258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 259 {"mac_rx_pfc_pri4_pkt_num", 260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 261 {"mac_rx_pfc_pri5_pkt_num", 262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 263 {"mac_rx_pfc_pri6_pkt_num", 264 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 265 {"mac_rx_pfc_pri7_pkt_num", 266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 267 {"mac_tx_total_pkt_num", 268 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 269 {"mac_tx_total_oct_num", 270 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 271 {"mac_tx_good_pkt_num", 272 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 273 {"mac_tx_bad_pkt_num", 274 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 275 {"mac_tx_good_oct_num", 276 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 277 {"mac_tx_bad_oct_num", 278 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 279 {"mac_tx_uni_pkt_num", 280 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 281 {"mac_tx_multi_pkt_num", 282 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 283 {"mac_tx_broad_pkt_num", 284 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 285 {"mac_tx_undersize_pkt_num", 286 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 287 {"mac_tx_oversize_pkt_num", 288 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, 289 {"mac_tx_64_oct_pkt_num", 290 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 291 {"mac_tx_65_127_oct_pkt_num", 292 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 293 {"mac_tx_128_255_oct_pkt_num", 294 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 295 {"mac_tx_256_511_oct_pkt_num", 296 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 297 {"mac_tx_512_1023_oct_pkt_num", 298 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 299 {"mac_tx_1024_1518_oct_pkt_num", 300 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 301 {"mac_tx_1519_2047_oct_pkt_num", 302 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, 303 {"mac_tx_2048_4095_oct_pkt_num", 304 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 305 {"mac_tx_4096_8191_oct_pkt_num", 306 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 307 {"mac_tx_8192_9216_oct_pkt_num", 308 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 309 {"mac_tx_9217_12287_oct_pkt_num", 310 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, 311 {"mac_tx_12288_16383_oct_pkt_num", 312 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, 313 {"mac_tx_1519_max_good_pkt_num", 314 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, 315 {"mac_tx_1519_max_bad_pkt_num", 316 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, 317 {"mac_rx_total_pkt_num", 318 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 319 {"mac_rx_total_oct_num", 320 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 321 {"mac_rx_good_pkt_num", 322 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 323 {"mac_rx_bad_pkt_num", 324 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 325 {"mac_rx_good_oct_num", 326 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 327 {"mac_rx_bad_oct_num", 328 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 329 {"mac_rx_uni_pkt_num", 330 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 331 {"mac_rx_multi_pkt_num", 332 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 333 {"mac_rx_broad_pkt_num", 334 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 335 {"mac_rx_undersize_pkt_num", 336 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 337 {"mac_rx_oversize_pkt_num", 338 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, 339 {"mac_rx_64_oct_pkt_num", 340 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 341 {"mac_rx_65_127_oct_pkt_num", 342 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 343 {"mac_rx_128_255_oct_pkt_num", 344 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 345 {"mac_rx_256_511_oct_pkt_num", 346 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 347 {"mac_rx_512_1023_oct_pkt_num", 348 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 349 {"mac_rx_1024_1518_oct_pkt_num", 350 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 351 {"mac_rx_1519_2047_oct_pkt_num", 352 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 353 {"mac_rx_2048_4095_oct_pkt_num", 354 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 355 {"mac_rx_4096_8191_oct_pkt_num", 356 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 357 {"mac_rx_8192_9216_oct_pkt_num", 358 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 359 {"mac_rx_9217_12287_oct_pkt_num", 360 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 361 {"mac_rx_12288_16383_oct_pkt_num", 362 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 363 {"mac_rx_1519_max_good_pkt_num", 364 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 365 {"mac_rx_1519_max_bad_pkt_num", 366 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 367 368 {"mac_tx_fragment_pkt_num", 369 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 370 {"mac_tx_undermin_pkt_num", 371 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 372 {"mac_tx_jabber_pkt_num", 373 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 374 {"mac_tx_err_all_pkt_num", 375 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 376 {"mac_tx_from_app_good_pkt_num", 377 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 378 {"mac_tx_from_app_bad_pkt_num", 379 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 380 {"mac_rx_fragment_pkt_num", 381 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 382 {"mac_rx_undermin_pkt_num", 383 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 384 {"mac_rx_jabber_pkt_num", 385 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 386 {"mac_rx_fcs_err_pkt_num", 387 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 388 {"mac_rx_send_app_good_pkt_num", 389 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 390 {"mac_rx_send_app_bad_pkt_num", 391 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 392 }; 393 394 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 395 { 396 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 397 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP), 398 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), 399 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), 400 .i_port_bitmap = 0x1, 401 }, 402 }; 403 404 static int hclge_64_bit_update_stats(struct hclge_dev *hdev) 405 { 406 #define HCLGE_64_BIT_CMD_NUM 5 407 #define HCLGE_64_BIT_RTN_DATANUM 4 408 u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); 409 struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; 410 __le64 *desc_data; 411 int i, k, n; 412 int ret; 413 414 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); 415 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); 416 if (ret) { 417 dev_err(&hdev->pdev->dev, 418 "Get 64 bit pkt stats fail, status = %d.\n", ret); 419 return ret; 420 } 421 422 for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { 423 if (unlikely(i == 0)) { 424 desc_data = (__le64 *)(&desc[i].data[0]); 425 n = HCLGE_64_BIT_RTN_DATANUM - 1; 426 } else { 427 desc_data = (__le64 *)(&desc[i]); 428 n = HCLGE_64_BIT_RTN_DATANUM; 429 } 430 for (k = 0; k < n; k++) { 431 *data++ += le64_to_cpu(*desc_data); 432 desc_data++; 433 } 434 } 435 436 return 0; 437 } 438 439 static void 
hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev)
{
	struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats;
	struct hclge_desc desc;
	__le64 *desc_data;
	int ret;

	/* for fiber port, need to query the total rx/tx packet statistics,
	 * used for data transfer checking.
516 */ 517 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) 518 return 0; 519 520 if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 521 return 0; 522 523 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true); 524 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 525 if (ret) { 526 dev_err(&hdev->pdev->dev, 527 "Get MAC total pkt stats fail, ret = %d\n", ret); 528 529 return ret; 530 } 531 532 desc_data = (__le64 *)(&desc.data[0]); 533 mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++); 534 mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data); 535 536 return 0; 537 } 538 539 static int hclge_mac_update_stats(struct hclge_dev *hdev) 540 { 541 #define HCLGE_MAC_CMD_NUM 21 542 #define HCLGE_RTN_DATA_NUM 4 543 544 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); 545 struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; 546 __le64 *desc_data; 547 int i, k, n; 548 int ret; 549 550 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); 551 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); 552 if (ret) { 553 dev_err(&hdev->pdev->dev, 554 "Get MAC pkt stats fail, status = %d.\n", ret); 555 556 return ret; 557 } 558 559 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { 560 if (unlikely(i == 0)) { 561 desc_data = (__le64 *)(&desc[i].data[0]); 562 n = HCLGE_RTN_DATA_NUM - 2; 563 } else { 564 desc_data = (__le64 *)(&desc[i]); 565 n = HCLGE_RTN_DATA_NUM; 566 } 567 for (k = 0; k < n; k++) { 568 *data++ += le64_to_cpu(*desc_data); 569 desc_data++; 570 } 571 } 572 573 return 0; 574 } 575 576 static int hclge_tqps_update_stats(struct hnae3_handle *handle) 577 { 578 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 579 struct hclge_vport *vport = hclge_get_vport(handle); 580 struct hclge_dev *hdev = vport->back; 581 struct hnae3_queue *queue; 582 struct hclge_desc desc[1]; 583 struct hclge_tqp *tqp; 584 int ret, i; 585 586 for (i = 0; i < kinfo->num_tqps; i++) { 587 queue = handle->kinfo.tqp[i]; 588 tqp = container_of(queue, struct hclge_tqp, q); 589 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 590 hclge_cmd_setup_basic_desc(&desc[0], 591 HCLGE_OPC_QUERY_RX_STATUS, 592 true); 593 594 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 595 ret = hclge_cmd_send(&hdev->hw, desc, 1); 596 if (ret) { 597 dev_err(&hdev->pdev->dev, 598 "Query tqp stat fail, status = %d,queue = %d\n", 599 ret, i); 600 return ret; 601 } 602 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 603 le32_to_cpu(desc[0].data[1]); 604 } 605 606 for (i = 0; i < kinfo->num_tqps; i++) { 607 queue = handle->kinfo.tqp[i]; 608 tqp = container_of(queue, struct hclge_tqp, q); 609 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 610 hclge_cmd_setup_basic_desc(&desc[0], 611 HCLGE_OPC_QUERY_TX_STATUS, 612 true); 613 614 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 615 ret = hclge_cmd_send(&hdev->hw, desc, 1); 616 if (ret) { 617 dev_err(&hdev->pdev->dev, 618 "Query tqp stat fail, status = %d,queue = %d\n", 619 ret, i); 620 return ret; 621 } 622 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 623 le32_to_cpu(desc[0].data[1]); 624 } 625 626 return 0; 627 } 628 629 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 630 { 631 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 632 struct hclge_tqp *tqp; 633 u64 *buff = data; 634 int i; 635 636 for (i = 0; i < kinfo->num_tqps; i++) { 637 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 638 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 639 } 640 641 for (i = 0; i < kinfo->num_tqps; i++) { 642 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 
643 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 644 } 645 646 return buff; 647 } 648 649 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) 650 { 651 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 652 653 return kinfo->num_tqps * (2); 654 } 655 656 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 657 { 658 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 659 u8 *buff = data; 660 int i = 0; 661 662 for (i = 0; i < kinfo->num_tqps; i++) { 663 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], 664 struct hclge_tqp, q); 665 snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", 666 tqp->index); 667 buff = buff + ETH_GSTRING_LEN; 668 } 669 670 for (i = 0; i < kinfo->num_tqps; i++) { 671 struct hclge_tqp *tqp = container_of(kinfo->tqp[i], 672 struct hclge_tqp, q); 673 snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", 674 tqp->index); 675 buff = buff + ETH_GSTRING_LEN; 676 } 677 678 return buff; 679 } 680 681 static u64 *hclge_comm_get_stats(void *comm_stats, 682 const struct hclge_comm_stats_str strs[], 683 int size, u64 *data) 684 { 685 u64 *buf = data; 686 u32 i; 687 688 for (i = 0; i < size; i++) 689 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); 690 691 return buf + size; 692 } 693 694 static u8 *hclge_comm_get_strings(u32 stringset, 695 const struct hclge_comm_stats_str strs[], 696 int size, u8 *data) 697 { 698 char *buff = (char *)data; 699 u32 i; 700 701 if (stringset != ETH_SS_STATS) 702 return buff; 703 704 for (i = 0; i < size; i++) { 705 snprintf(buff, ETH_GSTRING_LEN, 706 strs[i].desc); 707 buff = buff + ETH_GSTRING_LEN; 708 } 709 710 return (u8 *)buff; 711 } 712 713 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, 714 struct net_device_stats *net_stats) 715 { 716 net_stats->tx_dropped = 0; 717 net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; 718 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; 719 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; 720 721 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; 722 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; 723 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; 724 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; 725 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; 726 727 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; 728 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; 729 730 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; 731 net_stats->rx_length_errors = 732 hw_stats->mac_stats.mac_rx_undersize_pkt_num; 733 net_stats->rx_length_errors += 734 hw_stats->mac_stats.mac_rx_oversize_pkt_num; 735 net_stats->rx_over_errors = 736 hw_stats->mac_stats.mac_rx_oversize_pkt_num; 737 } 738 739 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 740 { 741 struct hnae3_handle *handle; 742 int status; 743 744 handle = &hdev->vport[0].nic; 745 if (handle->client) { 746 status = hclge_tqps_update_stats(handle); 747 if (status) { 748 dev_err(&hdev->pdev->dev, 749 "Update TQPS stats fail, status = %d.\n", 750 status); 751 } 752 } 753 754 status = hclge_mac_update_stats(hdev); 755 if (status) 756 dev_err(&hdev->pdev->dev, 757 "Update MAC stats fail, status = %d.\n", status); 758 759 status = hclge_32_bit_update_stats(hdev); 760 if (status) 761 dev_err(&hdev->pdev->dev, 762 "Update 32 bit stats fail, status = %d.\n", 763 
status); 764 765 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); 766 } 767 768 static void hclge_update_stats(struct hnae3_handle *handle, 769 struct net_device_stats *net_stats) 770 { 771 struct hclge_vport *vport = hclge_get_vport(handle); 772 struct hclge_dev *hdev = vport->back; 773 struct hclge_hw_stats *hw_stats = &hdev->hw_stats; 774 int status; 775 776 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 777 return; 778 779 status = hclge_mac_update_stats(hdev); 780 if (status) 781 dev_err(&hdev->pdev->dev, 782 "Update MAC stats fail, status = %d.\n", 783 status); 784 785 status = hclge_32_bit_update_stats(hdev); 786 if (status) 787 dev_err(&hdev->pdev->dev, 788 "Update 32 bit stats fail, status = %d.\n", 789 status); 790 791 status = hclge_64_bit_update_stats(hdev); 792 if (status) 793 dev_err(&hdev->pdev->dev, 794 "Update 64 bit stats fail, status = %d.\n", 795 status); 796 797 status = hclge_tqps_update_stats(handle); 798 if (status) 799 dev_err(&hdev->pdev->dev, 800 "Update TQPS stats fail, status = %d.\n", 801 status); 802 803 hclge_update_netstat(hw_stats, net_stats); 804 805 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); 806 } 807 808 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 809 { 810 #define HCLGE_LOOPBACK_TEST_FLAGS 0x7 811 812 struct hclge_vport *vport = hclge_get_vport(handle); 813 struct hclge_dev *hdev = vport->back; 814 int count = 0; 815 816 /* Loopback test support rules: 817 * mac: only GE mode support 818 * serdes: all mac mode will support include GE/XGE/LGE/CGE 819 * phy: only support when phy device exist on board 820 */ 821 if (stringset == ETH_SS_TEST) { 822 /* clear loopback bit flags at first */ 823 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 824 if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 825 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 826 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 827 count += 1; 828 handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; 829 } else { 830 count = -EOPNOTSUPP; 831 } 832 } else if (stringset == ETH_SS_STATS) { 833 count = ARRAY_SIZE(g_mac_stats_string) + 834 ARRAY_SIZE(g_all_32bit_stats_string) + 835 ARRAY_SIZE(g_all_64bit_stats_string) + 836 hclge_tqps_get_sset_count(handle, stringset); 837 } 838 839 return count; 840 } 841 842 static void hclge_get_strings(struct hnae3_handle *handle, 843 u32 stringset, 844 u8 *data) 845 { 846 u8 *p = (char *)data; 847 int size; 848 849 if (stringset == ETH_SS_STATS) { 850 size = ARRAY_SIZE(g_mac_stats_string); 851 p = hclge_comm_get_strings(stringset, 852 g_mac_stats_string, 853 size, 854 p); 855 size = ARRAY_SIZE(g_all_32bit_stats_string); 856 p = hclge_comm_get_strings(stringset, 857 g_all_32bit_stats_string, 858 size, 859 p); 860 size = ARRAY_SIZE(g_all_64bit_stats_string); 861 p = hclge_comm_get_strings(stringset, 862 g_all_64bit_stats_string, 863 size, 864 p); 865 p = hclge_tqps_get_strings(handle, p); 866 } else if (stringset == ETH_SS_TEST) { 867 if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { 868 memcpy(p, 869 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], 870 ETH_GSTRING_LEN); 871 p += ETH_GSTRING_LEN; 872 } 873 if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { 874 memcpy(p, 875 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], 876 ETH_GSTRING_LEN); 877 p += ETH_GSTRING_LEN; 878 } 879 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 880 memcpy(p, 881 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], 882 ETH_GSTRING_LEN); 883 p += ETH_GSTRING_LEN; 884 } 885 } 886 } 887 888 static 
void hclge_get_stats(struct hnae3_handle *handle, u64 *data) 889 { 890 struct hclge_vport *vport = hclge_get_vport(handle); 891 struct hclge_dev *hdev = vport->back; 892 u64 *p; 893 894 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, 895 g_mac_stats_string, 896 ARRAY_SIZE(g_mac_stats_string), 897 data); 898 p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, 899 g_all_32bit_stats_string, 900 ARRAY_SIZE(g_all_32bit_stats_string), 901 p); 902 p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, 903 g_all_64bit_stats_string, 904 ARRAY_SIZE(g_all_64bit_stats_string), 905 p); 906 p = hclge_tqps_get_stats(handle, p); 907 } 908 909 static int hclge_parse_func_status(struct hclge_dev *hdev, 910 struct hclge_func_status_cmd *status) 911 { 912 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 913 return -EINVAL; 914 915 /* Set the pf to main pf */ 916 if (status->pf_state & HCLGE_PF_STATE_MAIN) 917 hdev->flag |= HCLGE_FLAG_MAIN; 918 else 919 hdev->flag &= ~HCLGE_FLAG_MAIN; 920 921 return 0; 922 } 923 924 static int hclge_query_function_status(struct hclge_dev *hdev) 925 { 926 struct hclge_func_status_cmd *req; 927 struct hclge_desc desc; 928 int timeout = 0; 929 int ret; 930 931 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 932 req = (struct hclge_func_status_cmd *)desc.data; 933 934 do { 935 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 936 if (ret) { 937 dev_err(&hdev->pdev->dev, 938 "query function status failed %d.\n", 939 ret); 940 941 return ret; 942 } 943 944 /* Check pf reset is done */ 945 if (req->pf_state) 946 break; 947 usleep_range(1000, 2000); 948 } while (timeout++ < 5); 949 950 ret = hclge_parse_func_status(hdev, req); 951 952 return ret; 953 } 954 955 static int hclge_query_pf_resource(struct hclge_dev *hdev) 956 { 957 struct hclge_pf_res_cmd *req; 958 struct hclge_desc desc; 959 int ret; 960 961 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 962 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 963 if (ret) { 964 dev_err(&hdev->pdev->dev, 965 "query pf resource failed %d.\n", ret); 966 return ret; 967 } 968 969 req = (struct hclge_pf_res_cmd *)desc.data; 970 hdev->num_tqps = __le16_to_cpu(req->tqp_num); 971 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 972 973 if (hnae3_dev_roce_supported(hdev)) { 974 hdev->num_roce_msi = 975 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), 976 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 977 978 /* PF should have NIC vectors and Roce vectors, 979 * NIC vectors are queued before Roce vectors. 
980 */ 981 hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; 982 } else { 983 hdev->num_msi = 984 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), 985 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 986 } 987 988 return 0; 989 } 990 991 static int hclge_parse_speed(int speed_cmd, int *speed) 992 { 993 switch (speed_cmd) { 994 case 6: 995 *speed = HCLGE_MAC_SPEED_10M; 996 break; 997 case 7: 998 *speed = HCLGE_MAC_SPEED_100M; 999 break; 1000 case 0: 1001 *speed = HCLGE_MAC_SPEED_1G; 1002 break; 1003 case 1: 1004 *speed = HCLGE_MAC_SPEED_10G; 1005 break; 1006 case 2: 1007 *speed = HCLGE_MAC_SPEED_25G; 1008 break; 1009 case 3: 1010 *speed = HCLGE_MAC_SPEED_40G; 1011 break; 1012 case 4: 1013 *speed = HCLGE_MAC_SPEED_50G; 1014 break; 1015 case 5: 1016 *speed = HCLGE_MAC_SPEED_100G; 1017 break; 1018 default: 1019 return -EINVAL; 1020 } 1021 1022 return 0; 1023 } 1024 1025 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, 1026 u8 speed_ability) 1027 { 1028 unsigned long *supported = hdev->hw.mac.supported; 1029 1030 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1031 set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 1032 supported); 1033 1034 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1035 set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 1036 supported); 1037 1038 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1039 set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 1040 supported); 1041 1042 if (speed_ability & HCLGE_SUPPORT_50G_BIT) 1043 set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 1044 supported); 1045 1046 if (speed_ability & HCLGE_SUPPORT_100G_BIT) 1047 set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 1048 supported); 1049 1050 set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); 1051 set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); 1052 } 1053 1054 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) 1055 { 1056 u8 media_type = hdev->hw.mac.media_type; 1057 1058 if (media_type != HNAE3_MEDIA_TYPE_FIBER) 1059 return; 1060 1061 hclge_parse_fiber_link_mode(hdev, speed_ability); 1062 } 1063 1064 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) 1065 { 1066 struct hclge_cfg_param_cmd *req; 1067 u64 mac_addr_tmp_high; 1068 u64 mac_addr_tmp; 1069 int i; 1070 1071 req = (struct hclge_cfg_param_cmd *)desc[0].data; 1072 1073 /* get the configuration */ 1074 cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), 1075 HCLGE_CFG_VMDQ_M, 1076 HCLGE_CFG_VMDQ_S); 1077 cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), 1078 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); 1079 cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), 1080 HCLGE_CFG_TQP_DESC_N_M, 1081 HCLGE_CFG_TQP_DESC_N_S); 1082 1083 cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), 1084 HCLGE_CFG_PHY_ADDR_M, 1085 HCLGE_CFG_PHY_ADDR_S); 1086 cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), 1087 HCLGE_CFG_MEDIA_TP_M, 1088 HCLGE_CFG_MEDIA_TP_S); 1089 cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), 1090 HCLGE_CFG_RX_BUF_LEN_M, 1091 HCLGE_CFG_RX_BUF_LEN_S); 1092 /* get mac_address */ 1093 mac_addr_tmp = __le32_to_cpu(req->param[2]); 1094 mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), 1095 HCLGE_CFG_MAC_ADDR_H_M, 1096 HCLGE_CFG_MAC_ADDR_H_S); 1097 1098 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 1099 1100 cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), 1101 HCLGE_CFG_DEFAULT_SPEED_M, 1102 HCLGE_CFG_DEFAULT_SPEED_S); 1103 cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]), 
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_M,
					    HCLGE_CFG_SPEED_ABILITY_S);
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be fetched
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length must be in units of 4 bytes when sent to hardware */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);
	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Non-contiguous TCs are not currently supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

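	/* Illustrative example (not from the original source): on a
	 * DCB-capable device reporting cfg.tc_num = 4, tc_max = pfc_max = 4
	 * and the loop above leaves hw_tc_map = 0xF, i.e. TC0-TC3 enabled as
	 * one contiguous block starting at TC0.
	 */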
hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; 1228 1229 return ret; 1230 } 1231 1232 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, 1233 int tso_mss_max) 1234 { 1235 struct hclge_cfg_tso_status_cmd *req; 1236 struct hclge_desc desc; 1237 u16 tso_mss; 1238 1239 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); 1240 1241 req = (struct hclge_cfg_tso_status_cmd *)desc.data; 1242 1243 tso_mss = 0; 1244 hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 1245 HCLGE_TSO_MSS_MIN_S, tso_mss_min); 1246 req->tso_mss_min = cpu_to_le16(tso_mss); 1247 1248 tso_mss = 0; 1249 hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 1250 HCLGE_TSO_MSS_MIN_S, tso_mss_max); 1251 req->tso_mss_max = cpu_to_le16(tso_mss); 1252 1253 return hclge_cmd_send(&hdev->hw, &desc, 1); 1254 } 1255 1256 static int hclge_alloc_tqps(struct hclge_dev *hdev) 1257 { 1258 struct hclge_tqp *tqp; 1259 int i; 1260 1261 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 1262 sizeof(struct hclge_tqp), GFP_KERNEL); 1263 if (!hdev->htqp) 1264 return -ENOMEM; 1265 1266 tqp = hdev->htqp; 1267 1268 for (i = 0; i < hdev->num_tqps; i++) { 1269 tqp->dev = &hdev->pdev->dev; 1270 tqp->index = i; 1271 1272 tqp->q.ae_algo = &ae_algo; 1273 tqp->q.buf_size = hdev->rx_buf_len; 1274 tqp->q.desc_num = hdev->num_desc; 1275 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + 1276 i * HCLGE_TQP_REG_SIZE; 1277 1278 tqp++; 1279 } 1280 1281 return 0; 1282 } 1283 1284 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, 1285 u16 tqp_pid, u16 tqp_vid, bool is_pf) 1286 { 1287 struct hclge_tqp_map_cmd *req; 1288 struct hclge_desc desc; 1289 int ret; 1290 1291 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); 1292 1293 req = (struct hclge_tqp_map_cmd *)desc.data; 1294 req->tqp_id = cpu_to_le16(tqp_pid); 1295 req->tqp_vf = func_id; 1296 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | 1297 1 << HCLGE_TQP_MAP_EN_B; 1298 req->tqp_vid = cpu_to_le16(tqp_vid); 1299 1300 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1301 if (ret) { 1302 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", 1303 ret); 1304 return ret; 1305 } 1306 1307 return 0; 1308 } 1309 1310 static int hclge_assign_tqp(struct hclge_vport *vport, 1311 struct hnae3_queue **tqp, u16 num_tqps) 1312 { 1313 struct hclge_dev *hdev = vport->back; 1314 int i, alloced; 1315 1316 for (i = 0, alloced = 0; i < hdev->num_tqps && 1317 alloced < num_tqps; i++) { 1318 if (!hdev->htqp[i].alloced) { 1319 hdev->htqp[i].q.handle = &vport->nic; 1320 hdev->htqp[i].q.tqp_index = alloced; 1321 tqp[alloced] = &hdev->htqp[i].q; 1322 hdev->htqp[i].alloced = true; 1323 alloced++; 1324 } 1325 } 1326 vport->alloc_tqps = num_tqps; 1327 1328 return 0; 1329 } 1330 1331 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) 1332 { 1333 struct hnae3_handle *nic = &vport->nic; 1334 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1335 struct hclge_dev *hdev = vport->back; 1336 int i, ret; 1337 1338 kinfo->num_desc = hdev->num_desc; 1339 kinfo->rx_buf_len = hdev->rx_buf_len; 1340 kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); 1341 kinfo->rss_size 1342 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); 1343 kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; 1344 1345 for (i = 0; i < HNAE3_MAX_TC; i++) { 1346 if (hdev->hw_tc_map & BIT(i)) { 1347 kinfo->tc_info[i].enable = true; 1348 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; 1349 kinfo->tc_info[i].tqp_count = kinfo->rss_size; 1350 kinfo->tc_info[i].tc = i; 1351 } else { 
1352 /* Set to default queue if TC is disable */ 1353 kinfo->tc_info[i].enable = false; 1354 kinfo->tc_info[i].tqp_offset = 0; 1355 kinfo->tc_info[i].tqp_count = 1; 1356 kinfo->tc_info[i].tc = 0; 1357 } 1358 } 1359 1360 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 1361 sizeof(struct hnae3_queue *), GFP_KERNEL); 1362 if (!kinfo->tqp) 1363 return -ENOMEM; 1364 1365 ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); 1366 if (ret) { 1367 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1368 return -EINVAL; 1369 } 1370 1371 return 0; 1372 } 1373 1374 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, 1375 struct hclge_vport *vport) 1376 { 1377 struct hnae3_handle *nic = &vport->nic; 1378 struct hnae3_knic_private_info *kinfo; 1379 u16 i; 1380 1381 kinfo = &nic->kinfo; 1382 for (i = 0; i < kinfo->num_tqps; i++) { 1383 struct hclge_tqp *q = 1384 container_of(kinfo->tqp[i], struct hclge_tqp, q); 1385 bool is_pf; 1386 int ret; 1387 1388 is_pf = !(vport->vport_id); 1389 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, 1390 i, is_pf); 1391 if (ret) 1392 return ret; 1393 } 1394 1395 return 0; 1396 } 1397 1398 static int hclge_map_tqp(struct hclge_dev *hdev) 1399 { 1400 struct hclge_vport *vport = hdev->vport; 1401 u16 i, num_vport; 1402 1403 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1404 for (i = 0; i < num_vport; i++) { 1405 int ret; 1406 1407 ret = hclge_map_tqp_to_vport(hdev, vport); 1408 if (ret) 1409 return ret; 1410 1411 vport++; 1412 } 1413 1414 return 0; 1415 } 1416 1417 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) 1418 { 1419 /* this would be initialized later */ 1420 } 1421 1422 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1423 { 1424 struct hnae3_handle *nic = &vport->nic; 1425 struct hclge_dev *hdev = vport->back; 1426 int ret; 1427 1428 nic->pdev = hdev->pdev; 1429 nic->ae_algo = &ae_algo; 1430 nic->numa_node_mask = hdev->numa_node_mask; 1431 1432 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { 1433 ret = hclge_knic_setup(vport, num_tqps); 1434 if (ret) { 1435 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", 1436 ret); 1437 return ret; 1438 } 1439 } else { 1440 hclge_unic_setup(vport, num_tqps); 1441 } 1442 1443 return 0; 1444 } 1445 1446 static int hclge_alloc_vport(struct hclge_dev *hdev) 1447 { 1448 struct pci_dev *pdev = hdev->pdev; 1449 struct hclge_vport *vport; 1450 u32 tqp_main_vport; 1451 u32 tqp_per_vport; 1452 int num_vport, i; 1453 int ret; 1454 1455 /* We need to alloc a vport for main NIC of PF */ 1456 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1457 1458 if (hdev->num_tqps < num_vport) { 1459 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", 1460 hdev->num_tqps, num_vport); 1461 return -EINVAL; 1462 } 1463 1464 /* Alloc the same number of TQPs for every vport */ 1465 tqp_per_vport = hdev->num_tqps / num_vport; 1466 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1467 1468 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1469 GFP_KERNEL); 1470 if (!vport) 1471 return -ENOMEM; 1472 1473 hdev->vport = vport; 1474 hdev->num_alloc_vport = num_vport; 1475 1476 if (IS_ENABLED(CONFIG_PCI_IOV)) 1477 hdev->num_alloc_vfs = hdev->num_req_vfs; 1478 1479 for (i = 0; i < num_vport; i++) { 1480 vport->back = hdev; 1481 vport->vport_id = i; 1482 1483 if (i == 0) 1484 ret = hclge_vport_setup(vport, tqp_main_vport); 1485 else 1486 ret = hclge_vport_setup(vport, tqp_per_vport); 1487 if (ret) { 1488 
dev_err(&pdev->dev, 1489 "vport setup failed for vport %d, %d\n", 1490 i, ret); 1491 return ret; 1492 } 1493 1494 vport++; 1495 } 1496 1497 return 0; 1498 } 1499 1500 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1501 struct hclge_pkt_buf_alloc *buf_alloc) 1502 { 1503 /* TX buffer size is unit by 128 byte */ 1504 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1505 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1506 struct hclge_tx_buff_alloc_cmd *req; 1507 struct hclge_desc desc; 1508 int ret; 1509 u8 i; 1510 1511 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1512 1513 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1514 for (i = 0; i < HCLGE_TC_NUM; i++) { 1515 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1516 1517 req->tx_pkt_buff[i] = 1518 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1519 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1520 } 1521 1522 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1523 if (ret) { 1524 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1525 ret); 1526 return ret; 1527 } 1528 1529 return 0; 1530 } 1531 1532 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1533 struct hclge_pkt_buf_alloc *buf_alloc) 1534 { 1535 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1536 1537 if (ret) { 1538 dev_err(&hdev->pdev->dev, 1539 "tx buffer alloc failed %d\n", ret); 1540 return ret; 1541 } 1542 1543 return 0; 1544 } 1545 1546 static int hclge_get_tc_num(struct hclge_dev *hdev) 1547 { 1548 int i, cnt = 0; 1549 1550 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1551 if (hdev->hw_tc_map & BIT(i)) 1552 cnt++; 1553 return cnt; 1554 } 1555 1556 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) 1557 { 1558 int i, cnt = 0; 1559 1560 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1561 if (hdev->hw_tc_map & BIT(i) && 1562 hdev->tm_info.hw_pfc_map & BIT(i)) 1563 cnt++; 1564 return cnt; 1565 } 1566 1567 /* Get the number of pfc enabled TCs, which have private buffer */ 1568 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1569 struct hclge_pkt_buf_alloc *buf_alloc) 1570 { 1571 struct hclge_priv_buf *priv; 1572 int i, cnt = 0; 1573 1574 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1575 priv = &buf_alloc->priv_buf[i]; 1576 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1577 priv->enable) 1578 cnt++; 1579 } 1580 1581 return cnt; 1582 } 1583 1584 /* Get the number of pfc disabled TCs, which have private buffer */ 1585 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1586 struct hclge_pkt_buf_alloc *buf_alloc) 1587 { 1588 struct hclge_priv_buf *priv; 1589 int i, cnt = 0; 1590 1591 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1592 priv = &buf_alloc->priv_buf[i]; 1593 if (hdev->hw_tc_map & BIT(i) && 1594 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1595 priv->enable) 1596 cnt++; 1597 } 1598 1599 return cnt; 1600 } 1601 1602 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1603 { 1604 struct hclge_priv_buf *priv; 1605 u32 rx_priv = 0; 1606 int i; 1607 1608 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1609 priv = &buf_alloc->priv_buf[i]; 1610 if (priv->enable) 1611 rx_priv += priv->buf_size; 1612 } 1613 return rx_priv; 1614 } 1615 1616 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1617 { 1618 u32 i, total_tx_size = 0; 1619 1620 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1621 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 1622 1623 return total_tx_size; 1624 } 1625 1626 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, 1627 struct hclge_pkt_buf_alloc *buf_alloc, 1628 u32 rx_all) 1629 { 1630 
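	/* Summary of the check below: rx_all is the packet-buffer space left
	 * for RX. The function returns true only if rx_all exceeds the sum of
	 * all enabled TCs' private RX buffers plus a shared buffer of at
	 * least shared_std bytes; in that case it records the shared-buffer
	 * size and the per-TC thresholds (derived from hdev->mps) in
	 * buf_alloc->s_buf.
	 */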
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
1714 */ 1715 if (!hnae3_dev_dcb_supported(hdev)) { 1716 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1717 return -ENOMEM; 1718 1719 return 0; 1720 } 1721 1722 /* step 1, try to alloc private buffer for all enabled tc */ 1723 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1724 priv = &buf_alloc->priv_buf[i]; 1725 if (hdev->hw_tc_map & BIT(i)) { 1726 priv->enable = 1; 1727 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1728 priv->wl.low = hdev->mps; 1729 priv->wl.high = priv->wl.low + hdev->mps; 1730 priv->buf_size = priv->wl.high + 1731 HCLGE_DEFAULT_DV; 1732 } else { 1733 priv->wl.low = 0; 1734 priv->wl.high = 2 * hdev->mps; 1735 priv->buf_size = priv->wl.high; 1736 } 1737 } else { 1738 priv->enable = 0; 1739 priv->wl.low = 0; 1740 priv->wl.high = 0; 1741 priv->buf_size = 0; 1742 } 1743 } 1744 1745 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1746 return 0; 1747 1748 /* step 2, try to decrease the buffer size of 1749 * no pfc TC's private buffer 1750 */ 1751 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1752 priv = &buf_alloc->priv_buf[i]; 1753 1754 priv->enable = 0; 1755 priv->wl.low = 0; 1756 priv->wl.high = 0; 1757 priv->buf_size = 0; 1758 1759 if (!(hdev->hw_tc_map & BIT(i))) 1760 continue; 1761 1762 priv->enable = 1; 1763 1764 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1765 priv->wl.low = 128; 1766 priv->wl.high = priv->wl.low + hdev->mps; 1767 priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; 1768 } else { 1769 priv->wl.low = 0; 1770 priv->wl.high = hdev->mps; 1771 priv->buf_size = priv->wl.high; 1772 } 1773 } 1774 1775 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1776 return 0; 1777 1778 /* step 3, try to reduce the number of pfc disabled TCs, 1779 * which have private buffer 1780 */ 1781 /* get the total no pfc enable TC number, which have private buffer */ 1782 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 1783 1784 /* let the last to be cleared first */ 1785 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1786 priv = &buf_alloc->priv_buf[i]; 1787 1788 if (hdev->hw_tc_map & BIT(i) && 1789 !(hdev->tm_info.hw_pfc_map & BIT(i))) { 1790 /* Clear the no pfc TC private buffer */ 1791 priv->wl.low = 0; 1792 priv->wl.high = 0; 1793 priv->buf_size = 0; 1794 priv->enable = 0; 1795 no_pfc_priv_num--; 1796 } 1797 1798 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1799 no_pfc_priv_num == 0) 1800 break; 1801 } 1802 1803 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1804 return 0; 1805 1806 /* step 4, try to reduce the number of pfc enabled TCs 1807 * which have private buffer. 
1808 */ 1809 pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 1810 1811 /* let the last to be cleared first */ 1812 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1813 priv = &buf_alloc->priv_buf[i]; 1814 1815 if (hdev->hw_tc_map & BIT(i) && 1816 hdev->tm_info.hw_pfc_map & BIT(i)) { 1817 /* Reduce the number of pfc TC with private buffer */ 1818 priv->wl.low = 0; 1819 priv->enable = 0; 1820 priv->wl.high = 0; 1821 priv->buf_size = 0; 1822 pfc_priv_num--; 1823 } 1824 1825 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1826 pfc_priv_num == 0) 1827 break; 1828 } 1829 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1830 return 0; 1831 1832 return -ENOMEM; 1833 } 1834 1835 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 1836 struct hclge_pkt_buf_alloc *buf_alloc) 1837 { 1838 struct hclge_rx_priv_buff_cmd *req; 1839 struct hclge_desc desc; 1840 int ret; 1841 int i; 1842 1843 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 1844 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 1845 1846 /* Alloc private buffer TCs */ 1847 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1848 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1849 1850 req->buf_num[i] = 1851 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 1852 req->buf_num[i] |= 1853 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 1854 } 1855 1856 req->shared_buf = 1857 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 1858 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 1859 1860 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1861 if (ret) { 1862 dev_err(&hdev->pdev->dev, 1863 "rx private buffer alloc cmd failed %d\n", ret); 1864 return ret; 1865 } 1866 1867 return 0; 1868 } 1869 1870 #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) 1871 1872 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 1873 struct hclge_pkt_buf_alloc *buf_alloc) 1874 { 1875 struct hclge_rx_priv_wl_buf *req; 1876 struct hclge_priv_buf *priv; 1877 struct hclge_desc desc[2]; 1878 int i, j; 1879 int ret; 1880 1881 for (i = 0; i < 2; i++) { 1882 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 1883 false); 1884 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 1885 1886 /* The first descriptor set the NEXT bit to 1 */ 1887 if (i == 0) 1888 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1889 else 1890 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1891 1892 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1893 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 1894 1895 priv = &buf_alloc->priv_buf[idx]; 1896 req->tc_wl[j].high = 1897 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 1898 req->tc_wl[j].high |= 1899 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << 1900 HCLGE_RX_PRIV_EN_B); 1901 req->tc_wl[j].low = 1902 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 1903 req->tc_wl[j].low |= 1904 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << 1905 HCLGE_RX_PRIV_EN_B); 1906 } 1907 } 1908 1909 /* Send 2 descriptor at one time */ 1910 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1911 if (ret) { 1912 dev_err(&hdev->pdev->dev, 1913 "rx private waterline config cmd failed %d\n", 1914 ret); 1915 return ret; 1916 } 1917 return 0; 1918 } 1919 1920 static int hclge_common_thrd_config(struct hclge_dev *hdev, 1921 struct hclge_pkt_buf_alloc *buf_alloc) 1922 { 1923 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 1924 struct hclge_rx_com_thrd *req; 1925 struct hclge_desc desc[2]; 1926 struct hclge_tc_thrd *tc; 1927 int i, j; 1928 int ret; 1929 1930 for (i = 0; i < 2; i++) { 1931 hclge_cmd_setup_basic_desc(&desc[i], 1932 HCLGE_OPC_RX_COM_THRD_ALLOC, 
false); 1933 req = (struct hclge_rx_com_thrd *)&desc[i].data; 1934 1935 /* The first descriptor set the NEXT bit to 1 */ 1936 if (i == 0) 1937 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1938 else 1939 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1940 1941 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1942 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 1943 1944 req->com_thrd[j].high = 1945 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 1946 req->com_thrd[j].high |= 1947 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << 1948 HCLGE_RX_PRIV_EN_B); 1949 req->com_thrd[j].low = 1950 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 1951 req->com_thrd[j].low |= 1952 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << 1953 HCLGE_RX_PRIV_EN_B); 1954 } 1955 } 1956 1957 /* Send 2 descriptors at one time */ 1958 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1959 if (ret) { 1960 dev_err(&hdev->pdev->dev, 1961 "common threshold config cmd failed %d\n", ret); 1962 return ret; 1963 } 1964 return 0; 1965 } 1966 1967 static int hclge_common_wl_config(struct hclge_dev *hdev, 1968 struct hclge_pkt_buf_alloc *buf_alloc) 1969 { 1970 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 1971 struct hclge_rx_com_wl *req; 1972 struct hclge_desc desc; 1973 int ret; 1974 1975 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 1976 1977 req = (struct hclge_rx_com_wl *)desc.data; 1978 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 1979 req->com_wl.high |= 1980 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << 1981 HCLGE_RX_PRIV_EN_B); 1982 1983 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 1984 req->com_wl.low |= 1985 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << 1986 HCLGE_RX_PRIV_EN_B); 1987 1988 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1989 if (ret) { 1990 dev_err(&hdev->pdev->dev, 1991 "common waterline config cmd failed %d\n", ret); 1992 return ret; 1993 } 1994 1995 return 0; 1996 } 1997 1998 int hclge_buffer_alloc(struct hclge_dev *hdev) 1999 { 2000 struct hclge_pkt_buf_alloc *pkt_buf; 2001 int ret; 2002 2003 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 2004 if (!pkt_buf) 2005 return -ENOMEM; 2006 2007 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 2008 if (ret) { 2009 dev_err(&hdev->pdev->dev, 2010 "could not calc tx buffer size for all TCs %d\n", ret); 2011 goto out; 2012 } 2013 2014 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 2015 if (ret) { 2016 dev_err(&hdev->pdev->dev, 2017 "could not alloc tx buffers %d\n", ret); 2018 goto out; 2019 } 2020 2021 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 2022 if (ret) { 2023 dev_err(&hdev->pdev->dev, 2024 "could not calc rx priv buffer size for all TCs %d\n", 2025 ret); 2026 goto out; 2027 } 2028 2029 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 2030 if (ret) { 2031 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 2032 ret); 2033 goto out; 2034 } 2035 2036 if (hnae3_dev_dcb_supported(hdev)) { 2037 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2038 if (ret) { 2039 dev_err(&hdev->pdev->dev, 2040 "could not configure rx private waterline %d\n", 2041 ret); 2042 goto out; 2043 } 2044 2045 ret = hclge_common_thrd_config(hdev, pkt_buf); 2046 if (ret) { 2047 dev_err(&hdev->pdev->dev, 2048 "could not configure common threshold %d\n", 2049 ret); 2050 goto out; 2051 } 2052 } 2053 2054 ret = hclge_common_wl_config(hdev, pkt_buf); 2055 if (ret) 2056 dev_err(&hdev->pdev->dev, 2057 "could not configure common waterline %d\n", ret); 2058 2059 out: 2060 kfree(pkt_buf); 2061 return ret; 2062 } 2063 2064 static int 
hclge_init_roce_base_info(struct hclge_vport *vport) 2065 { 2066 struct hnae3_handle *roce = &vport->roce; 2067 struct hnae3_handle *nic = &vport->nic; 2068 2069 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2070 2071 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || 2072 vport->back->num_msi_left == 0) 2073 return -EINVAL; 2074 2075 roce->rinfo.base_vector = vport->back->roce_base_vector; 2076 2077 roce->rinfo.netdev = nic->kinfo.netdev; 2078 roce->rinfo.roce_io_base = vport->back->hw.io_base; 2079 2080 roce->pdev = nic->pdev; 2081 roce->ae_algo = nic->ae_algo; 2082 roce->numa_node_mask = nic->numa_node_mask; 2083 2084 return 0; 2085 } 2086 2087 static int hclge_init_msi(struct hclge_dev *hdev) 2088 { 2089 struct pci_dev *pdev = hdev->pdev; 2090 int vectors; 2091 int i; 2092 2093 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 2094 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2095 if (vectors < 0) { 2096 dev_err(&pdev->dev, 2097 "failed(%d) to allocate MSI/MSI-X vectors\n", 2098 vectors); 2099 return vectors; 2100 } 2101 if (vectors < hdev->num_msi) 2102 dev_warn(&hdev->pdev->dev, 2103 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2104 hdev->num_msi, vectors); 2105 2106 hdev->num_msi = vectors; 2107 hdev->num_msi_left = vectors; 2108 hdev->base_msi_vector = pdev->irq; 2109 hdev->roce_base_vector = hdev->base_msi_vector + 2110 HCLGE_ROCE_VECTOR_OFFSET; 2111 2112 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2113 sizeof(u16), GFP_KERNEL); 2114 if (!hdev->vector_status) { 2115 pci_free_irq_vectors(pdev); 2116 return -ENOMEM; 2117 } 2118 2119 for (i = 0; i < hdev->num_msi; i++) 2120 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2121 2122 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2123 sizeof(int), GFP_KERNEL); 2124 if (!hdev->vector_irq) { 2125 pci_free_irq_vectors(pdev); 2126 return -ENOMEM; 2127 } 2128 2129 return 0; 2130 } 2131 2132 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) 2133 { 2134 struct hclge_mac *mac = &hdev->hw.mac; 2135 2136 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) 2137 mac->duplex = (u8)duplex; 2138 else 2139 mac->duplex = HCLGE_MAC_FULL; 2140 2141 mac->speed = speed; 2142 } 2143 2144 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) 2145 { 2146 struct hclge_config_mac_speed_dup_cmd *req; 2147 struct hclge_desc desc; 2148 int ret; 2149 2150 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2151 2152 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2153 2154 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); 2155 2156 switch (speed) { 2157 case HCLGE_MAC_SPEED_10M: 2158 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2159 HCLGE_CFG_SPEED_S, 6); 2160 break; 2161 case HCLGE_MAC_SPEED_100M: 2162 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2163 HCLGE_CFG_SPEED_S, 7); 2164 break; 2165 case HCLGE_MAC_SPEED_1G: 2166 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2167 HCLGE_CFG_SPEED_S, 0); 2168 break; 2169 case HCLGE_MAC_SPEED_10G: 2170 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2171 HCLGE_CFG_SPEED_S, 1); 2172 break; 2173 case HCLGE_MAC_SPEED_25G: 2174 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2175 HCLGE_CFG_SPEED_S, 2); 2176 break; 2177 case HCLGE_MAC_SPEED_40G: 2178 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2179 HCLGE_CFG_SPEED_S, 3); 2180 break; 2181 case HCLGE_MAC_SPEED_50G: 2182 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2183 HCLGE_CFG_SPEED_S, 4); 2184 
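/* The values written to HCLGE_CFG_SPEED_S throughout this switch are the firmware's speed encodings (10M=6, 100M=7, 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5), not link speeds in Mbps. */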
break; 2185 case HCLGE_MAC_SPEED_100G: 2186 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2187 HCLGE_CFG_SPEED_S, 5); 2188 break; 2189 default: 2190 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2191 return -EINVAL; 2192 } 2193 2194 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2195 1); 2196 2197 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2198 if (ret) { 2199 dev_err(&hdev->pdev->dev, 2200 "mac speed/duplex config cmd failed %d.\n", ret); 2201 return ret; 2202 } 2203 2204 hclge_check_speed_dup(hdev, duplex, speed); 2205 2206 return 0; 2207 } 2208 2209 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2210 u8 duplex) 2211 { 2212 struct hclge_vport *vport = hclge_get_vport(handle); 2213 struct hclge_dev *hdev = vport->back; 2214 2215 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2216 } 2217 2218 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, 2219 u8 *duplex) 2220 { 2221 struct hclge_query_an_speed_dup_cmd *req; 2222 struct hclge_desc desc; 2223 int speed_tmp; 2224 int ret; 2225 2226 req = (struct hclge_query_an_speed_dup_cmd *)desc.data; 2227 2228 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); 2229 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2230 if (ret) { 2231 dev_err(&hdev->pdev->dev, 2232 "mac speed/autoneg/duplex query cmd failed %d\n", 2233 ret); 2234 return ret; 2235 } 2236 2237 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); 2238 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, 2239 HCLGE_QUERY_SPEED_S); 2240 2241 ret = hclge_parse_speed(speed_tmp, speed); 2242 if (ret) { 2243 dev_err(&hdev->pdev->dev, 2244 "could not parse speed(=%d), %d\n", speed_tmp, ret); 2245 return -EIO; 2246 } 2247 2248 return 0; 2249 } 2250 2251 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2252 { 2253 struct hclge_config_auto_neg_cmd *req; 2254 struct hclge_desc desc; 2255 u32 flag = 0; 2256 int ret; 2257 2258 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2259 2260 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2261 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); 2262 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2263 2264 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2265 if (ret) { 2266 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2267 ret); 2268 return ret; 2269 } 2270 2271 return 0; 2272 } 2273 2274 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2275 { 2276 struct hclge_vport *vport = hclge_get_vport(handle); 2277 struct hclge_dev *hdev = vport->back; 2278 2279 return hclge_set_autoneg_en(hdev, enable); 2280 } 2281 2282 static int hclge_get_autoneg(struct hnae3_handle *handle) 2283 { 2284 struct hclge_vport *vport = hclge_get_vport(handle); 2285 struct hclge_dev *hdev = vport->back; 2286 struct phy_device *phydev = hdev->hw.mac.phydev; 2287 2288 if (phydev) 2289 return phydev->autoneg; 2290 2291 return hdev->hw.mac.autoneg; 2292 } 2293 2294 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, 2295 bool mask_vlan, 2296 u8 *mac_mask) 2297 { 2298 struct hclge_mac_vlan_mask_entry_cmd *req; 2299 struct hclge_desc desc; 2300 int status; 2301 2302 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; 2303 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); 2304 2305 hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, 2306 mask_vlan ? 
1 : 0); 2307 ether_addr_copy(req->mac_mask, mac_mask); 2308 2309 status = hclge_cmd_send(&hdev->hw, &desc, 1); 2310 if (status) 2311 dev_err(&hdev->pdev->dev, 2312 "Config mac_vlan_mask failed for cmd_send, ret =%d\n", 2313 status); 2314 2315 return status; 2316 } 2317 2318 static int hclge_mac_init(struct hclge_dev *hdev) 2319 { 2320 struct hnae3_handle *handle = &hdev->vport[0].nic; 2321 struct net_device *netdev = handle->kinfo.netdev; 2322 struct hclge_mac *mac = &hdev->hw.mac; 2323 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 2324 int mtu; 2325 int ret; 2326 2327 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); 2328 if (ret) { 2329 dev_err(&hdev->pdev->dev, 2330 "Config mac speed dup fail ret=%d\n", ret); 2331 return ret; 2332 } 2333 2334 mac->link = 0; 2335 2336 /* Initialize the MTA table work mode */ 2337 hdev->accept_mta_mc = true; 2338 hdev->enable_mta = true; 2339 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; 2340 2341 ret = hclge_set_mta_filter_mode(hdev, 2342 hdev->mta_mac_sel_type, 2343 hdev->enable_mta); 2344 if (ret) { 2345 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", 2346 ret); 2347 return ret; 2348 } 2349 2350 ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); 2351 if (ret) { 2352 dev_err(&hdev->pdev->dev, 2353 "set mta filter mode fail ret=%d\n", ret); 2354 return ret; 2355 } 2356 2357 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); 2358 if (ret) { 2359 dev_err(&hdev->pdev->dev, 2360 "set default mac_vlan_mask fail ret=%d\n", ret); 2361 return ret; 2362 } 2363 2364 if (netdev) 2365 mtu = netdev->mtu; 2366 else 2367 mtu = ETH_DATA_LEN; 2368 2369 ret = hclge_set_mtu(handle, mtu); 2370 if (ret) { 2371 dev_err(&hdev->pdev->dev, 2372 "set mtu failed ret=%d\n", ret); 2373 return ret; 2374 } 2375 2376 return 0; 2377 } 2378 2379 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2380 { 2381 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2382 schedule_work(&hdev->mbx_service_task); 2383 } 2384 2385 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 2386 { 2387 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 2388 schedule_work(&hdev->rst_service_task); 2389 } 2390 2391 static void hclge_task_schedule(struct hclge_dev *hdev) 2392 { 2393 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && 2394 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2395 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) 2396 (void)schedule_work(&hdev->service_task); 2397 } 2398 2399 static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2400 { 2401 struct hclge_link_status_cmd *req; 2402 struct hclge_desc desc; 2403 int link_status; 2404 int ret; 2405 2406 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2407 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2408 if (ret) { 2409 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2410 ret); 2411 return ret; 2412 } 2413 2414 req = (struct hclge_link_status_cmd *)desc.data; 2415 link_status = req->status & HCLGE_LINK_STATUS; 2416 2417 return !!link_status; 2418 } 2419 2420 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2421 { 2422 int mac_state; 2423 int link_stat; 2424 2425 mac_state = hclge_get_mac_link_status(hdev); 2426 2427 if (hdev->hw.mac.phydev) { 2428 if (!genphy_read_status(hdev->hw.mac.phydev)) 2429 link_stat = mac_state & 2430 hdev->hw.mac.phydev->link; 2431 else 2432 link_stat = 0; 2433 2434 } else { 2435 link_stat = mac_state; 2436 } 2437 2438 return 
!!link_stat; 2439 } 2440 2441 static void hclge_update_link_status(struct hclge_dev *hdev) 2442 { 2443 struct hnae3_client *client = hdev->nic_client; 2444 struct hnae3_handle *handle; 2445 int state; 2446 int i; 2447 2448 if (!client) 2449 return; 2450 state = hclge_get_mac_phy_link(hdev); 2451 if (state != hdev->hw.mac.link) { 2452 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2453 handle = &hdev->vport[i].nic; 2454 client->ops->link_status_change(handle, state); 2455 } 2456 hdev->hw.mac.link = state; 2457 } 2458 } 2459 2460 static int hclge_update_speed_duplex(struct hclge_dev *hdev) 2461 { 2462 struct hclge_mac mac = hdev->hw.mac; 2463 u8 duplex; 2464 int speed; 2465 int ret; 2466 2467 /* get the speed and duplex as the autoneg result from the mac cmd when a phy 2468 * doesn't exist. 2469 */ 2470 if (mac.phydev || !mac.autoneg) 2471 return 0; 2472 2473 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); 2474 if (ret) { 2475 dev_err(&hdev->pdev->dev, 2476 "mac autoneg/speed/duplex query failed %d\n", ret); 2477 return ret; 2478 } 2479 2480 if ((mac.speed != speed) || (mac.duplex != duplex)) { 2481 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2482 if (ret) { 2483 dev_err(&hdev->pdev->dev, 2484 "mac speed/duplex config failed %d\n", ret); 2485 return ret; 2486 } 2487 } 2488 2489 return 0; 2490 } 2491 2492 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) 2493 { 2494 struct hclge_vport *vport = hclge_get_vport(handle); 2495 struct hclge_dev *hdev = vport->back; 2496 2497 return hclge_update_speed_duplex(hdev); 2498 } 2499 2500 static int hclge_get_status(struct hnae3_handle *handle) 2501 { 2502 struct hclge_vport *vport = hclge_get_vport(handle); 2503 struct hclge_dev *hdev = vport->back; 2504 2505 hclge_update_link_status(hdev); 2506 2507 return hdev->hw.mac.link; 2508 } 2509 2510 static void hclge_service_timer(struct timer_list *t) 2511 { 2512 struct hclge_dev *hdev = from_timer(hdev, t, service_timer); 2513 2514 mod_timer(&hdev->service_timer, jiffies + HZ); 2515 hdev->hw_stats.stats_timer++; 2516 hclge_task_schedule(hdev); 2517 } 2518 2519 static void hclge_service_complete(struct hclge_dev *hdev) 2520 { 2521 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); 2522 2523 /* Flush memory before next watchdog */ 2524 smp_mb__before_atomic(); 2525 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 2526 } 2527 2528 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 2529 { 2530 u32 rst_src_reg; 2531 u32 cmdq_src_reg; 2532 2533 /* fetch the events from their corresponding regs */ 2534 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); 2535 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 2536 2537 /* Assumption: If by any chance reset and mailbox events are reported 2538 * together then we will only process reset event in this go and will 2539 * defer the processing of the mailbox events. Since, we would have not 2540 * cleared RX CMDQ event this time we would receive again another 2541 * interrupt from H/W just for the mailbox.
2542 */ 2543 2544 /* check for vector0 reset event sources */ 2545 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { 2546 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 2547 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2548 return HCLGE_VECTOR0_EVENT_RST; 2549 } 2550 2551 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { 2552 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); 2553 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2554 return HCLGE_VECTOR0_EVENT_RST; 2555 } 2556 2557 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { 2558 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 2559 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2560 return HCLGE_VECTOR0_EVENT_RST; 2561 } 2562 2563 /* check for vector0 mailbox(=CMDQ RX) event source */ 2564 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 2565 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 2566 *clearval = cmdq_src_reg; 2567 return HCLGE_VECTOR0_EVENT_MBX; 2568 } 2569 2570 return HCLGE_VECTOR0_EVENT_OTHER; 2571 } 2572 2573 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 2574 u32 regclr) 2575 { 2576 switch (event_type) { 2577 case HCLGE_VECTOR0_EVENT_RST: 2578 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 2579 break; 2580 case HCLGE_VECTOR0_EVENT_MBX: 2581 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 2582 break; 2583 } 2584 } 2585 2586 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 2587 { 2588 writel(enable ? 1 : 0, vector->addr); 2589 } 2590 2591 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 2592 { 2593 struct hclge_dev *hdev = data; 2594 u32 event_cause; 2595 u32 clearval; 2596 2597 hclge_enable_vector(&hdev->misc_vector, false); 2598 event_cause = hclge_check_event_cause(hdev, &clearval); 2599 2600 /* vector 0 interrupt is shared with reset and mailbox source events.*/ 2601 switch (event_cause) { 2602 case HCLGE_VECTOR0_EVENT_RST: 2603 hclge_reset_task_schedule(hdev); 2604 break; 2605 case HCLGE_VECTOR0_EVENT_MBX: 2606 /* If we are here then, 2607 * 1. Either we are not handling any mbx task and we are not 2608 * scheduled as well 2609 * OR 2610 * 2. We could be handling a mbx task but nothing more is 2611 * scheduled. 2612 * In both cases, we should schedule mbx task as there are more 2613 * mbx messages reported by this interrupt. 
2614 */ 2615 hclge_mbx_task_schedule(hdev); break; 2616 2617 default: 2618 dev_dbg(&hdev->pdev->dev, 2619 "received unknown or unhandled event of vector0\n"); 2620 break; 2621 } 2622 2623 /* we should clear the source of interrupt */ 2624 hclge_clear_event_cause(hdev, event_cause, clearval); 2625 hclge_enable_vector(&hdev->misc_vector, true); 2626 2627 return IRQ_HANDLED; 2628 } 2629 2630 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 2631 { 2632 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 2633 hdev->num_msi_left += 1; 2634 hdev->num_msi_used -= 1; 2635 } 2636 2637 static void hclge_get_misc_vector(struct hclge_dev *hdev) 2638 { 2639 struct hclge_misc_vector *vector = &hdev->misc_vector; 2640 2641 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 2642 2643 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 2644 hdev->vector_status[0] = 0; 2645 2646 hdev->num_msi_left -= 1; 2647 hdev->num_msi_used += 1; 2648 } 2649 2650 static int hclge_misc_irq_init(struct hclge_dev *hdev) 2651 { 2652 int ret; 2653 2654 hclge_get_misc_vector(hdev); 2655 2656 /* this would be explicitly freed in the end */ 2657 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 2658 0, "hclge_misc", hdev); 2659 if (ret) { 2660 hclge_free_vector(hdev, 0); 2661 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 2662 hdev->misc_vector.vector_irq); 2663 } 2664 2665 return ret; 2666 } 2667 2668 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 2669 { 2670 free_irq(hdev->misc_vector.vector_irq, hdev); 2671 hclge_free_vector(hdev, 0); 2672 } 2673 2674 static int hclge_notify_client(struct hclge_dev *hdev, 2675 enum hnae3_reset_notify_type type) 2676 { 2677 struct hnae3_client *client = hdev->nic_client; 2678 u16 i; 2679 2680 if (!client->ops->reset_notify) 2681 return -EOPNOTSUPP; 2682 2683 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2684 struct hnae3_handle *handle = &hdev->vport[i].nic; 2685 int ret; 2686 2687 ret = client->ops->reset_notify(handle, type); 2688 if (ret) 2689 return ret; 2690 } 2691 2692 return 0; 2693 } 2694 2695 static int hclge_reset_wait(struct hclge_dev *hdev) 2696 { 2697 #define HCLGE_RESET_WAIT_MS 100 2698 #define HCLGE_RESET_WAIT_CNT 5 2699 u32 val, reg, reg_bit; 2700 u32 cnt = 0; 2701 2702 switch (hdev->reset_type) { 2703 case HNAE3_GLOBAL_RESET: 2704 reg = HCLGE_GLOBAL_RESET_REG; 2705 reg_bit = HCLGE_GLOBAL_RESET_BIT; 2706 break; 2707 case HNAE3_CORE_RESET: 2708 reg = HCLGE_GLOBAL_RESET_REG; 2709 reg_bit = HCLGE_CORE_RESET_BIT; 2710 break; 2711 case HNAE3_FUNC_RESET: 2712 reg = HCLGE_FUN_RST_ING; 2713 reg_bit = HCLGE_FUN_RST_ING_B; 2714 break; 2715 default: 2716 dev_err(&hdev->pdev->dev, 2717 "Wait for unsupported reset type: %d\n", 2718 hdev->reset_type); 2719 return -EINVAL; 2720 } 2721 2722 val = hclge_read_dev(&hdev->hw, reg); 2723 while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 2724 msleep(HCLGE_RESET_WAIT_MS); 2725 val = hclge_read_dev(&hdev->hw, reg); 2726 cnt++; 2727 } 2728 2729 if (cnt >= HCLGE_RESET_WAIT_CNT) { 2730 dev_warn(&hdev->pdev->dev, 2731 "Wait for reset timeout: %d\n", hdev->reset_type); 2732 return -EBUSY; 2733 } 2734 2735 return 0; 2736 } 2737 2738 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 2739 { 2740 struct hclge_desc desc; 2741 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 2742 int ret; 2743 2744 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 2745 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); 2746
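/* The MAC reset bit above is explicitly left cleared; only the function-level reset bit is set for this request. */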
hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 2747 req->fun_reset_vfid = func_id; 2748 2749 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2750 if (ret) 2751 dev_err(&hdev->pdev->dev, 2752 "send function reset cmd fail, status =%d\n", ret); 2753 2754 return ret; 2755 } 2756 2757 static void hclge_do_reset(struct hclge_dev *hdev) 2758 { 2759 struct pci_dev *pdev = hdev->pdev; 2760 u32 val; 2761 2762 switch (hdev->reset_type) { 2763 case HNAE3_GLOBAL_RESET: 2764 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2765 hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 2766 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2767 dev_info(&pdev->dev, "Global Reset requested\n"); 2768 break; 2769 case HNAE3_CORE_RESET: 2770 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2771 hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); 2772 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2773 dev_info(&pdev->dev, "Core Reset requested\n"); 2774 break; 2775 case HNAE3_FUNC_RESET: 2776 dev_info(&pdev->dev, "PF Reset requested\n"); 2777 hclge_func_reset_cmd(hdev, 0); 2778 /* schedule again to check later */ 2779 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 2780 hclge_reset_task_schedule(hdev); 2781 break; 2782 default: 2783 dev_warn(&pdev->dev, 2784 "Unsupported reset type: %d\n", hdev->reset_type); 2785 break; 2786 } 2787 } 2788 2789 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, 2790 unsigned long *addr) 2791 { 2792 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2793 2794 /* return the highest priority reset level amongst all */ 2795 if (test_bit(HNAE3_GLOBAL_RESET, addr)) 2796 rst_level = HNAE3_GLOBAL_RESET; 2797 else if (test_bit(HNAE3_CORE_RESET, addr)) 2798 rst_level = HNAE3_CORE_RESET; 2799 else if (test_bit(HNAE3_IMP_RESET, addr)) 2800 rst_level = HNAE3_IMP_RESET; 2801 else if (test_bit(HNAE3_FUNC_RESET, addr)) 2802 rst_level = HNAE3_FUNC_RESET; 2803 2804 /* now, clear all other resets */ 2805 clear_bit(HNAE3_GLOBAL_RESET, addr); 2806 clear_bit(HNAE3_CORE_RESET, addr); 2807 clear_bit(HNAE3_IMP_RESET, addr); 2808 clear_bit(HNAE3_FUNC_RESET, addr); 2809 2810 return rst_level; 2811 } 2812 2813 static void hclge_reset(struct hclge_dev *hdev) 2814 { 2815 /* perform reset of the stack & ae device for a client */ 2816 2817 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2818 2819 if (!hclge_reset_wait(hdev)) { 2820 rtnl_lock(); 2821 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 2822 hclge_reset_ae_dev(hdev->ae_dev); 2823 hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 2824 rtnl_unlock(); 2825 } else { 2826 /* schedule again to check pending resets later */ 2827 set_bit(hdev->reset_type, &hdev->reset_pending); 2828 hclge_reset_task_schedule(hdev); 2829 } 2830 2831 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2832 } 2833 2834 static void hclge_reset_event(struct hnae3_handle *handle) 2835 { 2836 struct hclge_vport *vport = hclge_get_vport(handle); 2837 struct hclge_dev *hdev = vport->back; 2838 2839 /* check if this is a new reset request and we are not here just because 2840 * last reset attempt did not succeed and watchdog hit us again. We will 2841 * know this if last reset request did not occur very recently (watchdog 2842 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) 2843 * In case of new request we reset the "reset level" to PF reset. 
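 * (The 4 * 5 * HZ window below therefore spans four watchdog periods: a request arriving within that window is treated as a retry of the previous attempt, so reset_level keeps escalating instead of being reset back to a PF/FUNC reset.)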
*/ 2845 if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) 2846 handle->reset_level = HNAE3_FUNC_RESET; 2847 2848 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", 2849 handle->reset_level); 2850 2851 /* request reset & schedule reset task */ 2852 set_bit(handle->reset_level, &hdev->reset_request); 2853 hclge_reset_task_schedule(hdev); 2854 2855 if (handle->reset_level < HNAE3_GLOBAL_RESET) 2856 handle->reset_level++; 2857 2858 handle->last_reset_time = jiffies; 2859 } 2860 2861 static void hclge_reset_subtask(struct hclge_dev *hdev) 2862 { 2863 /* check if there is any ongoing reset in the hardware. This status can 2864 * be checked from reset_pending. If there is then, we need to wait for 2865 * hardware to complete reset. 2866 * a. If we are able to figure out in reasonable time that hardware 2867 * has been fully reset then, we can proceed with driver, client 2868 * reset. 2869 * b. else, we can come back later to check this status so re-sched 2870 * now. 2871 */ 2872 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); 2873 if (hdev->reset_type != HNAE3_NONE_RESET) 2874 hclge_reset(hdev); 2875 2876 /* check if we got any *new* reset requests to be honored */ 2877 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); 2878 if (hdev->reset_type != HNAE3_NONE_RESET) 2879 hclge_do_reset(hdev); 2880 2881 hdev->reset_type = HNAE3_NONE_RESET; 2882 } 2883 2884 static void hclge_reset_service_task(struct work_struct *work) 2885 { 2886 struct hclge_dev *hdev = 2887 container_of(work, struct hclge_dev, rst_service_task); 2888 2889 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2890 return; 2891 2892 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 2893 2894 hclge_reset_subtask(hdev); 2895 2896 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 2897 } 2898 2899 static void hclge_mailbox_service_task(struct work_struct *work) 2900 { 2901 struct hclge_dev *hdev = 2902 container_of(work, struct hclge_dev, mbx_service_task); 2903 2904 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 2905 return; 2906 2907 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 2908 2909 hclge_mbx_handler(hdev); 2910 2911 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 2912 } 2913 2914 static void hclge_service_task(struct work_struct *work) 2915 { 2916 struct hclge_dev *hdev = 2917 container_of(work, struct hclge_dev, service_task); 2918 2919 /* The total rx/tx packet statistics need to be updated 2920 * per second. Both hclge_update_stats_for_all() and 2921 * hclge_mac_get_traffic_stats() can do it.
2922 */ 2923 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { 2924 hclge_update_stats_for_all(hdev); 2925 hdev->hw_stats.stats_timer = 0; 2926 } else { 2927 hclge_mac_get_traffic_stats(hdev); 2928 } 2929 2930 hclge_update_speed_duplex(hdev); 2931 hclge_update_link_status(hdev); 2932 hclge_update_led_status(hdev); 2933 hclge_service_complete(hdev); 2934 } 2935 2936 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 2937 { 2938 /* VF handle has no client */ 2939 if (!handle->client) 2940 return container_of(handle, struct hclge_vport, nic); 2941 else if (handle->client->type == HNAE3_CLIENT_ROCE) 2942 return container_of(handle, struct hclge_vport, roce); 2943 else 2944 return container_of(handle, struct hclge_vport, nic); 2945 } 2946 2947 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 2948 struct hnae3_vector_info *vector_info) 2949 { 2950 struct hclge_vport *vport = hclge_get_vport(handle); 2951 struct hnae3_vector_info *vector = vector_info; 2952 struct hclge_dev *hdev = vport->back; 2953 int alloc = 0; 2954 int i, j; 2955 2956 vector_num = min(hdev->num_msi_left, vector_num); 2957 2958 for (j = 0; j < vector_num; j++) { 2959 for (i = 1; i < hdev->num_msi; i++) { 2960 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 2961 vector->vector = pci_irq_vector(hdev->pdev, i); 2962 vector->io_addr = hdev->hw.io_base + 2963 HCLGE_VECTOR_REG_BASE + 2964 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 2965 vport->vport_id * 2966 HCLGE_VECTOR_VF_OFFSET; 2967 hdev->vector_status[i] = vport->vport_id; 2968 hdev->vector_irq[i] = vector->vector; 2969 2970 vector++; 2971 alloc++; 2972 2973 break; 2974 } 2975 } 2976 } 2977 hdev->num_msi_left -= alloc; 2978 hdev->num_msi_used += alloc; 2979 2980 return alloc; 2981 } 2982 2983 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 2984 { 2985 int i; 2986 2987 for (i = 0; i < hdev->num_msi; i++) 2988 if (vector == hdev->vector_irq[i]) 2989 return i; 2990 2991 return -EINVAL; 2992 } 2993 2994 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 2995 { 2996 struct hclge_vport *vport = hclge_get_vport(handle); 2997 struct hclge_dev *hdev = vport->back; 2998 int vector_id; 2999 3000 vector_id = hclge_get_vector_index(hdev, vector); 3001 if (vector_id < 0) { 3002 dev_err(&hdev->pdev->dev, 3003 "Get vector index fail. 
vector_id =%d\n", vector_id); 3004 return vector_id; 3005 } 3006 3007 hclge_free_vector(hdev, vector_id); 3008 3009 return 0; 3010 } 3011 3012 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 3013 { 3014 return HCLGE_RSS_KEY_SIZE; 3015 } 3016 3017 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 3018 { 3019 return HCLGE_RSS_IND_TBL_SIZE; 3020 } 3021 3022 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 3023 const u8 hfunc, const u8 *key) 3024 { 3025 struct hclge_rss_config_cmd *req; 3026 struct hclge_desc desc; 3027 int key_offset; 3028 int key_size; 3029 int ret; 3030 3031 req = (struct hclge_rss_config_cmd *)desc.data; 3032 3033 for (key_offset = 0; key_offset < 3; key_offset++) { 3034 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 3035 false); 3036 3037 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 3038 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 3039 3040 if (key_offset == 2) 3041 key_size = 3042 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; 3043 else 3044 key_size = HCLGE_RSS_HASH_KEY_NUM; 3045 3046 memcpy(req->hash_key, 3047 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 3048 3049 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3050 if (ret) { 3051 dev_err(&hdev->pdev->dev, 3052 "Configure RSS config fail, status = %d\n", 3053 ret); 3054 return ret; 3055 } 3056 } 3057 return 0; 3058 } 3059 3060 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) 3061 { 3062 struct hclge_rss_indirection_table_cmd *req; 3063 struct hclge_desc desc; 3064 int i, j; 3065 int ret; 3066 3067 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 3068 3069 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 3070 hclge_cmd_setup_basic_desc 3071 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 3072 3073 req->start_table_index = 3074 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 3075 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 3076 3077 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 3078 req->rss_result[j] = 3079 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 3080 3081 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3082 if (ret) { 3083 dev_err(&hdev->pdev->dev, 3084 "Configure rss indir table fail,status = %d\n", 3085 ret); 3086 return ret; 3087 } 3088 } 3089 return 0; 3090 } 3091 3092 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 3093 u16 *tc_size, u16 *tc_offset) 3094 { 3095 struct hclge_rss_tc_mode_cmd *req; 3096 struct hclge_desc desc; 3097 int ret; 3098 int i; 3099 3100 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 3101 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 3102 3103 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3104 u16 mode = 0; 3105 3106 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 3107 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, 3108 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 3109 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 3110 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 3111 3112 req->rss_tc_mode[i] = cpu_to_le16(mode); 3113 } 3114 3115 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3116 if (ret) { 3117 dev_err(&hdev->pdev->dev, 3118 "Configure rss tc mode fail, status = %d\n", ret); 3119 return ret; 3120 } 3121 3122 return 0; 3123 } 3124 3125 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 3126 { 3127 struct hclge_rss_input_tuple_cmd *req; 3128 struct hclge_desc desc; 3129 int ret; 3130 3131 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3132 3133 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3134 3135 /* 
Get the tuple cfg from pf */ 3136 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; 3137 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; 3138 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; 3139 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; 3140 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; 3141 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; 3142 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; 3143 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; 3144 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3145 if (ret) { 3146 dev_err(&hdev->pdev->dev, 3147 "Configure rss input fail, status = %d\n", ret); 3148 return ret; 3149 } 3150 3151 return 0; 3152 } 3153 3154 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 3155 u8 *key, u8 *hfunc) 3156 { 3157 struct hclge_vport *vport = hclge_get_vport(handle); 3158 int i; 3159 3160 /* Get hash algorithm */ 3161 if (hfunc) 3162 *hfunc = vport->rss_algo; 3163 3164 /* Get the RSS Key required by the user */ 3165 if (key) 3166 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 3167 3168 /* Get indirect table */ 3169 if (indir) 3170 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3171 indir[i] = vport->rss_indirection_tbl[i]; 3172 3173 return 0; 3174 } 3175 3176 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 3177 const u8 *key, const u8 hfunc) 3178 { 3179 struct hclge_vport *vport = hclge_get_vport(handle); 3180 struct hclge_dev *hdev = vport->back; 3181 u8 hash_algo; 3182 int ret, i; 3183 3184 /* Set the RSS Hash Key if specified by the user */ 3185 if (key) { 3186 3187 if (hfunc == ETH_RSS_HASH_TOP || 3188 hfunc == ETH_RSS_HASH_NO_CHANGE) 3189 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3190 else 3191 return -EINVAL; 3192 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 3193 if (ret) 3194 return ret; 3195 3196 /* Update the shadow RSS key with the user specified key */ 3197 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 3198 vport->rss_algo = hash_algo; 3199 } 3200 3201 /* Update the shadow RSS table with user specified qids */ 3202 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3203 vport->rss_indirection_tbl[i] = indir[i]; 3204 3205 /* Update the hardware */ 3206 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); 3207 } 3208 3209 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 3210 { 3211 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ?
HCLGE_S_PORT_BIT : 0; 3212 3213 if (nfc->data & RXH_L4_B_2_3) 3214 hash_sets |= HCLGE_D_PORT_BIT; 3215 else 3216 hash_sets &= ~HCLGE_D_PORT_BIT; 3217 3218 if (nfc->data & RXH_IP_SRC) 3219 hash_sets |= HCLGE_S_IP_BIT; 3220 else 3221 hash_sets &= ~HCLGE_S_IP_BIT; 3222 3223 if (nfc->data & RXH_IP_DST) 3224 hash_sets |= HCLGE_D_IP_BIT; 3225 else 3226 hash_sets &= ~HCLGE_D_IP_BIT; 3227 3228 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 3229 hash_sets |= HCLGE_V_TAG_BIT; 3230 3231 return hash_sets; 3232 } 3233 3234 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 3235 struct ethtool_rxnfc *nfc) 3236 { 3237 struct hclge_vport *vport = hclge_get_vport(handle); 3238 struct hclge_dev *hdev = vport->back; 3239 struct hclge_rss_input_tuple_cmd *req; 3240 struct hclge_desc desc; 3241 u8 tuple_sets; 3242 int ret; 3243 3244 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 3245 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 3246 return -EINVAL; 3247 3248 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3249 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3250 3251 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; 3252 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; 3253 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; 3254 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; 3255 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; 3256 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; 3257 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; 3258 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; 3259 3260 tuple_sets = hclge_get_rss_hash_bits(nfc); 3261 switch (nfc->flow_type) { 3262 case TCP_V4_FLOW: 3263 req->ipv4_tcp_en = tuple_sets; 3264 break; 3265 case TCP_V6_FLOW: 3266 req->ipv6_tcp_en = tuple_sets; 3267 break; 3268 case UDP_V4_FLOW: 3269 req->ipv4_udp_en = tuple_sets; 3270 break; 3271 case UDP_V6_FLOW: 3272 req->ipv6_udp_en = tuple_sets; 3273 break; 3274 case SCTP_V4_FLOW: 3275 req->ipv4_sctp_en = tuple_sets; 3276 break; 3277 case SCTP_V6_FLOW: 3278 if ((nfc->data & RXH_L4_B_0_1) || 3279 (nfc->data & RXH_L4_B_2_3)) 3280 return -EINVAL; 3281 3282 req->ipv6_sctp_en = tuple_sets; 3283 break; 3284 case IPV4_FLOW: 3285 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3286 break; 3287 case IPV6_FLOW: 3288 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3289 break; 3290 default: 3291 return -EINVAL; 3292 } 3293 3294 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3295 if (ret) { 3296 dev_err(&hdev->pdev->dev, 3297 "Set rss tuple fail, status = %d\n", ret); 3298 return ret; 3299 } 3300 3301 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 3302 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 3303 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 3304 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 3305 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 3306 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 3307 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 3308 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 3309 return 0; 3310 } 3311 3312 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 3313 struct ethtool_rxnfc *nfc) 3314 { 3315 struct hclge_vport *vport = hclge_get_vport(handle); 3316 u8 tuple_sets; 3317 3318 nfc->data = 0; 3319 3320 switch (nfc->flow_type) { 3321 case TCP_V4_FLOW: 3322 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; 3323 break; 3324 case UDP_V4_FLOW: 3325 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; 3326 break; 3327 
case TCP_V6_FLOW: 3328 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; 3329 break; 3330 case UDP_V6_FLOW: 3331 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; 3332 break; 3333 case SCTP_V4_FLOW: 3334 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; 3335 break; 3336 case SCTP_V6_FLOW: 3337 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; 3338 break; 3339 case IPV4_FLOW: 3340 case IPV6_FLOW: 3341 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 3342 break; 3343 default: 3344 return -EINVAL; 3345 } 3346 3347 if (!tuple_sets) 3348 return 0; 3349 3350 if (tuple_sets & HCLGE_D_PORT_BIT) 3351 nfc->data |= RXH_L4_B_2_3; 3352 if (tuple_sets & HCLGE_S_PORT_BIT) 3353 nfc->data |= RXH_L4_B_0_1; 3354 if (tuple_sets & HCLGE_D_IP_BIT) 3355 nfc->data |= RXH_IP_DST; 3356 if (tuple_sets & HCLGE_S_IP_BIT) 3357 nfc->data |= RXH_IP_SRC; 3358 3359 return 0; 3360 } 3361 3362 static int hclge_get_tc_size(struct hnae3_handle *handle) 3363 { 3364 struct hclge_vport *vport = hclge_get_vport(handle); 3365 struct hclge_dev *hdev = vport->back; 3366 3367 return hdev->rss_size_max; 3368 } 3369 3370 int hclge_rss_init_hw(struct hclge_dev *hdev) 3371 { 3372 struct hclge_vport *vport = hdev->vport; 3373 u8 *rss_indir = vport[0].rss_indirection_tbl; 3374 u16 rss_size = vport[0].alloc_rss_size; 3375 u8 *key = vport[0].rss_hash_key; 3376 u8 hfunc = vport[0].rss_algo; 3377 u16 tc_offset[HCLGE_MAX_TC_NUM]; 3378 u16 tc_valid[HCLGE_MAX_TC_NUM]; 3379 u16 tc_size[HCLGE_MAX_TC_NUM]; 3380 u16 roundup_size; 3381 int i, ret; 3382 3383 ret = hclge_set_rss_indir_table(hdev, rss_indir); 3384 if (ret) 3385 return ret; 3386 3387 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 3388 if (ret) 3389 return ret; 3390 3391 ret = hclge_set_rss_input_tuple(hdev); 3392 if (ret) 3393 return ret; 3394 3395 /* Each TC has the same queue size, and tc_size set to hardware is 3396 * the log2 of roundup power of two of rss_size, the actual queue 3397 * size is limited by indirection table.
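 * For example (assuming rss_size = 24), roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so tc_size would be programmed as 5 for each enabled TC while tc_offset advances in steps of rss_size (0, 24, 48, ...).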
3398 */ 3399 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 3400 dev_err(&hdev->pdev->dev, 3401 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 3402 rss_size); 3403 return -EINVAL; 3404 } 3405 3406 roundup_size = roundup_pow_of_two(rss_size); 3407 roundup_size = ilog2(roundup_size); 3408 3409 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3410 tc_valid[i] = 0; 3411 3412 if (!(hdev->hw_tc_map & BIT(i))) 3413 continue; 3414 3415 tc_valid[i] = 1; 3416 tc_size[i] = roundup_size; 3417 tc_offset[i] = rss_size * i; 3418 } 3419 3420 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 3421 } 3422 3423 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) 3424 { 3425 struct hclge_vport *vport = hdev->vport; 3426 int i, j; 3427 3428 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 3429 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3430 vport[j].rss_indirection_tbl[i] = 3431 i % vport[j].alloc_rss_size; 3432 } 3433 } 3434 3435 static void hclge_rss_init_cfg(struct hclge_dev *hdev) 3436 { 3437 struct hclge_vport *vport = hdev->vport; 3438 int i; 3439 3440 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 3441 vport[i].rss_tuple_sets.ipv4_tcp_en = 3442 HCLGE_RSS_INPUT_TUPLE_OTHER; 3443 vport[i].rss_tuple_sets.ipv4_udp_en = 3444 HCLGE_RSS_INPUT_TUPLE_OTHER; 3445 vport[i].rss_tuple_sets.ipv4_sctp_en = 3446 HCLGE_RSS_INPUT_TUPLE_SCTP; 3447 vport[i].rss_tuple_sets.ipv4_fragment_en = 3448 HCLGE_RSS_INPUT_TUPLE_OTHER; 3449 vport[i].rss_tuple_sets.ipv6_tcp_en = 3450 HCLGE_RSS_INPUT_TUPLE_OTHER; 3451 vport[i].rss_tuple_sets.ipv6_udp_en = 3452 HCLGE_RSS_INPUT_TUPLE_OTHER; 3453 vport[i].rss_tuple_sets.ipv6_sctp_en = 3454 HCLGE_RSS_INPUT_TUPLE_SCTP; 3455 vport[i].rss_tuple_sets.ipv6_fragment_en = 3456 HCLGE_RSS_INPUT_TUPLE_OTHER; 3457 3458 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3459 3460 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); 3461 } 3462 3463 hclge_rss_indir_init_cfg(hdev); 3464 } 3465 3466 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 3467 int vector_id, bool en, 3468 struct hnae3_ring_chain_node *ring_chain) 3469 { 3470 struct hclge_dev *hdev = vport->back; 3471 struct hnae3_ring_chain_node *node; 3472 struct hclge_desc desc; 3473 struct hclge_ctrl_vector_chain_cmd *req 3474 = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 3475 enum hclge_cmd_status status; 3476 enum hclge_opcode_type op; 3477 u16 tqp_type_and_id; 3478 int i; 3479 3480 op = en ? 
HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 3481 hclge_cmd_setup_basic_desc(&desc, op, false); 3482 req->int_vector_id = vector_id; 3483 3484 i = 0; 3485 for (node = ring_chain; node; node = node->next) { 3486 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 3487 hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 3488 HCLGE_INT_TYPE_S, 3489 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3490 hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 3491 HCLGE_TQP_ID_S, node->tqp_index); 3492 hnae_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 3493 HCLGE_INT_GL_IDX_S, 3494 hnae_get_field(node->int_gl_idx, 3495 HNAE3_RING_GL_IDX_M, 3496 HNAE3_RING_GL_IDX_S)); 3497 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 3498 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 3499 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 3500 req->vfid = vport->vport_id; 3501 3502 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3503 if (status) { 3504 dev_err(&hdev->pdev->dev, 3505 "Map TQP fail, status is %d.\n", 3506 status); 3507 return -EIO; 3508 } 3509 i = 0; 3510 3511 hclge_cmd_setup_basic_desc(&desc, 3512 op, 3513 false); 3514 req->int_vector_id = vector_id; 3515 } 3516 } 3517 3518 if (i > 0) { 3519 req->int_cause_num = i; 3520 req->vfid = vport->vport_id; 3521 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3522 if (status) { 3523 dev_err(&hdev->pdev->dev, 3524 "Map TQP fail, status is %d.\n", status); 3525 return -EIO; 3526 } 3527 } 3528 3529 return 0; 3530 } 3531 3532 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, 3533 int vector, 3534 struct hnae3_ring_chain_node *ring_chain) 3535 { 3536 struct hclge_vport *vport = hclge_get_vport(handle); 3537 struct hclge_dev *hdev = vport->back; 3538 int vector_id; 3539 3540 vector_id = hclge_get_vector_index(hdev, vector); 3541 if (vector_id < 0) { 3542 dev_err(&hdev->pdev->dev, 3543 "Get vector index fail. vector_id =%d\n", vector_id); 3544 return vector_id; 3545 } 3546 3547 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 3548 } 3549 3550 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, 3551 int vector, 3552 struct hnae3_ring_chain_node *ring_chain) 3553 { 3554 struct hclge_vport *vport = hclge_get_vport(handle); 3555 struct hclge_dev *hdev = vport->back; 3556 int vector_id, ret; 3557 3558 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3559 return 0; 3560 3561 vector_id = hclge_get_vector_index(hdev, vector); 3562 if (vector_id < 0) { 3563 dev_err(&handle->pdev->dev, 3564 "Get vector index fail. ret =%d\n", vector_id); 3565 return vector_id; 3566 } 3567 3568 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 3569 if (ret) 3570 dev_err(&handle->pdev->dev, 3571 "Unmap ring from vector fail. 
vectorid=%d, ret =%d\n", 3572 vector_id, 3573 ret); 3574 3575 return ret; 3576 } 3577 3578 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 3579 struct hclge_promisc_param *param) 3580 { 3581 struct hclge_promisc_cfg_cmd *req; 3582 struct hclge_desc desc; 3583 int ret; 3584 3585 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 3586 3587 req = (struct hclge_promisc_cfg_cmd *)desc.data; 3588 req->vf_id = param->vf_id; 3589 req->flag = (param->enable << HCLGE_PROMISC_EN_B); 3590 3591 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3592 if (ret) { 3593 dev_err(&hdev->pdev->dev, 3594 "Set promisc mode fail, status is %d.\n", ret); 3595 return ret; 3596 } 3597 return 0; 3598 } 3599 3600 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, 3601 bool en_mc, bool en_bc, int vport_id) 3602 { 3603 if (!param) 3604 return; 3605 3606 memset(param, 0, sizeof(struct hclge_promisc_param)); 3607 if (en_uc) 3608 param->enable = HCLGE_PROMISC_EN_UC; 3609 if (en_mc) 3610 param->enable |= HCLGE_PROMISC_EN_MC; 3611 if (en_bc) 3612 param->enable |= HCLGE_PROMISC_EN_BC; 3613 param->vf_id = vport_id; 3614 } 3615 3616 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) 3617 { 3618 struct hclge_vport *vport = hclge_get_vport(handle); 3619 struct hclge_dev *hdev = vport->back; 3620 struct hclge_promisc_param param; 3621 3622 hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); 3623 hclge_cmd_set_promisc_mode(hdev, ¶m); 3624 } 3625 3626 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 3627 { 3628 struct hclge_desc desc; 3629 struct hclge_config_mac_mode_cmd *req = 3630 (struct hclge_config_mac_mode_cmd *)desc.data; 3631 u32 loop_en = 0; 3632 int ret; 3633 3634 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 3635 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); 3636 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); 3637 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); 3638 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); 3639 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); 3640 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); 3641 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 3642 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); 3643 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); 3644 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); 3645 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); 3646 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); 3647 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); 3648 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); 3649 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3650 3651 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3652 if (ret) 3653 dev_err(&hdev->pdev->dev, 3654 "mac enable fail, ret =%d.\n", ret); 3655 } 3656 3657 static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) 3658 { 3659 struct hclge_config_mac_mode_cmd *req; 3660 struct hclge_desc desc; 3661 u32 loop_en; 3662 int ret; 3663 3664 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 3665 /* 1 Read out the MAC mode config at first */ 3666 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 3667 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3668 if (ret) { 3669 dev_err(&hdev->pdev->dev, 3670 "mac loopback get fail, ret =%d.\n", ret); 3671 return ret; 3672 } 3673 3674 /* 2 Then setup the loopback flag */ 3675 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 3676 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); 3677 3678 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3679 3680 /* 3 Config mac work mode with loopback flag 3681 * and its original configure parameters 3682 */ 3683 hclge_cmd_reuse_desc(&desc, false); 3684 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3685 if (ret) 3686 dev_err(&hdev->pdev->dev, 3687 "mac loopback set fail, ret =%d.\n", ret); 3688 return ret; 3689 } 3690 3691 static int hclge_set_loopback(struct hnae3_handle *handle, 3692 enum hnae3_loop loop_mode, bool en) 3693 { 3694 struct hclge_vport *vport = hclge_get_vport(handle); 3695 struct hclge_dev *hdev = vport->back; 3696 int ret; 3697 3698 switch (loop_mode) { 3699 case HNAE3_MAC_INTER_LOOP_MAC: 3700 ret = hclge_set_mac_loopback(hdev, en); 3701 break; 3702 default: 3703 ret = -ENOTSUPP; 3704 dev_err(&hdev->pdev->dev, 3705 "loop_mode %d is not supported\n", loop_mode); 3706 break; 3707 } 3708 3709 return ret; 3710 } 3711 3712 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, 3713 int stream_id, bool enable) 3714 { 3715 struct hclge_desc desc; 3716 struct hclge_cfg_com_tqp_queue_cmd *req = 3717 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 3718 int ret; 3719 3720 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 3721 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); 3722 req->stream_id = cpu_to_le16(stream_id); 3723 req->enable |= enable << HCLGE_TQP_ENABLE_B; 3724 3725 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3726 if (ret) 3727 dev_err(&hdev->pdev->dev, 3728 "Tqp enable fail, status =%d.\n", ret); 3729 return ret; 3730 } 3731 3732 static void hclge_reset_tqp_stats(struct hnae3_handle *handle) 3733 { 3734 struct hclge_vport *vport = hclge_get_vport(handle); 3735 struct hnae3_queue *queue; 3736 struct hclge_tqp *tqp; 3737 int i; 3738 3739 for (i = 0; i < vport->alloc_tqps; i++) { 3740 queue = handle->kinfo.tqp[i]; 3741 tqp = container_of(queue, struct hclge_tqp, q); 3742 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 3743 } 3744 } 3745 3746 static int hclge_ae_start(struct hnae3_handle *handle) 3747 { 3748 struct hclge_vport *vport = hclge_get_vport(handle); 3749 struct hclge_dev *hdev = vport->back; 3750 int i, ret; 3751 3752 for (i = 0; i < vport->alloc_tqps; i++) 3753 hclge_tqp_enable(hdev, i, 0, true); 3754 3755 /* mac enable */ 3756 hclge_cfg_mac_mode(hdev, true); 3757 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 3758 mod_timer(&hdev->service_timer, jiffies + HZ); 3759 hdev->hw.mac.link = 0; 3760 3761 /* reset tqp stats */ 3762 hclge_reset_tqp_stats(handle); 3763 3764 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3765 return 0; 3766 3767 ret = hclge_mac_start_phy(hdev); 3768 if (ret) 3769 return ret; 3770 3771 return 0; 3772 } 3773 3774 static void hclge_ae_stop(struct hnae3_handle *handle) 3775 { 3776 struct hclge_vport *vport = hclge_get_vport(handle); 3777 struct hclge_dev *hdev = vport->back; 3778 int i; 3779 3780 del_timer_sync(&hdev->service_timer); 3781 cancel_work_sync(&hdev->service_task); 3782 3783 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3784 return; 3785 3786 for (i = 0; i < vport->alloc_tqps; i++) 3787 hclge_tqp_enable(hdev, i, 0, false); 3788 3789 /* Mac disable */ 3790 hclge_cfg_mac_mode(hdev, false); 3791 3792 hclge_mac_stop_phy(hdev); 3793 3794 /* reset tqp stats */ 3795 hclge_reset_tqp_stats(handle); 3796 } 3797 3798 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 3799 u16 cmdq_resp, u8 resp_code, 3800 enum hclge_mac_vlan_tbl_opcode op) 3801 { 3802 struct hclge_dev *hdev = vport->back; 3803 int 
return_status = -EIO; 3804 3805 if (cmdq_resp) { 3806 dev_err(&hdev->pdev->dev, 3807 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", 3808 cmdq_resp); 3809 return -EIO; 3810 } 3811 3812 if (op == HCLGE_MAC_VLAN_ADD) { 3813 if ((!resp_code) || (resp_code == 1)) { 3814 return_status = 0; 3815 } else if (resp_code == 2) { 3816 return_status = -ENOSPC; 3817 dev_err(&hdev->pdev->dev, 3818 "add mac addr failed for uc_overflow.\n"); 3819 } else if (resp_code == 3) { 3820 return_status = -ENOSPC; 3821 dev_err(&hdev->pdev->dev, 3822 "add mac addr failed for mc_overflow.\n"); 3823 } else { 3824 dev_err(&hdev->pdev->dev, 3825 "add mac addr failed for undefined, code=%d.\n", 3826 resp_code); 3827 } 3828 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 3829 if (!resp_code) { 3830 return_status = 0; 3831 } else if (resp_code == 1) { 3832 return_status = -ENOENT; 3833 dev_dbg(&hdev->pdev->dev, 3834 "remove mac addr failed for miss.\n"); 3835 } else { 3836 dev_err(&hdev->pdev->dev, 3837 "remove mac addr failed for undefined, code=%d.\n", 3838 resp_code); 3839 } 3840 } else if (op == HCLGE_MAC_VLAN_LKUP) { 3841 if (!resp_code) { 3842 return_status = 0; 3843 } else if (resp_code == 1) { 3844 return_status = -ENOENT; 3845 dev_dbg(&hdev->pdev->dev, 3846 "lookup mac addr failed for miss.\n"); 3847 } else { 3848 dev_err(&hdev->pdev->dev, 3849 "lookup mac addr failed for undefined, code=%d.\n", 3850 resp_code); 3851 } 3852 } else { 3853 return_status = -EINVAL; 3854 dev_err(&hdev->pdev->dev, 3855 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", 3856 op); 3857 } 3858 3859 return return_status; 3860 } 3861 3862 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 3863 { 3864 int word_num; 3865 int bit_num; 3866 3867 if (vfid > 255 || vfid < 0) 3868 return -EIO; 3869 3870 if (vfid >= 0 && vfid <= 191) { 3871 word_num = vfid / 32; 3872 bit_num = vfid % 32; 3873 if (clr) 3874 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3875 else 3876 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 3877 } else { 3878 word_num = (vfid - 192) / 32; 3879 bit_num = vfid % 32; 3880 if (clr) 3881 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3882 else 3883 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 3884 } 3885 3886 return 0; 3887 } 3888 3889 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 3890 { 3891 #define HCLGE_DESC_NUMBER 3 3892 #define HCLGE_FUNC_NUMBER_PER_DESC 6 3893 int i, j; 3894 3895 for (i = 0; i < HCLGE_DESC_NUMBER; i++) 3896 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 3897 if (desc[i].data[j]) 3898 return false; 3899 3900 return true; 3901 } 3902 3903 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 3904 const u8 *addr) 3905 { 3906 const unsigned char *mac_addr = addr; 3907 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 3908 (mac_addr[0]) | (mac_addr[1] << 8); 3909 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 3910 3911 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 3912 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 3913 } 3914 3915 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, 3916 const u8 *addr) 3917 { 3918 u16 high_val = addr[1] | (addr[0] << 8); 3919 struct hclge_dev *hdev = vport->back; 3920 u32 rsh = 4 - hdev->mta_mac_sel_type; 3921 u16 ret_val = (high_val >> rsh) & 0xfff; 3922 3923 return ret_val; 3924 } 3925 3926 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 3927 enum hclge_mta_dmac_sel_type mta_mac_sel, 3928 bool enable) 
3929 { 3930 struct hclge_mta_filter_mode_cmd *req; 3931 struct hclge_desc desc; 3932 int ret; 3933 3934 req = (struct hclge_mta_filter_mode_cmd *)desc.data; 3935 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); 3936 3937 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, 3938 enable); 3939 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, 3940 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); 3941 3942 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3943 if (ret) { 3944 dev_err(&hdev->pdev->dev, 3945 "Config mat filter mode failed for cmd_send, ret =%d.\n", 3946 ret); 3947 return ret; 3948 } 3949 3950 return 0; 3951 } 3952 3953 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, 3954 u8 func_id, 3955 bool enable) 3956 { 3957 struct hclge_cfg_func_mta_filter_cmd *req; 3958 struct hclge_desc desc; 3959 int ret; 3960 3961 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; 3962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); 3963 3964 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, 3965 enable); 3966 req->function_id = func_id; 3967 3968 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3969 if (ret) { 3970 dev_err(&hdev->pdev->dev, 3971 "Config func_id enable failed for cmd_send, ret =%d.\n", 3972 ret); 3973 return ret; 3974 } 3975 3976 return 0; 3977 } 3978 3979 static int hclge_set_mta_table_item(struct hclge_vport *vport, 3980 u16 idx, 3981 bool enable) 3982 { 3983 struct hclge_dev *hdev = vport->back; 3984 struct hclge_cfg_func_mta_item_cmd *req; 3985 struct hclge_desc desc; 3986 u16 item_idx = 0; 3987 int ret; 3988 3989 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; 3990 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); 3991 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); 3992 3993 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, 3994 HCLGE_CFG_MTA_ITEM_IDX_S, idx); 3995 req->item_idx = cpu_to_le16(item_idx); 3996 3997 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3998 if (ret) { 3999 dev_err(&hdev->pdev->dev, 4000 "Config mta table item failed for cmd_send, ret =%d.\n", 4001 ret); 4002 return ret; 4003 } 4004 4005 return 0; 4006 } 4007 4008 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 4009 struct hclge_mac_vlan_tbl_entry_cmd *req) 4010 { 4011 struct hclge_dev *hdev = vport->back; 4012 struct hclge_desc desc; 4013 u8 resp_code; 4014 u16 retval; 4015 int ret; 4016 4017 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 4018 4019 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4020 4021 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4022 if (ret) { 4023 dev_err(&hdev->pdev->dev, 4024 "del mac addr failed for cmd_send, ret =%d.\n", 4025 ret); 4026 return ret; 4027 } 4028 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4029 retval = le16_to_cpu(desc.retval); 4030 4031 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4032 HCLGE_MAC_VLAN_REMOVE); 4033 } 4034 4035 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 4036 struct hclge_mac_vlan_tbl_entry_cmd *req, 4037 struct hclge_desc *desc, 4038 bool is_mc) 4039 { 4040 struct hclge_dev *hdev = vport->back; 4041 u8 resp_code; 4042 u16 retval; 4043 int ret; 4044 4045 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 4046 if (is_mc) { 4047 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4048 memcpy(desc[0].data, 4049 req, 4050 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4051 hclge_cmd_setup_basic_desc(&desc[1], 4052 HCLGE_OPC_MAC_VLAN_ADD, 4053 true); 
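/* For a multicast entry the lookup is issued as three chained descriptors: desc[0] and desc[1] carry HCLGE_CMD_FLAG_NEXT so the firmware handles all three as one command, and the extra descriptors hold the per-function bitmap later consumed by hclge_update_desc_vfid() and hclge_is_all_function_id_zero(). */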
4054 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4055 hclge_cmd_setup_basic_desc(&desc[2], 4056 HCLGE_OPC_MAC_VLAN_ADD, 4057 true); 4058 ret = hclge_cmd_send(&hdev->hw, desc, 3); 4059 } else { 4060 memcpy(desc[0].data, 4061 req, 4062 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4063 ret = hclge_cmd_send(&hdev->hw, desc, 1); 4064 } 4065 if (ret) { 4066 dev_err(&hdev->pdev->dev, 4067 "lookup mac addr failed for cmd_send, ret =%d.\n", 4068 ret); 4069 return ret; 4070 } 4071 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 4072 retval = le16_to_cpu(desc[0].retval); 4073 4074 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4075 HCLGE_MAC_VLAN_LKUP); 4076 } 4077 4078 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 4079 struct hclge_mac_vlan_tbl_entry_cmd *req, 4080 struct hclge_desc *mc_desc) 4081 { 4082 struct hclge_dev *hdev = vport->back; 4083 int cfg_status; 4084 u8 resp_code; 4085 u16 retval; 4086 int ret; 4087 4088 if (!mc_desc) { 4089 struct hclge_desc desc; 4090 4091 hclge_cmd_setup_basic_desc(&desc, 4092 HCLGE_OPC_MAC_VLAN_ADD, 4093 false); 4094 memcpy(desc.data, req, 4095 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4096 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4097 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4098 retval = le16_to_cpu(desc.retval); 4099 4100 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 4101 resp_code, 4102 HCLGE_MAC_VLAN_ADD); 4103 } else { 4104 hclge_cmd_reuse_desc(&mc_desc[0], false); 4105 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4106 hclge_cmd_reuse_desc(&mc_desc[1], false); 4107 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4108 hclge_cmd_reuse_desc(&mc_desc[2], false); 4109 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 4110 memcpy(mc_desc[0].data, req, 4111 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4112 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 4113 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 4114 retval = le16_to_cpu(mc_desc[0].retval); 4115 4116 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 4117 resp_code, 4118 HCLGE_MAC_VLAN_ADD); 4119 } 4120 4121 if (ret) { 4122 dev_err(&hdev->pdev->dev, 4123 "add mac addr failed for cmd_send, ret =%d.\n", 4124 ret); 4125 return ret; 4126 } 4127 4128 return cfg_status; 4129 } 4130 4131 static int hclge_add_uc_addr(struct hnae3_handle *handle, 4132 const unsigned char *addr) 4133 { 4134 struct hclge_vport *vport = hclge_get_vport(handle); 4135 4136 return hclge_add_uc_addr_common(vport, addr); 4137 } 4138 4139 int hclge_add_uc_addr_common(struct hclge_vport *vport, 4140 const unsigned char *addr) 4141 { 4142 struct hclge_dev *hdev = vport->back; 4143 struct hclge_mac_vlan_tbl_entry_cmd req; 4144 struct hclge_desc desc; 4145 u16 egress_port = 0; 4146 int ret; 4147 4148 /* mac addr check */ 4149 if (is_zero_ether_addr(addr) || 4150 is_broadcast_ether_addr(addr) || 4151 is_multicast_ether_addr(addr)) { 4152 dev_err(&hdev->pdev->dev, 4153 "Set_uc mac err! invalid mac:%pM. 
is_zero:%d,is_br=%d,is_mul=%d\n", 4154 addr, 4155 is_zero_ether_addr(addr), 4156 is_broadcast_ether_addr(addr), 4157 is_multicast_ether_addr(addr)); 4158 return -EINVAL; 4159 } 4160 4161 memset(&req, 0, sizeof(req)); 4162 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4163 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4164 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); 4165 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4166 4167 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); 4168 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); 4169 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 4170 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 4171 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, 4172 HCLGE_MAC_EPORT_PFID_S, 0); 4173 4174 req.egress_port = cpu_to_le16(egress_port); 4175 4176 hclge_prepare_mac_addr(&req, addr); 4177 4178 /* Lookup the mac address in the mac_vlan table, and add 4179 * it if the entry is inexistent. Repeated unicast entry 4180 * is not allowed in the mac vlan table. 4181 */ 4182 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 4183 if (ret == -ENOENT) 4184 return hclge_add_mac_vlan_tbl(vport, &req, NULL); 4185 4186 /* check if we just hit the duplicate */ 4187 if (!ret) 4188 ret = -EINVAL; 4189 4190 dev_err(&hdev->pdev->dev, 4191 "PF failed to add unicast entry(%pM) in the MAC table\n", 4192 addr); 4193 4194 return ret; 4195 } 4196 4197 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 4198 const unsigned char *addr) 4199 { 4200 struct hclge_vport *vport = hclge_get_vport(handle); 4201 4202 return hclge_rm_uc_addr_common(vport, addr); 4203 } 4204 4205 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 4206 const unsigned char *addr) 4207 { 4208 struct hclge_dev *hdev = vport->back; 4209 struct hclge_mac_vlan_tbl_entry_cmd req; 4210 int ret; 4211 4212 /* mac addr check */ 4213 if (is_zero_ether_addr(addr) || 4214 is_broadcast_ether_addr(addr) || 4215 is_multicast_ether_addr(addr)) { 4216 dev_dbg(&hdev->pdev->dev, 4217 "Remove mac err! invalid mac:%pM.\n", 4218 addr); 4219 return -EINVAL; 4220 } 4221 4222 memset(&req, 0, sizeof(req)); 4223 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4224 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4225 hclge_prepare_mac_addr(&req, addr); 4226 ret = hclge_remove_mac_vlan_tbl(vport, &req); 4227 4228 return ret; 4229 } 4230 4231 static int hclge_add_mc_addr(struct hnae3_handle *handle, 4232 const unsigned char *addr) 4233 { 4234 struct hclge_vport *vport = hclge_get_vport(handle); 4235 4236 return hclge_add_mc_addr_common(vport, addr); 4237 } 4238 4239 int hclge_add_mc_addr_common(struct hclge_vport *vport, 4240 const unsigned char *addr) 4241 { 4242 struct hclge_dev *hdev = vport->back; 4243 struct hclge_mac_vlan_tbl_entry_cmd req; 4244 struct hclge_desc desc[3]; 4245 u16 tbl_idx; 4246 int status; 4247 4248 /* mac addr check */ 4249 if (!is_multicast_ether_addr(addr)) { 4250 dev_err(&hdev->pdev->dev, 4251 "Add mc mac err! 
invalid mac:%pM.\n", 4252 addr); 4253 return -EINVAL; 4254 } 4255 memset(&req, 0, sizeof(req)); 4256 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4257 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4258 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4259 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4260 hclge_prepare_mac_addr(&req, addr); 4261 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4262 if (!status) { 4263 /* This mac addr exists, update the VFID for it */ 4264 hclge_update_desc_vfid(desc, vport->vport_id, false); 4265 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4266 } else { 4267 /* This mac addr does not exist, add a new entry for it */ 4268 memset(desc[0].data, 0, sizeof(desc[0].data)); 4269 memset(desc[1].data, 0, sizeof(desc[0].data)); 4270 memset(desc[2].data, 0, sizeof(desc[0].data)); 4271 hclge_update_desc_vfid(desc, vport->vport_id, false); 4272 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4273 } 4274 4275 /* Set MTA table for this MAC address */ 4276 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 4277 status = hclge_set_mta_table_item(vport, tbl_idx, true); 4278 4279 return status; 4280 } 4281 4282 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 4283 const unsigned char *addr) 4284 { 4285 struct hclge_vport *vport = hclge_get_vport(handle); 4286 4287 return hclge_rm_mc_addr_common(vport, addr); 4288 } 4289 4290 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 4291 const unsigned char *addr) 4292 { 4293 struct hclge_dev *hdev = vport->back; 4294 struct hclge_mac_vlan_tbl_entry_cmd req; 4295 enum hclge_cmd_status status; 4296 struct hclge_desc desc[3]; 4297 u16 tbl_idx; 4298 4299 /* mac addr check */ 4300 if (!is_multicast_ether_addr(addr)) { 4301 dev_dbg(&hdev->pdev->dev, 4302 "Remove mc mac err! 
invalid mac:%pM.\n", 4303 addr); 4304 return -EINVAL; 4305 } 4306 4307 memset(&req, 0, sizeof(req)); 4308 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4309 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4310 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4311 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4312 hclge_prepare_mac_addr(&req, addr); 4313 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4314 if (!status) { 4315 /* This mac addr exists, remove this handle's VFID for it */ 4316 hclge_update_desc_vfid(desc, vport->vport_id, true); 4317 4318 if (hclge_is_all_function_id_zero(desc)) 4319 /* All the vfids are zero, so delete this entry */ 4320 status = hclge_remove_mac_vlan_tbl(vport, &req); 4321 else 4322 /* Not all the vfids are zero, update the vfid */ 4323 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4324 4325 } else { 4326 /* This mac addr does not exist, so it can't be deleted */ 4327 dev_err(&hdev->pdev->dev, 4328 "Rm multicast mac addr failed, ret = %d.\n", 4329 status); 4330 return -EIO; 4331 } 4332 4333 /* Set MTA table for this MAC address */ 4334 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 4335 status = hclge_set_mta_table_item(vport, tbl_idx, false); 4336 4337 return status; 4338 } 4339 4340 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 4341 u16 cmdq_resp, u8 resp_code) 4342 { 4343 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 4344 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 4345 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 4346 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 4347 4348 int return_status; 4349 4350 if (cmdq_resp) { 4351 dev_err(&hdev->pdev->dev, 4352 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", 4353 cmdq_resp); 4354 return -EIO; 4355 } 4356 4357 switch (resp_code) { 4358 case HCLGE_ETHERTYPE_SUCCESS_ADD: 4359 case HCLGE_ETHERTYPE_ALREADY_ADD: 4360 return_status = 0; 4361 break; 4362 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 4363 dev_err(&hdev->pdev->dev, 4364 "add mac ethertype failed for manager table overflow.\n"); 4365 return_status = -EIO; 4366 break; 4367 case HCLGE_ETHERTYPE_KEY_CONFLICT: 4368 dev_err(&hdev->pdev->dev, 4369 "add mac ethertype failed for key conflict.\n"); 4370 return_status = -EIO; 4371 break; 4372 default: 4373 dev_err(&hdev->pdev->dev, 4374 "add mac ethertype failed for undefined, code=%d.\n", 4375 resp_code); 4376 return_status = -EIO; 4377 } 4378 4379 return return_status; 4380 } 4381 4382 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 4383 const struct hclge_mac_mgr_tbl_entry_cmd *req) 4384 { 4385 struct hclge_desc desc; 4386 u8 resp_code; 4387 u16 retval; 4388 int ret; 4389 4390 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 4391 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 4392 4393 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4394 if (ret) { 4395 dev_err(&hdev->pdev->dev, 4396 "add mac ethertype failed for cmd_send, ret =%d.\n", 4397 ret); 4398 return ret; 4399 } 4400 4401 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4402 retval = le16_to_cpu(desc.retval); 4403 4404 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 4405 } 4406 4407 static int init_mgr_tbl(struct hclge_dev *hdev) 4408 { 4409 int ret; 4410 int i; 4411 4412 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 4413 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 4414 if (ret) { 4415 dev_err(&hdev->pdev->dev, 4416 "add mac ethertype failed, ret =%d.\n", 4417 ret); 4418 return ret; 4419 } 4420 }
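/* all entries of hclge_mgr_table have been written to the MAC manager table */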
4421 4422 return 0; 4423 } 4424 4425 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 4426 { 4427 struct hclge_vport *vport = hclge_get_vport(handle); 4428 struct hclge_dev *hdev = vport->back; 4429 4430 ether_addr_copy(p, hdev->hw.mac.mac_addr); 4431 } 4432 4433 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, 4434 bool is_first) 4435 { 4436 const unsigned char *new_addr = (const unsigned char *)p; 4437 struct hclge_vport *vport = hclge_get_vport(handle); 4438 struct hclge_dev *hdev = vport->back; 4439 int ret; 4440 4441 /* mac addr check */ 4442 if (is_zero_ether_addr(new_addr) || 4443 is_broadcast_ether_addr(new_addr) || 4444 is_multicast_ether_addr(new_addr)) { 4445 dev_err(&hdev->pdev->dev, 4446 "Change uc mac err! invalid mac:%p.\n", 4447 new_addr); 4448 return -EINVAL; 4449 } 4450 4451 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) 4452 dev_warn(&hdev->pdev->dev, 4453 "remove old uc mac address fail.\n"); 4454 4455 ret = hclge_add_uc_addr(handle, new_addr); 4456 if (ret) { 4457 dev_err(&hdev->pdev->dev, 4458 "add uc mac address fail, ret =%d.\n", 4459 ret); 4460 4461 if (!is_first && 4462 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) 4463 dev_err(&hdev->pdev->dev, 4464 "restore uc mac address fail.\n"); 4465 4466 return -EIO; 4467 } 4468 4469 ret = hclge_pause_addr_cfg(hdev, new_addr); 4470 if (ret) { 4471 dev_err(&hdev->pdev->dev, 4472 "configure mac pause address fail, ret =%d.\n", 4473 ret); 4474 return -EIO; 4475 } 4476 4477 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 4478 4479 return 0; 4480 } 4481 4482 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 4483 bool filter_en) 4484 { 4485 struct hclge_vlan_filter_ctrl_cmd *req; 4486 struct hclge_desc desc; 4487 int ret; 4488 4489 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 4490 4491 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 4492 req->vlan_type = vlan_type; 4493 req->vlan_fe = filter_en; 4494 4495 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4496 if (ret) { 4497 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 4498 ret); 4499 return ret; 4500 } 4501 4502 return 0; 4503 } 4504 4505 #define HCLGE_FILTER_TYPE_VF 0 4506 #define HCLGE_FILTER_TYPE_PORT 1 4507 4508 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 4509 { 4510 struct hclge_vport *vport = hclge_get_vport(handle); 4511 struct hclge_dev *hdev = vport->back; 4512 4513 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); 4514 } 4515 4516 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 4517 bool is_kill, u16 vlan, u8 qos, 4518 __be16 proto) 4519 { 4520 #define HCLGE_MAX_VF_BYTES 16 4521 struct hclge_vlan_filter_vf_cfg_cmd *req0; 4522 struct hclge_vlan_filter_vf_cfg_cmd *req1; 4523 struct hclge_desc desc[2]; 4524 u8 vf_byte_val; 4525 u8 vf_byte_off; 4526 int ret; 4527 4528 hclge_cmd_setup_basic_desc(&desc[0], 4529 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4530 hclge_cmd_setup_basic_desc(&desc[1], 4531 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4532 4533 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4534 4535 vf_byte_off = vfid / 8; 4536 vf_byte_val = 1 << (vfid % 8); 4537 4538 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 4539 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 4540 4541 req0->vlan_id = cpu_to_le16(vlan); 4542 req0->vlan_cfg = is_kill; 4543 4544 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 4545 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 4546 else 4547 
req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 4548 4549 ret = hclge_cmd_send(&hdev->hw, desc, 2); 4550 if (ret) { 4551 dev_err(&hdev->pdev->dev, 4552 "Send vf vlan command fail, ret =%d.\n", 4553 ret); 4554 return ret; 4555 } 4556 4557 if (!is_kill) { 4558 if (!req0->resp_code || req0->resp_code == 1) 4559 return 0; 4560 4561 dev_err(&hdev->pdev->dev, 4562 "Add vf vlan filter fail, ret =%d.\n", 4563 req0->resp_code); 4564 } else { 4565 if (!req0->resp_code) 4566 return 0; 4567 4568 dev_err(&hdev->pdev->dev, 4569 "Kill vf vlan filter fail, ret =%d.\n", 4570 req0->resp_code); 4571 } 4572 4573 return -EIO; 4574 } 4575 4576 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 4577 u16 vlan_id, bool is_kill) 4578 { 4579 struct hclge_vlan_filter_pf_cfg_cmd *req; 4580 struct hclge_desc desc; 4581 u8 vlan_offset_byte_val; 4582 u8 vlan_offset_byte; 4583 u8 vlan_offset_160; 4584 int ret; 4585 4586 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 4587 4588 vlan_offset_160 = vlan_id / 160; 4589 vlan_offset_byte = (vlan_id % 160) / 8; 4590 vlan_offset_byte_val = 1 << (vlan_id % 8); 4591 4592 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 4593 req->vlan_offset = vlan_offset_160; 4594 req->vlan_cfg = is_kill; 4595 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 4596 4597 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4598 if (ret) 4599 dev_err(&hdev->pdev->dev, 4600 "port vlan command, send fail, ret =%d.\n", ret); 4601 return ret; 4602 } 4603 4604 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 4605 u16 vport_id, u16 vlan_id, u8 qos, 4606 bool is_kill) 4607 { 4608 u16 vport_idx, vport_num = 0; 4609 int ret; 4610 4611 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 4612 0, proto); 4613 if (ret) { 4614 dev_err(&hdev->pdev->dev, 4615 "Set %d vport vlan filter config fail, ret =%d.\n", 4616 vport_id, ret); 4617 return ret; 4618 } 4619 4620 /* vlan 0 may be added twice when 8021q module is enabled */ 4621 if (!is_kill && !vlan_id && 4622 test_bit(vport_id, hdev->vlan_table[vlan_id])) 4623 return 0; 4624 4625 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 4626 dev_err(&hdev->pdev->dev, 4627 "Add port vlan failed, vport %d is already in vlan %d\n", 4628 vport_id, vlan_id); 4629 return -EINVAL; 4630 } 4631 4632 if (is_kill && 4633 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 4634 dev_err(&hdev->pdev->dev, 4635 "Delete port vlan failed, vport %d is not in vlan %d\n", 4636 vport_id, vlan_id); 4637 return -EINVAL; 4638 } 4639 4640 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) 4641 vport_num++; 4642 4643 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 4644 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 4645 is_kill); 4646 4647 return ret; 4648 } 4649 4650 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 4651 u16 vlan_id, bool is_kill) 4652 { 4653 struct hclge_vport *vport = hclge_get_vport(handle); 4654 struct hclge_dev *hdev = vport->back; 4655 4656 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, 4657 0, is_kill); 4658 } 4659 4660 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 4661 u16 vlan, u8 qos, __be16 proto) 4662 { 4663 struct hclge_vport *vport = hclge_get_vport(handle); 4664 struct hclge_dev *hdev = vport->back; 4665 4666 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) 4667 return -EINVAL; 4668 if (proto != 
htons(ETH_P_8021Q)) 4669 return -EPROTONOSUPPORT; 4670 4671 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); 4672 } 4673 4674 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 4675 { 4676 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 4677 struct hclge_vport_vtag_tx_cfg_cmd *req; 4678 struct hclge_dev *hdev = vport->back; 4679 struct hclge_desc desc; 4680 int status; 4681 4682 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 4683 4684 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; 4685 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 4686 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 4687 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, 4688 vcfg->accept_tag ? 1 : 0); 4689 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, 4690 vcfg->accept_untag ? 1 : 0); 4691 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 4692 vcfg->insert_tag1_en ? 1 : 0); 4693 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 4694 vcfg->insert_tag2_en ? 1 : 0); 4695 hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 4696 4697 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4698 req->vf_bitmap[req->vf_offset] = 4699 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4700 4701 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4702 if (status) 4703 dev_err(&hdev->pdev->dev, 4704 "Send port txvlan cfg command fail, ret =%d\n", 4705 status); 4706 4707 return status; 4708 } 4709 4710 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 4711 { 4712 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 4713 struct hclge_vport_vtag_rx_cfg_cmd *req; 4714 struct hclge_dev *hdev = vport->back; 4715 struct hclge_desc desc; 4716 int status; 4717 4718 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 4719 4720 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 4721 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 4722 vcfg->strip_tag1_en ? 1 : 0); 4723 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 4724 vcfg->strip_tag2_en ? 1 : 0); 4725 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 4726 vcfg->vlan1_vlan_prionly ? 1 : 0); 4727 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 4728 vcfg->vlan2_vlan_prionly ? 
1 : 0); 4729 4730 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4731 req->vf_bitmap[req->vf_offset] = 4732 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4733 4734 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4735 if (status) 4736 dev_err(&hdev->pdev->dev, 4737 "Send port rxvlan cfg command fail, ret =%d\n", 4738 status); 4739 4740 return status; 4741 } 4742 4743 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 4744 { 4745 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 4746 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 4747 struct hclge_desc desc; 4748 int status; 4749 4750 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 4751 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 4752 rx_req->ot_fst_vlan_type = 4753 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 4754 rx_req->ot_sec_vlan_type = 4755 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 4756 rx_req->in_fst_vlan_type = 4757 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 4758 rx_req->in_sec_vlan_type = 4759 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 4760 4761 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4762 if (status) { 4763 dev_err(&hdev->pdev->dev, 4764 "Send rxvlan protocol type command fail, ret =%d\n", 4765 status); 4766 return status; 4767 } 4768 4769 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 4770 4771 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; 4772 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 4773 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 4774 4775 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4776 if (status) 4777 dev_err(&hdev->pdev->dev, 4778 "Send txvlan protocol type command fail, ret =%d\n", 4779 status); 4780 4781 return status; 4782 } 4783 4784 static int hclge_init_vlan_config(struct hclge_dev *hdev) 4785 { 4786 #define HCLGE_DEF_VLAN_TYPE 0x8100 4787 4788 struct hnae3_handle *handle; 4789 struct hclge_vport *vport; 4790 int ret; 4791 int i; 4792 4793 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); 4794 if (ret) 4795 return ret; 4796 4797 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); 4798 if (ret) 4799 return ret; 4800 4801 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4802 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4803 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4804 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4805 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; 4806 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; 4807 4808 ret = hclge_set_vlan_protocol_type(hdev); 4809 if (ret) 4810 return ret; 4811 4812 for (i = 0; i < hdev->num_alloc_vport; i++) { 4813 vport = &hdev->vport[i]; 4814 vport->txvlan_cfg.accept_tag = true; 4815 vport->txvlan_cfg.accept_untag = true; 4816 vport->txvlan_cfg.insert_tag1_en = false; 4817 vport->txvlan_cfg.insert_tag2_en = false; 4818 vport->txvlan_cfg.default_tag1 = 0; 4819 vport->txvlan_cfg.default_tag2 = 0; 4820 4821 ret = hclge_set_vlan_tx_offload_cfg(vport); 4822 if (ret) 4823 return ret; 4824 4825 vport->rxvlan_cfg.strip_tag1_en = false; 4826 vport->rxvlan_cfg.strip_tag2_en = true; 4827 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4828 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4829 4830 ret = hclge_set_vlan_rx_offload_cfg(vport); 4831 if (ret) 4832 return ret; 4833 } 4834 4835 handle = &hdev->vport[0].nic; 4836 return hclge_set_vlan_filter(handle, 
htons(ETH_P_8021Q), 0, false); 4837 } 4838 4839 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 4840 { 4841 struct hclge_vport *vport = hclge_get_vport(handle); 4842 4843 vport->rxvlan_cfg.strip_tag1_en = false; 4844 vport->rxvlan_cfg.strip_tag2_en = enable; 4845 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4846 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4847 4848 return hclge_set_vlan_rx_offload_cfg(vport); 4849 } 4850 4851 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) 4852 { 4853 struct hclge_config_max_frm_size_cmd *req; 4854 struct hclge_desc desc; 4855 int max_frm_size; 4856 int ret; 4857 4858 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4859 4860 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 4861 max_frm_size > HCLGE_MAC_MAX_FRAME) 4862 return -EINVAL; 4863 4864 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 4865 4866 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 4867 4868 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 4869 req->max_frm_size = cpu_to_le16(max_frm_size); 4870 4871 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4872 if (ret) { 4873 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); 4874 return ret; 4875 } 4876 4877 hdev->mps = max_frm_size; 4878 4879 return 0; 4880 } 4881 4882 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 4883 { 4884 struct hclge_vport *vport = hclge_get_vport(handle); 4885 struct hclge_dev *hdev = vport->back; 4886 int ret; 4887 4888 ret = hclge_set_mac_mtu(hdev, new_mtu); 4889 if (ret) { 4890 dev_err(&hdev->pdev->dev, 4891 "Change mtu fail, ret =%d\n", ret); 4892 return ret; 4893 } 4894 4895 ret = hclge_buffer_alloc(hdev); 4896 if (ret) 4897 dev_err(&hdev->pdev->dev, 4898 "Allocate buffer fail, ret =%d\n", ret); 4899 4900 return ret; 4901 } 4902 4903 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, 4904 bool enable) 4905 { 4906 struct hclge_reset_tqp_queue_cmd *req; 4907 struct hclge_desc desc; 4908 int ret; 4909 4910 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 4911 4912 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 4913 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 4914 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); 4915 4916 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4917 if (ret) { 4918 dev_err(&hdev->pdev->dev, 4919 "Send tqp reset cmd error, status =%d\n", ret); 4920 return ret; 4921 } 4922 4923 return 0; 4924 } 4925 4926 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 4927 { 4928 struct hclge_reset_tqp_queue_cmd *req; 4929 struct hclge_desc desc; 4930 int ret; 4931 4932 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 4933 4934 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 4935 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 4936 4937 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4938 if (ret) { 4939 dev_err(&hdev->pdev->dev, 4940 "Get reset status error, status =%d\n", ret); 4941 return ret; 4942 } 4943 4944 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 4945 } 4946 4947 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, 4948 u16 queue_id) 4949 { 4950 struct hnae3_queue *queue; 4951 struct hclge_tqp *tqp; 4952 4953 queue = handle->kinfo.tqp[queue_id]; 4954 tqp = container_of(queue, struct hclge_tqp, q); 4955 4956 return tqp->index; 4957 } 4958 4959 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 4960 { 4961 struct hclge_vport 
*vport = hclge_get_vport(handle); 4962 struct hclge_dev *hdev = vport->back; 4963 int reset_try_times = 0; 4964 int reset_status; 4965 u16 queue_gid; 4966 int ret; 4967 4968 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 4969 return; 4970 4971 queue_gid = hclge_covert_handle_qid_global(handle, queue_id); 4972 4973 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 4974 if (ret) { 4975 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); 4976 return; 4977 } 4978 4979 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 4980 if (ret) { 4981 dev_warn(&hdev->pdev->dev, 4982 "Send reset tqp cmd fail, ret = %d\n", ret); 4983 return; 4984 } 4985 4986 reset_try_times = 0; 4987 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 4988 /* Wait for tqp hw reset */ 4989 msleep(20); 4990 reset_status = hclge_get_reset_status(hdev, queue_gid); 4991 if (reset_status) 4992 break; 4993 } 4994 4995 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 4996 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 4997 return; 4998 } 4999 5000 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 5001 if (ret) { 5002 dev_warn(&hdev->pdev->dev, 5003 "Deassert the soft reset fail, ret = %d\n", ret); 5004 return; 5005 } 5006 } 5007 5008 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) 5009 { 5010 struct hclge_dev *hdev = vport->back; 5011 int reset_try_times = 0; 5012 int reset_status; 5013 u16 queue_gid; 5014 int ret; 5015 5016 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); 5017 5018 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 5019 if (ret) { 5020 dev_warn(&hdev->pdev->dev, 5021 "Send reset tqp cmd fail, ret = %d\n", ret); 5022 return; 5023 } 5024 5025 reset_try_times = 0; 5026 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 5027 /* Wait for tqp hw reset */ 5028 msleep(20); 5029 reset_status = hclge_get_reset_status(hdev, queue_gid); 5030 if (reset_status) 5031 break; 5032 } 5033 5034 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 5035 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 5036 return; 5037 } 5038 5039 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 5040 if (ret) 5041 dev_warn(&hdev->pdev->dev, 5042 "Deassert the soft reset fail, ret = %d\n", ret); 5043 } 5044 5045 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 5046 { 5047 struct hclge_vport *vport = hclge_get_vport(handle); 5048 struct hclge_dev *hdev = vport->back; 5049 5050 return hdev->fw_version; 5051 } 5052 5053 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, 5054 u32 *flowctrl_adv) 5055 { 5056 struct hclge_vport *vport = hclge_get_vport(handle); 5057 struct hclge_dev *hdev = vport->back; 5058 struct phy_device *phydev = hdev->hw.mac.phydev; 5059 5060 if (!phydev) 5061 return; 5062 5063 *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | 5064 (phydev->advertising & ADVERTISED_Asym_Pause); 5065 } 5066 5067 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5068 { 5069 struct phy_device *phydev = hdev->hw.mac.phydev; 5070 5071 if (!phydev) 5072 return; 5073 5074 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); 5075 5076 if (rx_en) 5077 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 5078 5079 if (tx_en) 5080 phydev->advertising ^= ADVERTISED_Asym_Pause; 5081 } 5082 5083 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5084 { 5085 int ret; 5086 5087 if (rx_en && tx_en) 5088 hdev->fc_mode_last_time = HCLGE_FC_FULL; 5089 else if (rx_en 
&& !tx_en) 5090 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 5091 else if (!rx_en && tx_en) 5092 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 5093 else 5094 hdev->fc_mode_last_time = HCLGE_FC_NONE; 5095 5096 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 5097 return 0; 5098 5099 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 5100 if (ret) { 5101 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", 5102 ret); 5103 return ret; 5104 } 5105 5106 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 5107 5108 return 0; 5109 } 5110 5111 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 5112 { 5113 struct phy_device *phydev = hdev->hw.mac.phydev; 5114 u16 remote_advertising = 0; 5115 u16 local_advertising = 0; 5116 u32 rx_pause, tx_pause; 5117 u8 flowctl; 5118 5119 if (!phydev->link || !phydev->autoneg) 5120 return 0; 5121 5122 if (phydev->advertising & ADVERTISED_Pause) 5123 local_advertising = ADVERTISE_PAUSE_CAP; 5124 5125 if (phydev->advertising & ADVERTISED_Asym_Pause) 5126 local_advertising |= ADVERTISE_PAUSE_ASYM; 5127 5128 if (phydev->pause) 5129 remote_advertising = LPA_PAUSE_CAP; 5130 5131 if (phydev->asym_pause) 5132 remote_advertising |= LPA_PAUSE_ASYM; 5133 5134 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 5135 remote_advertising); 5136 tx_pause = flowctl & FLOW_CTRL_TX; 5137 rx_pause = flowctl & FLOW_CTRL_RX; 5138 5139 if (phydev->duplex == HCLGE_MAC_HALF) { 5140 tx_pause = 0; 5141 rx_pause = 0; 5142 } 5143 5144 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 5145 } 5146 5147 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 5148 u32 *rx_en, u32 *tx_en) 5149 { 5150 struct hclge_vport *vport = hclge_get_vport(handle); 5151 struct hclge_dev *hdev = vport->back; 5152 5153 *auto_neg = hclge_get_autoneg(handle); 5154 5155 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 5156 *rx_en = 0; 5157 *tx_en = 0; 5158 return; 5159 } 5160 5161 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 5162 *rx_en = 1; 5163 *tx_en = 0; 5164 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 5165 *tx_en = 1; 5166 *rx_en = 0; 5167 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 5168 *rx_en = 1; 5169 *tx_en = 1; 5170 } else { 5171 *rx_en = 0; 5172 *tx_en = 0; 5173 } 5174 } 5175 5176 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 5177 u32 rx_en, u32 tx_en) 5178 { 5179 struct hclge_vport *vport = hclge_get_vport(handle); 5180 struct hclge_dev *hdev = vport->back; 5181 struct phy_device *phydev = hdev->hw.mac.phydev; 5182 u32 fc_autoneg; 5183 5184 fc_autoneg = hclge_get_autoneg(handle); 5185 if (auto_neg != fc_autoneg) { 5186 dev_info(&hdev->pdev->dev, 5187 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 5188 return -EOPNOTSUPP; 5189 } 5190 5191 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 5192 dev_info(&hdev->pdev->dev, 5193 "Priority flow control enabled. Cannot set link flow control.\n"); 5194 return -EOPNOTSUPP; 5195 } 5196 5197 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 5198 5199 if (!fc_autoneg) 5200 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 5201 5202 /* Only support flow control negotiation for netdev with 5203 * phy attached for now. 
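* With autoneg disabled the pause settings have already been applied directly via hclge_cfg_pauseparam() above.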
5204 */ 5205 if (!phydev) 5206 return -EOPNOTSUPP; 5207 5208 return phy_start_aneg(phydev); 5209 } 5210 5211 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 5212 u8 *auto_neg, u32 *speed, u8 *duplex) 5213 { 5214 struct hclge_vport *vport = hclge_get_vport(handle); 5215 struct hclge_dev *hdev = vport->back; 5216 5217 if (speed) 5218 *speed = hdev->hw.mac.speed; 5219 if (duplex) 5220 *duplex = hdev->hw.mac.duplex; 5221 if (auto_neg) 5222 *auto_neg = hdev->hw.mac.autoneg; 5223 } 5224 5225 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) 5226 { 5227 struct hclge_vport *vport = hclge_get_vport(handle); 5228 struct hclge_dev *hdev = vport->back; 5229 5230 if (media_type) 5231 *media_type = hdev->hw.mac.media_type; 5232 } 5233 5234 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 5235 u8 *tp_mdix_ctrl, u8 *tp_mdix) 5236 { 5237 struct hclge_vport *vport = hclge_get_vport(handle); 5238 struct hclge_dev *hdev = vport->back; 5239 struct phy_device *phydev = hdev->hw.mac.phydev; 5240 int mdix_ctrl, mdix, retval, is_resolved; 5241 5242 if (!phydev) { 5243 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5244 *tp_mdix = ETH_TP_MDI_INVALID; 5245 return; 5246 } 5247 5248 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 5249 5250 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 5251 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 5252 HCLGE_PHY_MDIX_CTRL_S); 5253 5254 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 5255 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 5256 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 5257 5258 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 5259 5260 switch (mdix_ctrl) { 5261 case 0x0: 5262 *tp_mdix_ctrl = ETH_TP_MDI; 5263 break; 5264 case 0x1: 5265 *tp_mdix_ctrl = ETH_TP_MDI_X; 5266 break; 5267 case 0x3: 5268 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 5269 break; 5270 default: 5271 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5272 break; 5273 } 5274 5275 if (!is_resolved) 5276 *tp_mdix = ETH_TP_MDI_INVALID; 5277 else if (mdix) 5278 *tp_mdix = ETH_TP_MDI_X; 5279 else 5280 *tp_mdix = ETH_TP_MDI; 5281 } 5282 5283 static int hclge_init_client_instance(struct hnae3_client *client, 5284 struct hnae3_ae_dev *ae_dev) 5285 { 5286 struct hclge_dev *hdev = ae_dev->priv; 5287 struct hclge_vport *vport; 5288 int i, ret; 5289 5290 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5291 vport = &hdev->vport[i]; 5292 5293 switch (client->type) { 5294 case HNAE3_CLIENT_KNIC: 5295 5296 hdev->nic_client = client; 5297 vport->nic.client = client; 5298 ret = client->ops->init_instance(&vport->nic); 5299 if (ret) 5300 goto err; 5301 5302 if (hdev->roce_client && 5303 hnae3_dev_roce_supported(hdev)) { 5304 struct hnae3_client *rc = hdev->roce_client; 5305 5306 ret = hclge_init_roce_base_info(vport); 5307 if (ret) 5308 goto err; 5309 5310 ret = rc->ops->init_instance(&vport->roce); 5311 if (ret) 5312 goto err; 5313 } 5314 5315 break; 5316 case HNAE3_CLIENT_UNIC: 5317 hdev->nic_client = client; 5318 vport->nic.client = client; 5319 5320 ret = client->ops->init_instance(&vport->nic); 5321 if (ret) 5322 goto err; 5323 5324 break; 5325 case HNAE3_CLIENT_ROCE: 5326 if (hnae3_dev_roce_supported(hdev)) { 5327 hdev->roce_client = client; 5328 vport->roce.client = client; 5329 } 5330 5331 if (hdev->roce_client && hdev->nic_client) { 5332 ret = hclge_init_roce_base_info(vport); 5333 if (ret) 5334 goto err; 5335 5336 ret = client->ops->init_instance(&vport->roce); 5337 if (ret) 5338 goto err; 5339 } 5340 } 5341 } 5342 
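/* all vports have been walked; reaching here means every requested client instance was initialised without error */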
5343 return 0; 5344 err: 5345 return ret; 5346 } 5347 5348 static void hclge_uninit_client_instance(struct hnae3_client *client, 5349 struct hnae3_ae_dev *ae_dev) 5350 { 5351 struct hclge_dev *hdev = ae_dev->priv; 5352 struct hclge_vport *vport; 5353 int i; 5354 5355 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5356 vport = &hdev->vport[i]; 5357 if (hdev->roce_client) { 5358 hdev->roce_client->ops->uninit_instance(&vport->roce, 5359 0); 5360 hdev->roce_client = NULL; 5361 vport->roce.client = NULL; 5362 } 5363 if (client->type == HNAE3_CLIENT_ROCE) 5364 return; 5365 if (client->ops->uninit_instance) { 5366 client->ops->uninit_instance(&vport->nic, 0); 5367 hdev->nic_client = NULL; 5368 vport->nic.client = NULL; 5369 } 5370 } 5371 } 5372 5373 static int hclge_pci_init(struct hclge_dev *hdev) 5374 { 5375 struct pci_dev *pdev = hdev->pdev; 5376 struct hclge_hw *hw; 5377 int ret; 5378 5379 ret = pci_enable_device(pdev); 5380 if (ret) { 5381 dev_err(&pdev->dev, "failed to enable PCI device\n"); 5382 return ret; 5383 } 5384 5385 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 5386 if (ret) { 5387 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 5388 if (ret) { 5389 dev_err(&pdev->dev, 5390 "can't set consistent PCI DMA"); 5391 goto err_disable_device; 5392 } 5393 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 5394 } 5395 5396 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 5397 if (ret) { 5398 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 5399 goto err_disable_device; 5400 } 5401 5402 pci_set_master(pdev); 5403 hw = &hdev->hw; 5404 hw->back = hdev; 5405 hw->io_base = pcim_iomap(pdev, 2, 0); 5406 if (!hw->io_base) { 5407 dev_err(&pdev->dev, "Can't map configuration register space\n"); 5408 ret = -ENOMEM; 5409 goto err_clr_master; 5410 } 5411 5412 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 5413 5414 return 0; 5415 err_clr_master: 5416 pci_clear_master(pdev); 5417 pci_release_regions(pdev); 5418 err_disable_device: 5419 pci_disable_device(pdev); 5420 5421 return ret; 5422 } 5423 5424 static void hclge_pci_uninit(struct hclge_dev *hdev) 5425 { 5426 struct pci_dev *pdev = hdev->pdev; 5427 5428 pcim_iounmap(pdev, hdev->hw.io_base); 5429 pci_free_irq_vectors(pdev); 5430 pci_clear_master(pdev); 5431 pci_release_mem_regions(pdev); 5432 pci_disable_device(pdev); 5433 } 5434 5435 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 5436 { 5437 struct pci_dev *pdev = ae_dev->pdev; 5438 struct hclge_dev *hdev; 5439 int ret; 5440 5441 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 5442 if (!hdev) { 5443 ret = -ENOMEM; 5444 goto out; 5445 } 5446 5447 hdev->pdev = pdev; 5448 hdev->ae_dev = ae_dev; 5449 hdev->reset_type = HNAE3_NONE_RESET; 5450 hdev->reset_request = 0; 5451 hdev->reset_pending = 0; 5452 ae_dev->priv = hdev; 5453 5454 ret = hclge_pci_init(hdev); 5455 if (ret) { 5456 dev_err(&pdev->dev, "PCI init failed\n"); 5457 goto out; 5458 } 5459 5460 /* Firmware command queue initialize */ 5461 ret = hclge_cmd_queue_init(hdev); 5462 if (ret) { 5463 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 5464 goto err_pci_uninit; 5465 } 5466 5467 /* Firmware command initialize */ 5468 ret = hclge_cmd_init(hdev); 5469 if (ret) 5470 goto err_cmd_uninit; 5471 5472 ret = hclge_get_cap(hdev); 5473 if (ret) { 5474 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5475 ret); 5476 goto err_cmd_uninit; 5477 } 5478 5479 ret = hclge_configure(hdev); 5480 if (ret) { 5481 dev_err(&pdev->dev, "Configure dev error, ret = 
%d.\n", ret); 5482 goto err_cmd_uninit; 5483 } 5484 5485 ret = hclge_init_msi(hdev); 5486 if (ret) { 5487 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 5488 goto err_cmd_uninit; 5489 } 5490 5491 ret = hclge_misc_irq_init(hdev); 5492 if (ret) { 5493 dev_err(&pdev->dev, 5494 "Misc IRQ(vector0) init error, ret = %d.\n", 5495 ret); 5496 goto err_msi_uninit; 5497 } 5498 5499 ret = hclge_alloc_tqps(hdev); 5500 if (ret) { 5501 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 5502 goto err_msi_irq_uninit; 5503 } 5504 5505 ret = hclge_alloc_vport(hdev); 5506 if (ret) { 5507 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 5508 goto err_msi_irq_uninit; 5509 } 5510 5511 ret = hclge_map_tqp(hdev); 5512 if (ret) { 5513 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5514 goto err_msi_irq_uninit; 5515 } 5516 5517 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 5518 ret = hclge_mac_mdio_config(hdev); 5519 if (ret) { 5520 dev_err(&hdev->pdev->dev, 5521 "mdio config fail ret=%d\n", ret); 5522 goto err_msi_irq_uninit; 5523 } 5524 } 5525 5526 ret = hclge_mac_init(hdev); 5527 if (ret) { 5528 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5529 goto err_mdiobus_unreg; 5530 } 5531 5532 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5533 if (ret) { 5534 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5535 goto err_mdiobus_unreg; 5536 } 5537 5538 ret = hclge_init_vlan_config(hdev); 5539 if (ret) { 5540 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5541 goto err_mdiobus_unreg; 5542 } 5543 5544 ret = hclge_tm_schd_init(hdev); 5545 if (ret) { 5546 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 5547 goto err_mdiobus_unreg; 5548 } 5549 5550 hclge_rss_init_cfg(hdev); 5551 ret = hclge_rss_init_hw(hdev); 5552 if (ret) { 5553 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5554 goto err_mdiobus_unreg; 5555 } 5556 5557 ret = init_mgr_tbl(hdev); 5558 if (ret) { 5559 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 5560 goto err_mdiobus_unreg; 5561 } 5562 5563 hclge_dcb_ops_set(hdev); 5564 5565 timer_setup(&hdev->service_timer, hclge_service_timer, 0); 5566 INIT_WORK(&hdev->service_task, hclge_service_task); 5567 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); 5568 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); 5569 5570 /* Enable MISC vector(vector0) */ 5571 hclge_enable_vector(&hdev->misc_vector, true); 5572 5573 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 5574 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5575 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 5576 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 5577 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 5578 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 5579 5580 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); 5581 return 0; 5582 5583 err_mdiobus_unreg: 5584 if (hdev->hw.mac.phydev) 5585 mdiobus_unregister(hdev->hw.mac.mdio_bus); 5586 err_msi_irq_uninit: 5587 hclge_misc_irq_uninit(hdev); 5588 err_msi_uninit: 5589 pci_free_irq_vectors(pdev); 5590 err_cmd_uninit: 5591 hclge_destroy_cmd_queue(&hdev->hw); 5592 err_pci_uninit: 5593 pcim_iounmap(pdev, hdev->hw.io_base); 5594 pci_clear_master(pdev); 5595 pci_release_regions(pdev); 5596 pci_disable_device(pdev); 5597 out: 5598 return ret; 5599 } 5600 5601 static void hclge_stats_clear(struct hclge_dev *hdev) 5602 { 5603 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); 5604 } 5605 5606 static int 
hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 5607 { 5608 struct hclge_dev *hdev = ae_dev->priv; 5609 struct pci_dev *pdev = ae_dev->pdev; 5610 int ret; 5611 5612 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5613 5614 hclge_stats_clear(hdev); 5615 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 5616 5617 ret = hclge_cmd_init(hdev); 5618 if (ret) { 5619 dev_err(&pdev->dev, "Cmd queue init failed\n"); 5620 return ret; 5621 } 5622 5623 ret = hclge_get_cap(hdev); 5624 if (ret) { 5625 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5626 ret); 5627 return ret; 5628 } 5629 5630 ret = hclge_configure(hdev); 5631 if (ret) { 5632 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5633 return ret; 5634 } 5635 5636 ret = hclge_map_tqp(hdev); 5637 if (ret) { 5638 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5639 return ret; 5640 } 5641 5642 ret = hclge_mac_init(hdev); 5643 if (ret) { 5644 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5645 return ret; 5646 } 5647 5648 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5649 if (ret) { 5650 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5651 return ret; 5652 } 5653 5654 ret = hclge_init_vlan_config(hdev); 5655 if (ret) { 5656 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5657 return ret; 5658 } 5659 5660 ret = hclge_tm_init_hw(hdev); 5661 if (ret) { 5662 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 5663 return ret; 5664 } 5665 5666 ret = hclge_rss_init_hw(hdev); 5667 if (ret) { 5668 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5669 return ret; 5670 } 5671 5672 /* Enable MISC vector(vector0) */ 5673 hclge_enable_vector(&hdev->misc_vector, true); 5674 5675 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 5676 HCLGE_DRIVER_NAME); 5677 5678 return 0; 5679 } 5680 5681 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 5682 { 5683 struct hclge_dev *hdev = ae_dev->priv; 5684 struct hclge_mac *mac = &hdev->hw.mac; 5685 5686 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5687 5688 if (hdev->service_timer.function) 5689 del_timer_sync(&hdev->service_timer); 5690 if (hdev->service_task.func) 5691 cancel_work_sync(&hdev->service_task); 5692 if (hdev->rst_service_task.func) 5693 cancel_work_sync(&hdev->rst_service_task); 5694 if (hdev->mbx_service_task.func) 5695 cancel_work_sync(&hdev->mbx_service_task); 5696 5697 if (mac->phydev) 5698 mdiobus_unregister(mac->mdio_bus); 5699 5700 /* Disable MISC vector(vector0) */ 5701 hclge_enable_vector(&hdev->misc_vector, false); 5702 hclge_destroy_cmd_queue(&hdev->hw); 5703 hclge_misc_irq_uninit(hdev); 5704 hclge_pci_uninit(hdev); 5705 ae_dev->priv = NULL; 5706 } 5707 5708 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 5709 { 5710 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 5711 struct hclge_vport *vport = hclge_get_vport(handle); 5712 struct hclge_dev *hdev = vport->back; 5713 5714 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 5715 } 5716 5717 static void hclge_get_channels(struct hnae3_handle *handle, 5718 struct ethtool_channels *ch) 5719 { 5720 struct hclge_vport *vport = hclge_get_vport(handle); 5721 5722 ch->max_combined = hclge_get_max_channels(handle); 5723 ch->other_count = 1; 5724 ch->max_other = 1; 5725 ch->combined_count = vport->alloc_tqps; 5726 } 5727 5728 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 5729 u16 *free_tqps, u16 *max_rss_size) 5730 { 5731 struct hclge_vport *vport = hclge_get_vport(handle); 5732 
struct hclge_dev *hdev = vport->back; 5733 u16 temp_tqps = 0; 5734 int i; 5735 5736 for (i = 0; i < hdev->num_tqps; i++) { 5737 if (!hdev->htqp[i].alloced) 5738 temp_tqps++; 5739 } 5740 *free_tqps = temp_tqps; 5741 *max_rss_size = hdev->rss_size_max; 5742 } 5743 5744 static void hclge_release_tqp(struct hclge_vport *vport) 5745 { 5746 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5747 struct hclge_dev *hdev = vport->back; 5748 int i; 5749 5750 for (i = 0; i < kinfo->num_tqps; i++) { 5751 struct hclge_tqp *tqp = 5752 container_of(kinfo->tqp[i], struct hclge_tqp, q); 5753 5754 tqp->q.handle = NULL; 5755 tqp->q.tqp_index = 0; 5756 tqp->alloced = false; 5757 } 5758 5759 devm_kfree(&hdev->pdev->dev, kinfo->tqp); 5760 kinfo->tqp = NULL; 5761 } 5762 5763 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) 5764 { 5765 struct hclge_vport *vport = hclge_get_vport(handle); 5766 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5767 struct hclge_dev *hdev = vport->back; 5768 int cur_rss_size = kinfo->rss_size; 5769 int cur_tqps = kinfo->num_tqps; 5770 u16 tc_offset[HCLGE_MAX_TC_NUM]; 5771 u16 tc_valid[HCLGE_MAX_TC_NUM]; 5772 u16 tc_size[HCLGE_MAX_TC_NUM]; 5773 u16 roundup_size; 5774 u32 *rss_indir; 5775 int ret, i; 5776 5777 hclge_release_tqp(vport); 5778 5779 ret = hclge_knic_setup(vport, new_tqps_num); 5780 if (ret) { 5781 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); 5782 return ret; 5783 } 5784 5785 ret = hclge_map_tqp_to_vport(hdev, vport); 5786 if (ret) { 5787 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); 5788 return ret; 5789 } 5790 5791 ret = hclge_tm_schd_init(hdev); 5792 if (ret) { 5793 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); 5794 return ret; 5795 } 5796 5797 roundup_size = roundup_pow_of_two(kinfo->rss_size); 5798 roundup_size = ilog2(roundup_size); 5799 /* Set the RSS TC mode according to the new RSS size */ 5800 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 5801 tc_valid[i] = 0; 5802 5803 if (!(hdev->hw_tc_map & BIT(i))) 5804 continue; 5805 5806 tc_valid[i] = 1; 5807 tc_size[i] = roundup_size; 5808 tc_offset[i] = kinfo->rss_size * i; 5809 } 5810 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 5811 if (ret) 5812 return ret; 5813 5814 /* Reinitializes the rss indirect table according to the new RSS size */ 5815 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 5816 if (!rss_indir) 5817 return -ENOMEM; 5818 5819 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 5820 rss_indir[i] = i % kinfo->rss_size; 5821 5822 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 5823 if (ret) 5824 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 5825 ret); 5826 5827 kfree(rss_indir); 5828 5829 if (!ret) 5830 dev_info(&hdev->pdev->dev, 5831 "Channels changed, rss_size from %d to %d, tqps from %d to %d", 5832 cur_rss_size, kinfo->rss_size, 5833 cur_tqps, kinfo->rss_size * kinfo->num_tc); 5834 5835 return ret; 5836 } 5837 5838 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit, 5839 u32 *regs_num_64_bit) 5840 { 5841 struct hclge_desc desc; 5842 u32 total_num; 5843 int ret; 5844 5845 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true); 5846 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5847 if (ret) { 5848 dev_err(&hdev->pdev->dev, 5849 "Query register number cmd failed, ret = %d.\n", ret); 5850 return ret; 5851 } 5852 5853 *regs_num_32_bit = le32_to_cpu(desc.data[0]); 5854 *regs_num_64_bit = le32_to_cpu(desc.data[1]); 5855 5856 
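/* the firmware must report at least one 32 bit or 64 bit register, otherwise the query is treated as invalid */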
	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
				    data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status,
				    u8 act_led_status, u8 link_led_status,
				    u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M,
		       HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status);
	hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M,
		       HCLGE_LED_ACTIVITY_STATE_S, act_led_status);
	hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M,
		       HCLGE_LED_LINK_STATE_S, link_led_status);
	hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
		       HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
#define BLINK_FREQUENCY 2
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int ret = 0;

	if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		ret = hclge_set_led_status_sfp(hdev,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_ON);
		break;
	case ETHTOOL_ID_INACTIVE:
		ret = hclge_set_led_status_sfp(hdev,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_NO_CHANGE,
					       HCLGE_LED_OFF);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

enum hclge_led_port_speed {
	HCLGE_SPEED_LED_FOR_1G,
	HCLGE_SPEED_LED_FOR_10G,
	HCLGE_SPEED_LED_FOR_25G,
	HCLGE_SPEED_LED_FOR_40G,
	HCLGE_SPEED_LED_FOR_50G,
	HCLGE_SPEED_LED_FOR_100G,
};

static u8 hclge_led_get_speed_status(u32 speed)
{
	u8 speed_led;

	switch (speed) {
	case HCLGE_MAC_SPEED_1G:
		speed_led = HCLGE_SPEED_LED_FOR_1G;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_led = HCLGE_SPEED_LED_FOR_10G;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_led = HCLGE_SPEED_LED_FOR_25G;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_led = HCLGE_SPEED_LED_FOR_40G;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_led = HCLGE_SPEED_LED_FOR_50G;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_led = HCLGE_SPEED_LED_FOR_100G;
		break;
	default:
		speed_led = HCLGE_LED_NO_CHANGE;
	}

	return speed_led;
}

static int hclge_update_led_status(struct hclge_dev *hdev)
{
	u8 port_speed_status, link_status, activity_status;
	u64 rx_pkts, tx_pkts;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return 0;

	port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed);

	rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num;
	tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num;
	if (rx_pkts != hdev->rx_pkts_for_led ||
	    tx_pkts != hdev->tx_pkts_for_led)
		activity_status = HCLGE_LED_ON;
	else
		activity_status = HCLGE_LED_OFF;
	hdev->rx_pkts_for_led = rx_pkts;
	hdev->tx_pkts_for_led = tx_pkts;

	if (hdev->hw.mac.link)
		link_status = HCLGE_LED_ON;
	else
		link_status = HCLGE_LED_OFF;

	return hclge_set_led_status_sfp(hdev, port_speed_status,
					activity_status, link_status,
					HCLGE_LED_NO_CHANGE);
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static void hclge_get_port_type(struct hnae3_handle *handle,
				u8 *port_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	switch (media_type) {
	case HNAE3_MEDIA_TYPE_FIBER:
		*port_type = PORT_FIBRE;
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		*port_type = PORT_TP;
		break;
	case HNAE3_MEDIA_TYPE_UNKNOWN:
	default:
		*port_type = PORT_OTHER;
		break;
	}
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_flowctrl_adv = hclge_get_flowctrl_adv,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.get_port_type = hclge_get_port_type,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.name = HCLGE_NAME,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);