1 /* 2 * Copyright (c) 2016-2017 Hisilicon Limited. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 */ 9 10 #include <linux/acpi.h> 11 #include <linux/device.h> 12 #include <linux/etherdevice.h> 13 #include <linux/init.h> 14 #include <linux/interrupt.h> 15 #include <linux/kernel.h> 16 #include <linux/module.h> 17 #include <linux/netdevice.h> 18 #include <linux/pci.h> 19 #include <linux/platform_device.h> 20 #include <linux/if_vlan.h> 21 #include <net/rtnetlink.h> 22 #include "hclge_cmd.h" 23 #include "hclge_dcb.h" 24 #include "hclge_main.h" 25 #include "hclge_mbx.h" 26 #include "hclge_mdio.h" 27 #include "hclge_tm.h" 28 #include "hnae3.h" 29 30 #define HCLGE_NAME "hclge" 31 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) 32 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) 33 #define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) 34 #define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) 35 36 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 37 enum hclge_mta_dmac_sel_type mta_mac_sel, 38 bool enable); 39 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); 40 static int hclge_init_vlan_config(struct hclge_dev *hdev); 41 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); 42 43 static struct hnae3_ae_algo ae_algo; 44 45 static const struct pci_device_id ae_algo_pci_tbl[] = { 46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, 49 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, 50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 51 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 53 /* required last entry */ 54 {0, } 55 }; 56 57 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); 58 59 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { 60 "Mac Loopback test", 61 "Serdes Loopback test", 62 "Phy Loopback test" 63 }; 64 65 static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { 66 {"igu_rx_oversize_pkt", 67 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, 68 {"igu_rx_undersize_pkt", 69 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, 70 {"igu_rx_out_all_pkt", 71 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, 72 {"igu_rx_uni_pkt", 73 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, 74 {"igu_rx_multi_pkt", 75 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, 76 {"igu_rx_broad_pkt", 77 HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, 78 {"egu_tx_out_all_pkt", 79 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, 80 {"egu_tx_uni_pkt", 81 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, 82 {"egu_tx_multi_pkt", 83 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, 84 {"egu_tx_broad_pkt", 85 HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, 86 {"ssu_ppp_mac_key_num", 87 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, 88 {"ssu_ppp_host_key_num", 89 HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, 90 {"ppp_ssu_mac_rlt_num", 91 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, 92 {"ppp_ssu_host_rlt_num", 93 HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, 94 {"ssu_tx_in_num", 95 HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, 96 {"ssu_tx_out_num", 97 
HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, 98 {"ssu_rx_in_num", 99 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, 100 {"ssu_rx_out_num", 101 HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} 102 }; 103 104 static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { 105 {"igu_rx_err_pkt", 106 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, 107 {"igu_rx_no_eof_pkt", 108 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, 109 {"igu_rx_no_sof_pkt", 110 HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, 111 {"egu_tx_1588_pkt", 112 HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, 113 {"ssu_full_drop_num", 114 HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, 115 {"ssu_part_drop_num", 116 HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, 117 {"ppp_key_drop_num", 118 HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, 119 {"ppp_rlt_drop_num", 120 HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, 121 {"ssu_key_drop_num", 122 HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, 123 {"pkt_curr_buf_cnt", 124 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, 125 {"qcn_fb_rcv_cnt", 126 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, 127 {"qcn_fb_drop_cnt", 128 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, 129 {"qcn_fb_invaild_cnt", 130 HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, 131 {"rx_packet_tc0_in_cnt", 132 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, 133 {"rx_packet_tc1_in_cnt", 134 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, 135 {"rx_packet_tc2_in_cnt", 136 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, 137 {"rx_packet_tc3_in_cnt", 138 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, 139 {"rx_packet_tc4_in_cnt", 140 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, 141 {"rx_packet_tc5_in_cnt", 142 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, 143 {"rx_packet_tc6_in_cnt", 144 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, 145 {"rx_packet_tc7_in_cnt", 146 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, 147 {"rx_packet_tc0_out_cnt", 148 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, 149 {"rx_packet_tc1_out_cnt", 150 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, 151 {"rx_packet_tc2_out_cnt", 152 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, 153 {"rx_packet_tc3_out_cnt", 154 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, 155 {"rx_packet_tc4_out_cnt", 156 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, 157 {"rx_packet_tc5_out_cnt", 158 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, 159 {"rx_packet_tc6_out_cnt", 160 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, 161 {"rx_packet_tc7_out_cnt", 162 HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, 163 {"tx_packet_tc0_in_cnt", 164 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, 165 {"tx_packet_tc1_in_cnt", 166 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, 167 {"tx_packet_tc2_in_cnt", 168 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, 169 {"tx_packet_tc3_in_cnt", 170 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, 171 {"tx_packet_tc4_in_cnt", 172 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, 173 {"tx_packet_tc5_in_cnt", 174 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, 175 {"tx_packet_tc6_in_cnt", 176 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, 177 {"tx_packet_tc7_in_cnt", 178 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, 179 {"tx_packet_tc0_out_cnt", 180 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, 181 {"tx_packet_tc1_out_cnt", 182 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, 183 {"tx_packet_tc2_out_cnt", 184 
HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, 185 {"tx_packet_tc3_out_cnt", 186 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, 187 {"tx_packet_tc4_out_cnt", 188 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, 189 {"tx_packet_tc5_out_cnt", 190 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, 191 {"tx_packet_tc6_out_cnt", 192 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, 193 {"tx_packet_tc7_out_cnt", 194 HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, 195 {"pkt_curr_buf_tc0_cnt", 196 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, 197 {"pkt_curr_buf_tc1_cnt", 198 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, 199 {"pkt_curr_buf_tc2_cnt", 200 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, 201 {"pkt_curr_buf_tc3_cnt", 202 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, 203 {"pkt_curr_buf_tc4_cnt", 204 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, 205 {"pkt_curr_buf_tc5_cnt", 206 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, 207 {"pkt_curr_buf_tc6_cnt", 208 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, 209 {"pkt_curr_buf_tc7_cnt", 210 HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, 211 {"mb_uncopy_num", 212 HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, 213 {"lo_pri_unicast_rlt_drop_num", 214 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, 215 {"hi_pri_multicast_rlt_drop_num", 216 HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, 217 {"lo_pri_multicast_rlt_drop_num", 218 HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, 219 {"rx_oq_drop_pkt_cnt", 220 HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, 221 {"tx_oq_drop_pkt_cnt", 222 HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, 223 {"nic_l2_err_drop_pkt_cnt", 224 HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, 225 {"roc_l2_err_drop_pkt_cnt", 226 HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} 227 }; 228 229 static const struct hclge_comm_stats_str g_mac_stats_string[] = { 230 {"mac_tx_mac_pause_num", 231 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, 232 {"mac_rx_mac_pause_num", 233 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, 234 {"mac_tx_pfc_pri0_pkt_num", 235 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 236 {"mac_tx_pfc_pri1_pkt_num", 237 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 238 {"mac_tx_pfc_pri2_pkt_num", 239 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 240 {"mac_tx_pfc_pri3_pkt_num", 241 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 242 {"mac_tx_pfc_pri4_pkt_num", 243 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 244 {"mac_tx_pfc_pri5_pkt_num", 245 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 246 {"mac_tx_pfc_pri6_pkt_num", 247 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 248 {"mac_tx_pfc_pri7_pkt_num", 249 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 250 {"mac_rx_pfc_pri0_pkt_num", 251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 252 {"mac_rx_pfc_pri1_pkt_num", 253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 254 {"mac_rx_pfc_pri2_pkt_num", 255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 256 {"mac_rx_pfc_pri3_pkt_num", 257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 258 {"mac_rx_pfc_pri4_pkt_num", 259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 260 {"mac_rx_pfc_pri5_pkt_num", 261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 262 {"mac_rx_pfc_pri6_pkt_num", 263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 264 {"mac_rx_pfc_pri7_pkt_num", 265 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 266 {"mac_tx_total_pkt_num", 267 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 268 {"mac_tx_total_oct_num", 269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 270 {"mac_tx_good_pkt_num", 271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 272 {"mac_tx_bad_pkt_num", 273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 274 {"mac_tx_good_oct_num", 275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 276 {"mac_tx_bad_oct_num", 277 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 278 {"mac_tx_uni_pkt_num", 279 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 280 {"mac_tx_multi_pkt_num", 281 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 282 {"mac_tx_broad_pkt_num", 283 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 284 {"mac_tx_undersize_pkt_num", 285 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 286 {"mac_tx_oversize_pkt_num", 287 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, 288 {"mac_tx_64_oct_pkt_num", 289 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 290 {"mac_tx_65_127_oct_pkt_num", 291 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 292 {"mac_tx_128_255_oct_pkt_num", 293 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 294 {"mac_tx_256_511_oct_pkt_num", 295 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 296 {"mac_tx_512_1023_oct_pkt_num", 297 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 298 {"mac_tx_1024_1518_oct_pkt_num", 299 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 300 {"mac_tx_1519_2047_oct_pkt_num", 301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, 302 {"mac_tx_2048_4095_oct_pkt_num", 303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 304 {"mac_tx_4096_8191_oct_pkt_num", 305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 306 {"mac_tx_8192_9216_oct_pkt_num", 307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 308 {"mac_tx_9217_12287_oct_pkt_num", 309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, 310 {"mac_tx_12288_16383_oct_pkt_num", 311 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, 312 {"mac_tx_1519_max_good_pkt_num", 313 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, 314 {"mac_tx_1519_max_bad_pkt_num", 315 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, 316 {"mac_rx_total_pkt_num", 317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 318 {"mac_rx_total_oct_num", 319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 320 {"mac_rx_good_pkt_num", 321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 322 {"mac_rx_bad_pkt_num", 323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 324 {"mac_rx_good_oct_num", 325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 326 {"mac_rx_bad_oct_num", 327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 328 {"mac_rx_uni_pkt_num", 329 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 330 {"mac_rx_multi_pkt_num", 331 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 332 {"mac_rx_broad_pkt_num", 333 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 334 {"mac_rx_undersize_pkt_num", 335 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 336 {"mac_rx_oversize_pkt_num", 337 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, 338 {"mac_rx_64_oct_pkt_num", 339 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 340 {"mac_rx_65_127_oct_pkt_num", 341 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 342 {"mac_rx_128_255_oct_pkt_num", 343 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 344 {"mac_rx_256_511_oct_pkt_num", 345 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 346 {"mac_rx_512_1023_oct_pkt_num", 347 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 348 {"mac_rx_1024_1518_oct_pkt_num", 349 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 350 {"mac_rx_1519_2047_oct_pkt_num", 351 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 352 {"mac_rx_2048_4095_oct_pkt_num", 353 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 354 {"mac_rx_4096_8191_oct_pkt_num", 355 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 356 {"mac_rx_8192_9216_oct_pkt_num", 357 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 358 {"mac_rx_9217_12287_oct_pkt_num", 359 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 360 {"mac_rx_12288_16383_oct_pkt_num", 361 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 362 {"mac_rx_1519_max_good_pkt_num", 363 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 364 {"mac_rx_1519_max_bad_pkt_num", 365 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 366 367 {"mac_tx_fragment_pkt_num", 368 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 369 {"mac_tx_undermin_pkt_num", 370 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 371 {"mac_tx_jabber_pkt_num", 372 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 373 {"mac_tx_err_all_pkt_num", 374 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 375 {"mac_tx_from_app_good_pkt_num", 376 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 377 {"mac_tx_from_app_bad_pkt_num", 378 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 379 {"mac_rx_fragment_pkt_num", 380 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 381 {"mac_rx_undermin_pkt_num", 382 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 383 {"mac_rx_jabber_pkt_num", 384 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 385 {"mac_rx_fcs_err_pkt_num", 386 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 387 {"mac_rx_send_app_good_pkt_num", 388 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 389 {"mac_rx_send_app_bad_pkt_num", 390 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 391 }; 392 393 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 394 { 395 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 396 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP), 397 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), 398 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), 399 .i_port_bitmap = 0x1, 400 }, 401 }; 402 403 static int hclge_64_bit_update_stats(struct hclge_dev *hdev) 404 { 405 #define HCLGE_64_BIT_CMD_NUM 5 406 #define HCLGE_64_BIT_RTN_DATANUM 4 407 u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); 408 struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; 409 __le64 *desc_data; 410 int i, k, n; 411 int ret; 412 413 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); 414 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); 415 if (ret) { 416 dev_err(&hdev->pdev->dev, 417 "Get 64 bit pkt stats fail, status = %d.\n", ret); 418 return ret; 419 } 420 421 for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { 422 if (unlikely(i == 0)) { 423 desc_data = (__le64 *)(&desc[i].data[0]); 424 n = HCLGE_64_BIT_RTN_DATANUM - 1; 425 } else { 426 desc_data = (__le64 *)(&desc[i]); 427 n = HCLGE_64_BIT_RTN_DATANUM; 428 } 429 for (k = 0; k < n; k++) { 430 *data++ += le64_to_cpu(*desc_data); 431 desc_data++; 432 } 433 } 434 435 return 0; 436 } 437 438 static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) 439 { 440 
stats->pkt_curr_buf_cnt = 0; 441 stats->pkt_curr_buf_tc0_cnt = 0; 442 stats->pkt_curr_buf_tc1_cnt = 0; 443 stats->pkt_curr_buf_tc2_cnt = 0; 444 stats->pkt_curr_buf_tc3_cnt = 0; 445 stats->pkt_curr_buf_tc4_cnt = 0; 446 stats->pkt_curr_buf_tc5_cnt = 0; 447 stats->pkt_curr_buf_tc6_cnt = 0; 448 stats->pkt_curr_buf_tc7_cnt = 0; 449 } 450 451 static int hclge_32_bit_update_stats(struct hclge_dev *hdev) 452 { 453 #define HCLGE_32_BIT_CMD_NUM 8 454 #define HCLGE_32_BIT_RTN_DATANUM 8 455 456 struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; 457 struct hclge_32_bit_stats *all_32_bit_stats; 458 __le32 *desc_data; 459 int i, k, n; 460 u64 *data; 461 int ret; 462 463 all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; 464 data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt); 465 466 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); 467 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); 468 if (ret) { 469 dev_err(&hdev->pdev->dev, 470 "Get 32 bit pkt stats fail, status = %d.\n", ret); 471 472 return ret; 473 } 474 475 hclge_reset_partial_32bit_counter(all_32_bit_stats); 476 for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { 477 if (unlikely(i == 0)) { 478 __le16 *desc_data_16bit; 479 480 all_32_bit_stats->igu_rx_err_pkt += 481 le32_to_cpu(desc[i].data[0]); 482 483 desc_data_16bit = (__le16 *)&desc[i].data[1]; 484 all_32_bit_stats->igu_rx_no_eof_pkt += 485 le16_to_cpu(*desc_data_16bit); 486 487 desc_data_16bit++; 488 all_32_bit_stats->igu_rx_no_sof_pkt += 489 le16_to_cpu(*desc_data_16bit); 490 491 desc_data = &desc[i].data[2]; 492 n = HCLGE_32_BIT_RTN_DATANUM - 4; 493 } else { 494 desc_data = (__le32 *)&desc[i]; 495 n = HCLGE_32_BIT_RTN_DATANUM; 496 } 497 for (k = 0; k < n; k++) { 498 *data++ += le32_to_cpu(*desc_data); 499 desc_data++; 500 } 501 } 502 503 return 0; 504 } 505 506 static int hclge_mac_update_stats(struct hclge_dev *hdev) 507 { 508 #define HCLGE_MAC_CMD_NUM 21 509 #define HCLGE_RTN_DATA_NUM 4 510 511 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); 512 struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; 513 __le64 *desc_data; 514 int i, k, n; 515 int ret; 516 517 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); 518 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); 519 if (ret) { 520 dev_err(&hdev->pdev->dev, 521 "Get MAC pkt stats fail, status = %d.\n", ret); 522 523 return ret; 524 } 525 526 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { 527 if (unlikely(i == 0)) { 528 desc_data = (__le64 *)(&desc[i].data[0]); 529 n = HCLGE_RTN_DATA_NUM - 2; 530 } else { 531 desc_data = (__le64 *)(&desc[i]); 532 n = HCLGE_RTN_DATA_NUM; 533 } 534 for (k = 0; k < n; k++) { 535 *data++ += le64_to_cpu(*desc_data); 536 desc_data++; 537 } 538 } 539 540 return 0; 541 } 542 543 static int hclge_tqps_update_stats(struct hnae3_handle *handle) 544 { 545 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 546 struct hclge_vport *vport = hclge_get_vport(handle); 547 struct hclge_dev *hdev = vport->back; 548 struct hnae3_queue *queue; 549 struct hclge_desc desc[1]; 550 struct hclge_tqp *tqp; 551 int ret, i; 552 553 for (i = 0; i < kinfo->num_tqps; i++) { 554 queue = handle->kinfo.tqp[i]; 555 tqp = container_of(queue, struct hclge_tqp, q); 556 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 557 hclge_cmd_setup_basic_desc(&desc[0], 558 HCLGE_OPC_QUERY_RX_STATUS, 559 true); 560 561 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 562 ret = hclge_cmd_send(&hdev->hw, desc, 1); 563 if (ret) { 564 dev_err(&hdev->pdev->dev, 565 "Query tqp stat fail, status = %d,queue = %d\n", 566 ret, i); 567 return 
ret; 568 } 569 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += 570 le32_to_cpu(desc[0].data[1]); 571 } 572 573 for (i = 0; i < kinfo->num_tqps; i++) { 574 queue = handle->kinfo.tqp[i]; 575 tqp = container_of(queue, struct hclge_tqp, q); 576 /* command : HCLGE_OPC_QUERY_IGU_STAT */ 577 hclge_cmd_setup_basic_desc(&desc[0], 578 HCLGE_OPC_QUERY_TX_STATUS, 579 true); 580 581 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); 582 ret = hclge_cmd_send(&hdev->hw, desc, 1); 583 if (ret) { 584 dev_err(&hdev->pdev->dev, 585 "Query tqp stat fail, status = %d,queue = %d\n", 586 ret, i); 587 return ret; 588 } 589 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += 590 le32_to_cpu(desc[0].data[1]); 591 } 592 593 return 0; 594 } 595 596 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) 597 { 598 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 599 struct hclge_tqp *tqp; 600 u64 *buff = data; 601 int i; 602 603 for (i = 0; i < kinfo->num_tqps; i++) { 604 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 605 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; 606 } 607 608 for (i = 0; i < kinfo->num_tqps; i++) { 609 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); 610 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; 611 } 612 613 return buff; 614 } 615 616 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) 617 { 618 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 619 620 return kinfo->num_tqps * (2); 621 } 622 623 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) 624 { 625 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 626 u8 *buff = data; 627 int i = 0; 628 629 for (i = 0; i < kinfo->num_tqps; i++) { 630 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], 631 struct hclge_tqp, q); 632 snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", 633 tqp->index); 634 buff = buff + ETH_GSTRING_LEN; 635 } 636 637 for (i = 0; i < kinfo->num_tqps; i++) { 638 struct hclge_tqp *tqp = container_of(kinfo->tqp[i], 639 struct hclge_tqp, q); 640 snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", 641 tqp->index); 642 buff = buff + ETH_GSTRING_LEN; 643 } 644 645 return buff; 646 } 647 648 static u64 *hclge_comm_get_stats(void *comm_stats, 649 const struct hclge_comm_stats_str strs[], 650 int size, u64 *data) 651 { 652 u64 *buf = data; 653 u32 i; 654 655 for (i = 0; i < size; i++) 656 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); 657 658 return buf + size; 659 } 660 661 static u8 *hclge_comm_get_strings(u32 stringset, 662 const struct hclge_comm_stats_str strs[], 663 int size, u8 *data) 664 { 665 char *buff = (char *)data; 666 u32 i; 667 668 if (stringset != ETH_SS_STATS) 669 return buff; 670 671 for (i = 0; i < size; i++) { 672 snprintf(buff, ETH_GSTRING_LEN, 673 strs[i].desc); 674 buff = buff + ETH_GSTRING_LEN; 675 } 676 677 return (u8 *)buff; 678 } 679 680 static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, 681 struct net_device_stats *net_stats) 682 { 683 net_stats->tx_dropped = 0; 684 net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; 685 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; 686 net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; 687 688 net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; 689 net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; 690 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; 691 net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; 692 
net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; 693 694 net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; 695 net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; 696 697 net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; 698 net_stats->rx_length_errors = 699 hw_stats->mac_stats.mac_rx_undersize_pkt_num; 700 net_stats->rx_length_errors += 701 hw_stats->mac_stats.mac_rx_oversize_pkt_num; 702 net_stats->rx_over_errors = 703 hw_stats->mac_stats.mac_rx_oversize_pkt_num; 704 } 705 706 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 707 { 708 struct hnae3_handle *handle; 709 int status; 710 711 handle = &hdev->vport[0].nic; 712 if (handle->client) { 713 status = hclge_tqps_update_stats(handle); 714 if (status) { 715 dev_err(&hdev->pdev->dev, 716 "Update TQPS stats fail, status = %d.\n", 717 status); 718 } 719 } 720 721 status = hclge_mac_update_stats(hdev); 722 if (status) 723 dev_err(&hdev->pdev->dev, 724 "Update MAC stats fail, status = %d.\n", status); 725 726 status = hclge_32_bit_update_stats(hdev); 727 if (status) 728 dev_err(&hdev->pdev->dev, 729 "Update 32 bit stats fail, status = %d.\n", 730 status); 731 732 hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); 733 } 734 735 static void hclge_update_stats(struct hnae3_handle *handle, 736 struct net_device_stats *net_stats) 737 { 738 struct hclge_vport *vport = hclge_get_vport(handle); 739 struct hclge_dev *hdev = vport->back; 740 struct hclge_hw_stats *hw_stats = &hdev->hw_stats; 741 int status; 742 743 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 744 return; 745 746 status = hclge_mac_update_stats(hdev); 747 if (status) 748 dev_err(&hdev->pdev->dev, 749 "Update MAC stats fail, status = %d.\n", 750 status); 751 752 status = hclge_32_bit_update_stats(hdev); 753 if (status) 754 dev_err(&hdev->pdev->dev, 755 "Update 32 bit stats fail, status = %d.\n", 756 status); 757 758 status = hclge_64_bit_update_stats(hdev); 759 if (status) 760 dev_err(&hdev->pdev->dev, 761 "Update 64 bit stats fail, status = %d.\n", 762 status); 763 764 status = hclge_tqps_update_stats(handle); 765 if (status) 766 dev_err(&hdev->pdev->dev, 767 "Update TQPS stats fail, status = %d.\n", 768 status); 769 770 hclge_update_netstat(hw_stats, net_stats); 771 772 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); 773 } 774 775 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 776 { 777 #define HCLGE_LOOPBACK_TEST_FLAGS 0x7 778 779 struct hclge_vport *vport = hclge_get_vport(handle); 780 struct hclge_dev *hdev = vport->back; 781 int count = 0; 782 783 /* Loopback test support rules: 784 * mac: only GE mode support 785 * serdes: all mac mode will support include GE/XGE/LGE/CGE 786 * phy: only support when phy device exist on board 787 */ 788 if (stringset == ETH_SS_TEST) { 789 /* clear loopback bit flags at first */ 790 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 791 if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 792 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 793 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 794 count += 1; 795 handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; 796 } else { 797 count = -EOPNOTSUPP; 798 } 799 } else if (stringset == ETH_SS_STATS) { 800 count = ARRAY_SIZE(g_mac_stats_string) + 801 ARRAY_SIZE(g_all_32bit_stats_string) + 802 ARRAY_SIZE(g_all_64bit_stats_string) + 803 hclge_tqps_get_sset_count(handle, stringset); 804 } 805 806 return count; 807 } 808 809 static void 
hclge_get_strings(struct hnae3_handle *handle, 810 u32 stringset, 811 u8 *data) 812 { 813 u8 *p = (char *)data; 814 int size; 815 816 if (stringset == ETH_SS_STATS) { 817 size = ARRAY_SIZE(g_mac_stats_string); 818 p = hclge_comm_get_strings(stringset, 819 g_mac_stats_string, 820 size, 821 p); 822 size = ARRAY_SIZE(g_all_32bit_stats_string); 823 p = hclge_comm_get_strings(stringset, 824 g_all_32bit_stats_string, 825 size, 826 p); 827 size = ARRAY_SIZE(g_all_64bit_stats_string); 828 p = hclge_comm_get_strings(stringset, 829 g_all_64bit_stats_string, 830 size, 831 p); 832 p = hclge_tqps_get_strings(handle, p); 833 } else if (stringset == ETH_SS_TEST) { 834 if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { 835 memcpy(p, 836 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], 837 ETH_GSTRING_LEN); 838 p += ETH_GSTRING_LEN; 839 } 840 if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { 841 memcpy(p, 842 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], 843 ETH_GSTRING_LEN); 844 p += ETH_GSTRING_LEN; 845 } 846 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 847 memcpy(p, 848 hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], 849 ETH_GSTRING_LEN); 850 p += ETH_GSTRING_LEN; 851 } 852 } 853 } 854 855 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) 856 { 857 struct hclge_vport *vport = hclge_get_vport(handle); 858 struct hclge_dev *hdev = vport->back; 859 u64 *p; 860 861 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, 862 g_mac_stats_string, 863 ARRAY_SIZE(g_mac_stats_string), 864 data); 865 p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, 866 g_all_32bit_stats_string, 867 ARRAY_SIZE(g_all_32bit_stats_string), 868 p); 869 p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, 870 g_all_64bit_stats_string, 871 ARRAY_SIZE(g_all_64bit_stats_string), 872 p); 873 p = hclge_tqps_get_stats(handle, p); 874 } 875 876 static int hclge_parse_func_status(struct hclge_dev *hdev, 877 struct hclge_func_status_cmd *status) 878 { 879 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 880 return -EINVAL; 881 882 /* Set the pf to main pf */ 883 if (status->pf_state & HCLGE_PF_STATE_MAIN) 884 hdev->flag |= HCLGE_FLAG_MAIN; 885 else 886 hdev->flag &= ~HCLGE_FLAG_MAIN; 887 888 return 0; 889 } 890 891 static int hclge_query_function_status(struct hclge_dev *hdev) 892 { 893 struct hclge_func_status_cmd *req; 894 struct hclge_desc desc; 895 int timeout = 0; 896 int ret; 897 898 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 899 req = (struct hclge_func_status_cmd *)desc.data; 900 901 do { 902 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 903 if (ret) { 904 dev_err(&hdev->pdev->dev, 905 "query function status failed %d.\n", 906 ret); 907 908 return ret; 909 } 910 911 /* Check pf reset is done */ 912 if (req->pf_state) 913 break; 914 usleep_range(1000, 2000); 915 } while (timeout++ < 5); 916 917 ret = hclge_parse_func_status(hdev, req); 918 919 return ret; 920 } 921 922 static int hclge_query_pf_resource(struct hclge_dev *hdev) 923 { 924 struct hclge_pf_res_cmd *req; 925 struct hclge_desc desc; 926 int ret; 927 928 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 929 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 930 if (ret) { 931 dev_err(&hdev->pdev->dev, 932 "query pf resource failed %d.\n", ret); 933 return ret; 934 } 935 936 req = (struct hclge_pf_res_cmd *)desc.data; 937 hdev->num_tqps = __le16_to_cpu(req->tqp_num); 938 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 939 940 if (hnae3_dev_roce_supported(hdev)) { 941 hdev->num_roce_msi = 942 
hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), 943 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 944 945 /* PF should have NIC vectors and Roce vectors, 946 * NIC vectors are queued before Roce vectors. 947 */ 948 hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; 949 } else { 950 hdev->num_msi = 951 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number), 952 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 953 } 954 955 return 0; 956 } 957 958 static int hclge_parse_speed(int speed_cmd, int *speed) 959 { 960 switch (speed_cmd) { 961 case 6: 962 *speed = HCLGE_MAC_SPEED_10M; 963 break; 964 case 7: 965 *speed = HCLGE_MAC_SPEED_100M; 966 break; 967 case 0: 968 *speed = HCLGE_MAC_SPEED_1G; 969 break; 970 case 1: 971 *speed = HCLGE_MAC_SPEED_10G; 972 break; 973 case 2: 974 *speed = HCLGE_MAC_SPEED_25G; 975 break; 976 case 3: 977 *speed = HCLGE_MAC_SPEED_40G; 978 break; 979 case 4: 980 *speed = HCLGE_MAC_SPEED_50G; 981 break; 982 case 5: 983 *speed = HCLGE_MAC_SPEED_100G; 984 break; 985 default: 986 return -EINVAL; 987 } 988 989 return 0; 990 } 991 992 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, 993 u8 speed_ability) 994 { 995 unsigned long *supported = hdev->hw.mac.supported; 996 997 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 998 set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 999 supported); 1000 1001 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1002 set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, 1003 supported); 1004 1005 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1006 set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, 1007 supported); 1008 1009 if (speed_ability & HCLGE_SUPPORT_50G_BIT) 1010 set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, 1011 supported); 1012 1013 if (speed_ability & HCLGE_SUPPORT_100G_BIT) 1014 set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, 1015 supported); 1016 1017 set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); 1018 set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); 1019 } 1020 1021 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) 1022 { 1023 u8 media_type = hdev->hw.mac.media_type; 1024 1025 if (media_type != HNAE3_MEDIA_TYPE_FIBER) 1026 return; 1027 1028 hclge_parse_fiber_link_mode(hdev, speed_ability); 1029 } 1030 1031 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) 1032 { 1033 struct hclge_cfg_param_cmd *req; 1034 u64 mac_addr_tmp_high; 1035 u64 mac_addr_tmp; 1036 int i; 1037 1038 req = (struct hclge_cfg_param_cmd *)desc[0].data; 1039 1040 /* get the configuration */ 1041 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1042 HCLGE_CFG_VMDQ_M, 1043 HCLGE_CFG_VMDQ_S); 1044 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1045 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); 1046 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1047 HCLGE_CFG_TQP_DESC_N_M, 1048 HCLGE_CFG_TQP_DESC_N_S); 1049 1050 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), 1051 HCLGE_CFG_PHY_ADDR_M, 1052 HCLGE_CFG_PHY_ADDR_S); 1053 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), 1054 HCLGE_CFG_MEDIA_TP_M, 1055 HCLGE_CFG_MEDIA_TP_S); 1056 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), 1057 HCLGE_CFG_RX_BUF_LEN_M, 1058 HCLGE_CFG_RX_BUF_LEN_S); 1059 /* get mac_address */ 1060 mac_addr_tmp = __le32_to_cpu(req->param[2]); 1061 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), 1062 HCLGE_CFG_MAC_ADDR_H_M, 1063 HCLGE_CFG_MAC_ADDR_H_S); 1064 1065 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 1066 1067 cfg->default_speed 
= hnae3_get_field(__le32_to_cpu(req->param[3]), 1068 HCLGE_CFG_DEFAULT_SPEED_M, 1069 HCLGE_CFG_DEFAULT_SPEED_S); 1070 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), 1071 HCLGE_CFG_RSS_SIZE_M, 1072 HCLGE_CFG_RSS_SIZE_S); 1073 1074 for (i = 0; i < ETH_ALEN; i++) 1075 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 1076 1077 req = (struct hclge_cfg_param_cmd *)desc[1].data; 1078 cfg->numa_node_map = __le32_to_cpu(req->param[0]); 1079 1080 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), 1081 HCLGE_CFG_SPEED_ABILITY_M, 1082 HCLGE_CFG_SPEED_ABILITY_S); 1083 } 1084 1085 /* hclge_get_cfg: query the static parameter from flash 1086 * @hdev: pointer to struct hclge_dev 1087 * @hcfg: the config structure to be getted 1088 */ 1089 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) 1090 { 1091 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; 1092 struct hclge_cfg_param_cmd *req; 1093 int i, ret; 1094 1095 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { 1096 u32 offset = 0; 1097 1098 req = (struct hclge_cfg_param_cmd *)desc[i].data; 1099 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, 1100 true); 1101 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M, 1102 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); 1103 /* Len should be united by 4 bytes when send to hardware */ 1104 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, 1105 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); 1106 req->offset = cpu_to_le32(offset); 1107 } 1108 1109 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); 1110 if (ret) { 1111 dev_err(&hdev->pdev->dev, 1112 "get config failed %d.\n", ret); 1113 return ret; 1114 } 1115 1116 hclge_parse_cfg(hcfg, desc); 1117 return 0; 1118 } 1119 1120 static int hclge_get_cap(struct hclge_dev *hdev) 1121 { 1122 int ret; 1123 1124 ret = hclge_query_function_status(hdev); 1125 if (ret) { 1126 dev_err(&hdev->pdev->dev, 1127 "query function status error %d.\n", ret); 1128 return ret; 1129 } 1130 1131 /* get pf resource */ 1132 ret = hclge_query_pf_resource(hdev); 1133 if (ret) { 1134 dev_err(&hdev->pdev->dev, 1135 "query pf resource error %d.\n", ret); 1136 return ret; 1137 } 1138 1139 return 0; 1140 } 1141 1142 static int hclge_configure(struct hclge_dev *hdev) 1143 { 1144 struct hclge_cfg cfg; 1145 int ret, i; 1146 1147 ret = hclge_get_cfg(hdev, &cfg); 1148 if (ret) { 1149 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); 1150 return ret; 1151 } 1152 1153 hdev->num_vmdq_vport = cfg.vmdq_vport_num; 1154 hdev->base_tqp_pid = 0; 1155 hdev->rss_size_max = cfg.rss_size_max; 1156 hdev->rx_buf_len = cfg.rx_buf_len; 1157 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); 1158 hdev->hw.mac.media_type = cfg.media_type; 1159 hdev->hw.mac.phy_addr = cfg.phy_addr; 1160 hdev->num_desc = cfg.tqp_desc_num; 1161 hdev->tm_info.num_pg = 1; 1162 hdev->tc_max = cfg.tc_num; 1163 hdev->tm_info.hw_pfc_map = 0; 1164 1165 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); 1166 if (ret) { 1167 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); 1168 return ret; 1169 } 1170 1171 hclge_parse_link_mode(hdev, cfg.speed_ability); 1172 1173 if ((hdev->tc_max > HNAE3_MAX_TC) || 1174 (hdev->tc_max < 1)) { 1175 dev_warn(&hdev->pdev->dev, "TC num = %d.\n", 1176 hdev->tc_max); 1177 hdev->tc_max = 1; 1178 } 1179 1180 /* Dev does not support DCB */ 1181 if (!hnae3_dev_dcb_supported(hdev)) { 1182 hdev->tc_max = 1; 1183 hdev->pfc_max = 0; 1184 } else { 1185 hdev->pfc_max = hdev->tc_max; 1186 } 1187 1188 
hdev->tm_info.num_tc = hdev->tc_max; 1189 1190 /* Currently not support uncontiuous tc */ 1191 for (i = 0; i < hdev->tm_info.num_tc; i++) 1192 hnae3_set_bit(hdev->hw_tc_map, i, 1); 1193 1194 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; 1195 1196 return ret; 1197 } 1198 1199 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, 1200 int tso_mss_max) 1201 { 1202 struct hclge_cfg_tso_status_cmd *req; 1203 struct hclge_desc desc; 1204 u16 tso_mss; 1205 1206 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); 1207 1208 req = (struct hclge_cfg_tso_status_cmd *)desc.data; 1209 1210 tso_mss = 0; 1211 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 1212 HCLGE_TSO_MSS_MIN_S, tso_mss_min); 1213 req->tso_mss_min = cpu_to_le16(tso_mss); 1214 1215 tso_mss = 0; 1216 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, 1217 HCLGE_TSO_MSS_MIN_S, tso_mss_max); 1218 req->tso_mss_max = cpu_to_le16(tso_mss); 1219 1220 return hclge_cmd_send(&hdev->hw, &desc, 1); 1221 } 1222 1223 static int hclge_alloc_tqps(struct hclge_dev *hdev) 1224 { 1225 struct hclge_tqp *tqp; 1226 int i; 1227 1228 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 1229 sizeof(struct hclge_tqp), GFP_KERNEL); 1230 if (!hdev->htqp) 1231 return -ENOMEM; 1232 1233 tqp = hdev->htqp; 1234 1235 for (i = 0; i < hdev->num_tqps; i++) { 1236 tqp->dev = &hdev->pdev->dev; 1237 tqp->index = i; 1238 1239 tqp->q.ae_algo = &ae_algo; 1240 tqp->q.buf_size = hdev->rx_buf_len; 1241 tqp->q.desc_num = hdev->num_desc; 1242 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + 1243 i * HCLGE_TQP_REG_SIZE; 1244 1245 tqp++; 1246 } 1247 1248 return 0; 1249 } 1250 1251 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, 1252 u16 tqp_pid, u16 tqp_vid, bool is_pf) 1253 { 1254 struct hclge_tqp_map_cmd *req; 1255 struct hclge_desc desc; 1256 int ret; 1257 1258 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); 1259 1260 req = (struct hclge_tqp_map_cmd *)desc.data; 1261 req->tqp_id = cpu_to_le16(tqp_pid); 1262 req->tqp_vf = func_id; 1263 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | 1264 1 << HCLGE_TQP_MAP_EN_B; 1265 req->tqp_vid = cpu_to_le16(tqp_vid); 1266 1267 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1268 if (ret) { 1269 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", 1270 ret); 1271 return ret; 1272 } 1273 1274 return 0; 1275 } 1276 1277 static int hclge_assign_tqp(struct hclge_vport *vport, 1278 struct hnae3_queue **tqp, u16 num_tqps) 1279 { 1280 struct hclge_dev *hdev = vport->back; 1281 int i, alloced; 1282 1283 for (i = 0, alloced = 0; i < hdev->num_tqps && 1284 alloced < num_tqps; i++) { 1285 if (!hdev->htqp[i].alloced) { 1286 hdev->htqp[i].q.handle = &vport->nic; 1287 hdev->htqp[i].q.tqp_index = alloced; 1288 tqp[alloced] = &hdev->htqp[i].q; 1289 hdev->htqp[i].alloced = true; 1290 alloced++; 1291 } 1292 } 1293 vport->alloc_tqps = num_tqps; 1294 1295 return 0; 1296 } 1297 1298 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) 1299 { 1300 struct hnae3_handle *nic = &vport->nic; 1301 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1302 struct hclge_dev *hdev = vport->back; 1303 int i, ret; 1304 1305 kinfo->num_desc = hdev->num_desc; 1306 kinfo->rx_buf_len = hdev->rx_buf_len; 1307 kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); 1308 kinfo->rss_size 1309 = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); 1310 kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; 1311 1312 for (i = 0; i < HNAE3_MAX_TC; i++) { 1313 if (hdev->hw_tc_map & BIT(i)) { 
1314 kinfo->tc_info[i].enable = true; 1315 kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; 1316 kinfo->tc_info[i].tqp_count = kinfo->rss_size; 1317 kinfo->tc_info[i].tc = i; 1318 } else { 1319 /* Set to default queue if TC is disable */ 1320 kinfo->tc_info[i].enable = false; 1321 kinfo->tc_info[i].tqp_offset = 0; 1322 kinfo->tc_info[i].tqp_count = 1; 1323 kinfo->tc_info[i].tc = 0; 1324 } 1325 } 1326 1327 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 1328 sizeof(struct hnae3_queue *), GFP_KERNEL); 1329 if (!kinfo->tqp) 1330 return -ENOMEM; 1331 1332 ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); 1333 if (ret) { 1334 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1335 return -EINVAL; 1336 } 1337 1338 return 0; 1339 } 1340 1341 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, 1342 struct hclge_vport *vport) 1343 { 1344 struct hnae3_handle *nic = &vport->nic; 1345 struct hnae3_knic_private_info *kinfo; 1346 u16 i; 1347 1348 kinfo = &nic->kinfo; 1349 for (i = 0; i < kinfo->num_tqps; i++) { 1350 struct hclge_tqp *q = 1351 container_of(kinfo->tqp[i], struct hclge_tqp, q); 1352 bool is_pf; 1353 int ret; 1354 1355 is_pf = !(vport->vport_id); 1356 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, 1357 i, is_pf); 1358 if (ret) 1359 return ret; 1360 } 1361 1362 return 0; 1363 } 1364 1365 static int hclge_map_tqp(struct hclge_dev *hdev) 1366 { 1367 struct hclge_vport *vport = hdev->vport; 1368 u16 i, num_vport; 1369 1370 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1371 for (i = 0; i < num_vport; i++) { 1372 int ret; 1373 1374 ret = hclge_map_tqp_to_vport(hdev, vport); 1375 if (ret) 1376 return ret; 1377 1378 vport++; 1379 } 1380 1381 return 0; 1382 } 1383 1384 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) 1385 { 1386 /* this would be initialized later */ 1387 } 1388 1389 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1390 { 1391 struct hnae3_handle *nic = &vport->nic; 1392 struct hclge_dev *hdev = vport->back; 1393 int ret; 1394 1395 nic->pdev = hdev->pdev; 1396 nic->ae_algo = &ae_algo; 1397 nic->numa_node_mask = hdev->numa_node_mask; 1398 1399 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { 1400 ret = hclge_knic_setup(vport, num_tqps); 1401 if (ret) { 1402 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", 1403 ret); 1404 return ret; 1405 } 1406 } else { 1407 hclge_unic_setup(vport, num_tqps); 1408 } 1409 1410 return 0; 1411 } 1412 1413 static int hclge_alloc_vport(struct hclge_dev *hdev) 1414 { 1415 struct pci_dev *pdev = hdev->pdev; 1416 struct hclge_vport *vport; 1417 u32 tqp_main_vport; 1418 u32 tqp_per_vport; 1419 int num_vport, i; 1420 int ret; 1421 1422 /* We need to alloc a vport for main NIC of PF */ 1423 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; 1424 1425 if (hdev->num_tqps < num_vport) { 1426 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", 1427 hdev->num_tqps, num_vport); 1428 return -EINVAL; 1429 } 1430 1431 /* Alloc the same number of TQPs for every vport */ 1432 tqp_per_vport = hdev->num_tqps / num_vport; 1433 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1434 1435 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1436 GFP_KERNEL); 1437 if (!vport) 1438 return -ENOMEM; 1439 1440 hdev->vport = vport; 1441 hdev->num_alloc_vport = num_vport; 1442 1443 if (IS_ENABLED(CONFIG_PCI_IOV)) 1444 hdev->num_alloc_vfs = hdev->num_req_vfs; 1445 1446 for (i = 0; i < num_vport; i++) { 1447 vport->back = hdev; 
1448 vport->vport_id = i; 1449 1450 if (i == 0) 1451 ret = hclge_vport_setup(vport, tqp_main_vport); 1452 else 1453 ret = hclge_vport_setup(vport, tqp_per_vport); 1454 if (ret) { 1455 dev_err(&pdev->dev, 1456 "vport setup failed for vport %d, %d\n", 1457 i, ret); 1458 return ret; 1459 } 1460 1461 vport++; 1462 } 1463 1464 return 0; 1465 } 1466 1467 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1468 struct hclge_pkt_buf_alloc *buf_alloc) 1469 { 1470 /* TX buffer size is unit by 128 byte */ 1471 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1472 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1473 struct hclge_tx_buff_alloc_cmd *req; 1474 struct hclge_desc desc; 1475 int ret; 1476 u8 i; 1477 1478 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1479 1480 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1481 for (i = 0; i < HCLGE_TC_NUM; i++) { 1482 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1483 1484 req->tx_pkt_buff[i] = 1485 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1486 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1487 } 1488 1489 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1490 if (ret) { 1491 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1492 ret); 1493 return ret; 1494 } 1495 1496 return 0; 1497 } 1498 1499 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1500 struct hclge_pkt_buf_alloc *buf_alloc) 1501 { 1502 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1503 1504 if (ret) { 1505 dev_err(&hdev->pdev->dev, 1506 "tx buffer alloc failed %d\n", ret); 1507 return ret; 1508 } 1509 1510 return 0; 1511 } 1512 1513 static int hclge_get_tc_num(struct hclge_dev *hdev) 1514 { 1515 int i, cnt = 0; 1516 1517 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1518 if (hdev->hw_tc_map & BIT(i)) 1519 cnt++; 1520 return cnt; 1521 } 1522 1523 static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) 1524 { 1525 int i, cnt = 0; 1526 1527 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1528 if (hdev->hw_tc_map & BIT(i) && 1529 hdev->tm_info.hw_pfc_map & BIT(i)) 1530 cnt++; 1531 return cnt; 1532 } 1533 1534 /* Get the number of pfc enabled TCs, which have private buffer */ 1535 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1536 struct hclge_pkt_buf_alloc *buf_alloc) 1537 { 1538 struct hclge_priv_buf *priv; 1539 int i, cnt = 0; 1540 1541 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1542 priv = &buf_alloc->priv_buf[i]; 1543 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1544 priv->enable) 1545 cnt++; 1546 } 1547 1548 return cnt; 1549 } 1550 1551 /* Get the number of pfc disabled TCs, which have private buffer */ 1552 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1553 struct hclge_pkt_buf_alloc *buf_alloc) 1554 { 1555 struct hclge_priv_buf *priv; 1556 int i, cnt = 0; 1557 1558 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1559 priv = &buf_alloc->priv_buf[i]; 1560 if (hdev->hw_tc_map & BIT(i) && 1561 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1562 priv->enable) 1563 cnt++; 1564 } 1565 1566 return cnt; 1567 } 1568 1569 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1570 { 1571 struct hclge_priv_buf *priv; 1572 u32 rx_priv = 0; 1573 int i; 1574 1575 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1576 priv = &buf_alloc->priv_buf[i]; 1577 if (priv->enable) 1578 rx_priv += priv->buf_size; 1579 } 1580 return rx_priv; 1581 } 1582 1583 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1584 { 1585 u32 i, total_tx_size = 0; 1586 1587 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1588 total_tx_size += 
buf_alloc->priv_buf[i].tx_buf_size; 1589 1590 return total_tx_size; 1591 } 1592 1593 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, 1594 struct hclge_pkt_buf_alloc *buf_alloc, 1595 u32 rx_all) 1596 { 1597 u32 shared_buf_min, shared_buf_tc, shared_std; 1598 int tc_num, pfc_enable_num; 1599 u32 shared_buf; 1600 u32 rx_priv; 1601 int i; 1602 1603 tc_num = hclge_get_tc_num(hdev); 1604 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); 1605 1606 if (hnae3_dev_dcb_supported(hdev)) 1607 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; 1608 else 1609 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; 1610 1611 shared_buf_tc = pfc_enable_num * hdev->mps + 1612 (tc_num - pfc_enable_num) * hdev->mps / 2 + 1613 hdev->mps; 1614 shared_std = max_t(u32, shared_buf_min, shared_buf_tc); 1615 1616 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); 1617 if (rx_all <= rx_priv + shared_std) 1618 return false; 1619 1620 shared_buf = rx_all - rx_priv; 1621 buf_alloc->s_buf.buf_size = shared_buf; 1622 buf_alloc->s_buf.self.high = shared_buf; 1623 buf_alloc->s_buf.self.low = 2 * hdev->mps; 1624 1625 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1626 if ((hdev->hw_tc_map & BIT(i)) && 1627 (hdev->tm_info.hw_pfc_map & BIT(i))) { 1628 buf_alloc->s_buf.tc_thrd[i].low = hdev->mps; 1629 buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps; 1630 } else { 1631 buf_alloc->s_buf.tc_thrd[i].low = 0; 1632 buf_alloc->s_buf.tc_thrd[i].high = hdev->mps; 1633 } 1634 } 1635 1636 return true; 1637 } 1638 1639 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, 1640 struct hclge_pkt_buf_alloc *buf_alloc) 1641 { 1642 u32 i, total_size; 1643 1644 total_size = hdev->pkt_buf_size; 1645 1646 /* alloc tx buffer for all enabled tc */ 1647 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1648 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1649 1650 if (total_size < HCLGE_DEFAULT_TX_BUF) 1651 return -ENOMEM; 1652 1653 if (hdev->hw_tc_map & BIT(i)) 1654 priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF; 1655 else 1656 priv->tx_buf_size = 0; 1657 1658 total_size -= priv->tx_buf_size; 1659 } 1660 1661 return 0; 1662 } 1663 1664 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs 1665 * @hdev: pointer to struct hclge_dev 1666 * @buf_alloc: pointer to buffer calculation data 1667 * @return: 0: calculate sucessful, negative: fail 1668 */ 1669 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, 1670 struct hclge_pkt_buf_alloc *buf_alloc) 1671 { 1672 u32 rx_all = hdev->pkt_buf_size; 1673 int no_pfc_priv_num, pfc_priv_num; 1674 struct hclge_priv_buf *priv; 1675 int i; 1676 1677 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); 1678 1679 /* When DCB is not supported, rx private 1680 * buffer is not allocated. 
1681 */ 1682 if (!hnae3_dev_dcb_supported(hdev)) { 1683 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1684 return -ENOMEM; 1685 1686 return 0; 1687 } 1688 1689 /* step 1, try to alloc private buffer for all enabled tc */ 1690 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1691 priv = &buf_alloc->priv_buf[i]; 1692 if (hdev->hw_tc_map & BIT(i)) { 1693 priv->enable = 1; 1694 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1695 priv->wl.low = hdev->mps; 1696 priv->wl.high = priv->wl.low + hdev->mps; 1697 priv->buf_size = priv->wl.high + 1698 HCLGE_DEFAULT_DV; 1699 } else { 1700 priv->wl.low = 0; 1701 priv->wl.high = 2 * hdev->mps; 1702 priv->buf_size = priv->wl.high; 1703 } 1704 } else { 1705 priv->enable = 0; 1706 priv->wl.low = 0; 1707 priv->wl.high = 0; 1708 priv->buf_size = 0; 1709 } 1710 } 1711 1712 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1713 return 0; 1714 1715 /* step 2, try to decrease the buffer size of 1716 * no pfc TC's private buffer 1717 */ 1718 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1719 priv = &buf_alloc->priv_buf[i]; 1720 1721 priv->enable = 0; 1722 priv->wl.low = 0; 1723 priv->wl.high = 0; 1724 priv->buf_size = 0; 1725 1726 if (!(hdev->hw_tc_map & BIT(i))) 1727 continue; 1728 1729 priv->enable = 1; 1730 1731 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1732 priv->wl.low = 128; 1733 priv->wl.high = priv->wl.low + hdev->mps; 1734 priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; 1735 } else { 1736 priv->wl.low = 0; 1737 priv->wl.high = hdev->mps; 1738 priv->buf_size = priv->wl.high; 1739 } 1740 } 1741 1742 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1743 return 0; 1744 1745 /* step 3, try to reduce the number of pfc disabled TCs, 1746 * which have private buffer 1747 */ 1748 /* get the total no pfc enable TC number, which have private buffer */ 1749 no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 1750 1751 /* let the last to be cleared first */ 1752 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1753 priv = &buf_alloc->priv_buf[i]; 1754 1755 if (hdev->hw_tc_map & BIT(i) && 1756 !(hdev->tm_info.hw_pfc_map & BIT(i))) { 1757 /* Clear the no pfc TC private buffer */ 1758 priv->wl.low = 0; 1759 priv->wl.high = 0; 1760 priv->buf_size = 0; 1761 priv->enable = 0; 1762 no_pfc_priv_num--; 1763 } 1764 1765 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1766 no_pfc_priv_num == 0) 1767 break; 1768 } 1769 1770 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1771 return 0; 1772 1773 /* step 4, try to reduce the number of pfc enabled TCs 1774 * which have private buffer. 
1775 */ 1776 pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 1777 1778 /* let the last to be cleared first */ 1779 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 1780 priv = &buf_alloc->priv_buf[i]; 1781 1782 if (hdev->hw_tc_map & BIT(i) && 1783 hdev->tm_info.hw_pfc_map & BIT(i)) { 1784 /* Reduce the number of pfc TC with private buffer */ 1785 priv->wl.low = 0; 1786 priv->enable = 0; 1787 priv->wl.high = 0; 1788 priv->buf_size = 0; 1789 pfc_priv_num--; 1790 } 1791 1792 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 1793 pfc_priv_num == 0) 1794 break; 1795 } 1796 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 1797 return 0; 1798 1799 return -ENOMEM; 1800 } 1801 1802 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 1803 struct hclge_pkt_buf_alloc *buf_alloc) 1804 { 1805 struct hclge_rx_priv_buff_cmd *req; 1806 struct hclge_desc desc; 1807 int ret; 1808 int i; 1809 1810 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 1811 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 1812 1813 /* Alloc private buffer TCs */ 1814 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1815 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 1816 1817 req->buf_num[i] = 1818 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 1819 req->buf_num[i] |= 1820 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 1821 } 1822 1823 req->shared_buf = 1824 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 1825 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 1826 1827 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1828 if (ret) { 1829 dev_err(&hdev->pdev->dev, 1830 "rx private buffer alloc cmd failed %d\n", ret); 1831 return ret; 1832 } 1833 1834 return 0; 1835 } 1836 1837 #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) 1838 1839 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 1840 struct hclge_pkt_buf_alloc *buf_alloc) 1841 { 1842 struct hclge_rx_priv_wl_buf *req; 1843 struct hclge_priv_buf *priv; 1844 struct hclge_desc desc[2]; 1845 int i, j; 1846 int ret; 1847 1848 for (i = 0; i < 2; i++) { 1849 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 1850 false); 1851 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 1852 1853 /* The first descriptor set the NEXT bit to 1 */ 1854 if (i == 0) 1855 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1856 else 1857 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1858 1859 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1860 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 1861 1862 priv = &buf_alloc->priv_buf[idx]; 1863 req->tc_wl[j].high = 1864 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 1865 req->tc_wl[j].high |= 1866 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << 1867 HCLGE_RX_PRIV_EN_B); 1868 req->tc_wl[j].low = 1869 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 1870 req->tc_wl[j].low |= 1871 cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << 1872 HCLGE_RX_PRIV_EN_B); 1873 } 1874 } 1875 1876 /* Send 2 descriptor at one time */ 1877 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1878 if (ret) { 1879 dev_err(&hdev->pdev->dev, 1880 "rx private waterline config cmd failed %d\n", 1881 ret); 1882 return ret; 1883 } 1884 return 0; 1885 } 1886 1887 static int hclge_common_thrd_config(struct hclge_dev *hdev, 1888 struct hclge_pkt_buf_alloc *buf_alloc) 1889 { 1890 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 1891 struct hclge_rx_com_thrd *req; 1892 struct hclge_desc desc[2]; 1893 struct hclge_tc_thrd *tc; 1894 int i, j; 1895 int ret; 1896 1897 for (i = 0; i < 2; i++) { 1898 hclge_cmd_setup_basic_desc(&desc[i], 1899 HCLGE_OPC_RX_COM_THRD_ALLOC, 
false); 1900 req = (struct hclge_rx_com_thrd *)&desc[i].data; 1901 1902 /* The first descriptor set the NEXT bit to 1 */ 1903 if (i == 0) 1904 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1905 else 1906 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 1907 1908 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 1909 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 1910 1911 req->com_thrd[j].high = 1912 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 1913 req->com_thrd[j].high |= 1914 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << 1915 HCLGE_RX_PRIV_EN_B); 1916 req->com_thrd[j].low = 1917 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 1918 req->com_thrd[j].low |= 1919 cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << 1920 HCLGE_RX_PRIV_EN_B); 1921 } 1922 } 1923 1924 /* Send 2 descriptors at one time */ 1925 ret = hclge_cmd_send(&hdev->hw, desc, 2); 1926 if (ret) { 1927 dev_err(&hdev->pdev->dev, 1928 "common threshold config cmd failed %d\n", ret); 1929 return ret; 1930 } 1931 return 0; 1932 } 1933 1934 static int hclge_common_wl_config(struct hclge_dev *hdev, 1935 struct hclge_pkt_buf_alloc *buf_alloc) 1936 { 1937 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 1938 struct hclge_rx_com_wl *req; 1939 struct hclge_desc desc; 1940 int ret; 1941 1942 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 1943 1944 req = (struct hclge_rx_com_wl *)desc.data; 1945 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 1946 req->com_wl.high |= 1947 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << 1948 HCLGE_RX_PRIV_EN_B); 1949 1950 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 1951 req->com_wl.low |= 1952 cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << 1953 HCLGE_RX_PRIV_EN_B); 1954 1955 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1956 if (ret) { 1957 dev_err(&hdev->pdev->dev, 1958 "common waterline config cmd failed %d\n", ret); 1959 return ret; 1960 } 1961 1962 return 0; 1963 } 1964 1965 int hclge_buffer_alloc(struct hclge_dev *hdev) 1966 { 1967 struct hclge_pkt_buf_alloc *pkt_buf; 1968 int ret; 1969 1970 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 1971 if (!pkt_buf) 1972 return -ENOMEM; 1973 1974 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 1975 if (ret) { 1976 dev_err(&hdev->pdev->dev, 1977 "could not calc tx buffer size for all TCs %d\n", ret); 1978 goto out; 1979 } 1980 1981 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 1982 if (ret) { 1983 dev_err(&hdev->pdev->dev, 1984 "could not alloc tx buffers %d\n", ret); 1985 goto out; 1986 } 1987 1988 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 1989 if (ret) { 1990 dev_err(&hdev->pdev->dev, 1991 "could not calc rx priv buffer size for all TCs %d\n", 1992 ret); 1993 goto out; 1994 } 1995 1996 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 1997 if (ret) { 1998 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 1999 ret); 2000 goto out; 2001 } 2002 2003 if (hnae3_dev_dcb_supported(hdev)) { 2004 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2005 if (ret) { 2006 dev_err(&hdev->pdev->dev, 2007 "could not configure rx private waterline %d\n", 2008 ret); 2009 goto out; 2010 } 2011 2012 ret = hclge_common_thrd_config(hdev, pkt_buf); 2013 if (ret) { 2014 dev_err(&hdev->pdev->dev, 2015 "could not configure common threshold %d\n", 2016 ret); 2017 goto out; 2018 } 2019 } 2020 2021 ret = hclge_common_wl_config(hdev, pkt_buf); 2022 if (ret) 2023 dev_err(&hdev->pdev->dev, 2024 "could not configure common waterline %d\n", ret); 2025 2026 out: 2027 kfree(pkt_buf); 2028 return ret; 2029 } 2030 2031 static int 
hclge_init_roce_base_info(struct hclge_vport *vport) 2032 { 2033 struct hnae3_handle *roce = &vport->roce; 2034 struct hnae3_handle *nic = &vport->nic; 2035 2036 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2037 2038 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || 2039 vport->back->num_msi_left == 0) 2040 return -EINVAL; 2041 2042 roce->rinfo.base_vector = vport->back->roce_base_vector; 2043 2044 roce->rinfo.netdev = nic->kinfo.netdev; 2045 roce->rinfo.roce_io_base = vport->back->hw.io_base; 2046 2047 roce->pdev = nic->pdev; 2048 roce->ae_algo = nic->ae_algo; 2049 roce->numa_node_mask = nic->numa_node_mask; 2050 2051 return 0; 2052 } 2053 2054 static int hclge_init_msi(struct hclge_dev *hdev) 2055 { 2056 struct pci_dev *pdev = hdev->pdev; 2057 int vectors; 2058 int i; 2059 2060 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 2061 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2062 if (vectors < 0) { 2063 dev_err(&pdev->dev, 2064 "failed(%d) to allocate MSI/MSI-X vectors\n", 2065 vectors); 2066 return vectors; 2067 } 2068 if (vectors < hdev->num_msi) 2069 dev_warn(&hdev->pdev->dev, 2070 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2071 hdev->num_msi, vectors); 2072 2073 hdev->num_msi = vectors; 2074 hdev->num_msi_left = vectors; 2075 hdev->base_msi_vector = pdev->irq; 2076 hdev->roce_base_vector = hdev->base_msi_vector + 2077 HCLGE_ROCE_VECTOR_OFFSET; 2078 2079 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2080 sizeof(u16), GFP_KERNEL); 2081 if (!hdev->vector_status) { 2082 pci_free_irq_vectors(pdev); 2083 return -ENOMEM; 2084 } 2085 2086 for (i = 0; i < hdev->num_msi; i++) 2087 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2088 2089 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2090 sizeof(int), GFP_KERNEL); 2091 if (!hdev->vector_irq) { 2092 pci_free_irq_vectors(pdev); 2093 return -ENOMEM; 2094 } 2095 2096 return 0; 2097 } 2098 2099 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) 2100 { 2101 struct hclge_mac *mac = &hdev->hw.mac; 2102 2103 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) 2104 mac->duplex = (u8)duplex; 2105 else 2106 mac->duplex = HCLGE_MAC_FULL; 2107 2108 mac->speed = speed; 2109 } 2110 2111 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) 2112 { 2113 struct hclge_config_mac_speed_dup_cmd *req; 2114 struct hclge_desc desc; 2115 int ret; 2116 2117 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2118 2119 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2120 2121 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); 2122 2123 switch (speed) { 2124 case HCLGE_MAC_SPEED_10M: 2125 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2126 HCLGE_CFG_SPEED_S, 6); 2127 break; 2128 case HCLGE_MAC_SPEED_100M: 2129 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2130 HCLGE_CFG_SPEED_S, 7); 2131 break; 2132 case HCLGE_MAC_SPEED_1G: 2133 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2134 HCLGE_CFG_SPEED_S, 0); 2135 break; 2136 case HCLGE_MAC_SPEED_10G: 2137 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2138 HCLGE_CFG_SPEED_S, 1); 2139 break; 2140 case HCLGE_MAC_SPEED_25G: 2141 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2142 HCLGE_CFG_SPEED_S, 2); 2143 break; 2144 case HCLGE_MAC_SPEED_40G: 2145 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2146 HCLGE_CFG_SPEED_S, 3); 2147 break; 2148 case HCLGE_MAC_SPEED_50G: 2149 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2150 HCLGE_CFG_SPEED_S, 4); 
2151 break; 2152 case HCLGE_MAC_SPEED_100G: 2153 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2154 HCLGE_CFG_SPEED_S, 5); 2155 break; 2156 default: 2157 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2158 return -EINVAL; 2159 } 2160 2161 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2162 1); 2163 2164 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2165 if (ret) { 2166 dev_err(&hdev->pdev->dev, 2167 "mac speed/duplex config cmd failed %d.\n", ret); 2168 return ret; 2169 } 2170 2171 hclge_check_speed_dup(hdev, duplex, speed); 2172 2173 return 0; 2174 } 2175 2176 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2177 u8 duplex) 2178 { 2179 struct hclge_vport *vport = hclge_get_vport(handle); 2180 struct hclge_dev *hdev = vport->back; 2181 2182 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2183 } 2184 2185 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, 2186 u8 *duplex) 2187 { 2188 struct hclge_query_an_speed_dup_cmd *req; 2189 struct hclge_desc desc; 2190 int speed_tmp; 2191 int ret; 2192 2193 req = (struct hclge_query_an_speed_dup_cmd *)desc.data; 2194 2195 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); 2196 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2197 if (ret) { 2198 dev_err(&hdev->pdev->dev, 2199 "mac speed/autoneg/duplex query cmd failed %d\n", 2200 ret); 2201 return ret; 2202 } 2203 2204 *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); 2205 speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, 2206 HCLGE_QUERY_SPEED_S); 2207 2208 ret = hclge_parse_speed(speed_tmp, speed); 2209 if (ret) { 2210 dev_err(&hdev->pdev->dev, 2211 "could not parse speed(=%d), %d\n", speed_tmp, ret); 2212 return -EIO; 2213 } 2214 2215 return 0; 2216 } 2217 2218 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2219 { 2220 struct hclge_config_auto_neg_cmd *req; 2221 struct hclge_desc desc; 2222 u32 flag = 0; 2223 int ret; 2224 2225 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2226 2227 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2228 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); 2229 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2230 2231 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2232 if (ret) { 2233 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2234 ret); 2235 return ret; 2236 } 2237 2238 return 0; 2239 } 2240 2241 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2242 { 2243 struct hclge_vport *vport = hclge_get_vport(handle); 2244 struct hclge_dev *hdev = vport->back; 2245 2246 return hclge_set_autoneg_en(hdev, enable); 2247 } 2248 2249 static int hclge_get_autoneg(struct hnae3_handle *handle) 2250 { 2251 struct hclge_vport *vport = hclge_get_vport(handle); 2252 struct hclge_dev *hdev = vport->back; 2253 struct phy_device *phydev = hdev->hw.mac.phydev; 2254 2255 if (phydev) 2256 return phydev->autoneg; 2257 2258 return hdev->hw.mac.autoneg; 2259 } 2260 2261 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, 2262 bool mask_vlan, 2263 u8 *mac_mask) 2264 { 2265 struct hclge_mac_vlan_mask_entry_cmd *req; 2266 struct hclge_desc desc; 2267 int status; 2268 2269 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; 2270 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); 2271 2272 hnae3_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, 2273 mask_vlan ? 
1 : 0); 2274 ether_addr_copy(req->mac_mask, mac_mask); 2275 2276 status = hclge_cmd_send(&hdev->hw, &desc, 1); 2277 if (status) 2278 dev_err(&hdev->pdev->dev, 2279 "Config mac_vlan_mask failed for cmd_send, ret =%d\n", 2280 status); 2281 2282 return status; 2283 } 2284 2285 static int hclge_mac_init(struct hclge_dev *hdev) 2286 { 2287 struct hnae3_handle *handle = &hdev->vport[0].nic; 2288 struct net_device *netdev = handle->kinfo.netdev; 2289 struct hclge_mac *mac = &hdev->hw.mac; 2290 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 2291 struct hclge_vport *vport; 2292 int mtu; 2293 int ret; 2294 int i; 2295 2296 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); 2297 if (ret) { 2298 dev_err(&hdev->pdev->dev, 2299 "Config mac speed dup fail ret=%d\n", ret); 2300 return ret; 2301 } 2302 2303 mac->link = 0; 2304 2305 /* Initialize the MTA table work mode */ 2306 hdev->enable_mta = true; 2307 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; 2308 2309 ret = hclge_set_mta_filter_mode(hdev, 2310 hdev->mta_mac_sel_type, 2311 hdev->enable_mta); 2312 if (ret) { 2313 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", 2314 ret); 2315 return ret; 2316 } 2317 2318 for (i = 0; i < hdev->num_alloc_vport; i++) { 2319 vport = &hdev->vport[i]; 2320 vport->accept_mta_mc = false; 2321 2322 memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); 2323 ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); 2324 if (ret) { 2325 dev_err(&hdev->pdev->dev, 2326 "set mta filter mode fail ret=%d\n", ret); 2327 return ret; 2328 } 2329 } 2330 2331 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); 2332 if (ret) { 2333 dev_err(&hdev->pdev->dev, 2334 "set default mac_vlan_mask fail ret=%d\n", ret); 2335 return ret; 2336 } 2337 2338 if (netdev) 2339 mtu = netdev->mtu; 2340 else 2341 mtu = ETH_DATA_LEN; 2342 2343 ret = hclge_set_mtu(handle, mtu); 2344 if (ret) { 2345 dev_err(&hdev->pdev->dev, 2346 "set mtu failed ret=%d\n", ret); 2347 return ret; 2348 } 2349 2350 return 0; 2351 } 2352 2353 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2354 { 2355 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2356 schedule_work(&hdev->mbx_service_task); 2357 } 2358 2359 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 2360 { 2361 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 2362 schedule_work(&hdev->rst_service_task); 2363 } 2364 2365 static void hclge_task_schedule(struct hclge_dev *hdev) 2366 { 2367 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && 2368 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2369 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) 2370 (void)schedule_work(&hdev->service_task); 2371 } 2372 2373 static int hclge_get_mac_link_status(struct hclge_dev *hdev) 2374 { 2375 struct hclge_link_status_cmd *req; 2376 struct hclge_desc desc; 2377 int link_status; 2378 int ret; 2379 2380 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2381 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2382 if (ret) { 2383 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2384 ret); 2385 return ret; 2386 } 2387 2388 req = (struct hclge_link_status_cmd *)desc.data; 2389 link_status = req->status & HCLGE_LINK_STATUS; 2390 2391 return !!link_status; 2392 } 2393 2394 static int hclge_get_mac_phy_link(struct hclge_dev *hdev) 2395 { 2396 int mac_state; 2397 int link_stat; 2398 2399 mac_state = hclge_get_mac_link_status(hdev); 2400 2401 if (hdev->hw.mac.phydev) { 2402 if 
(!genphy_read_status(hdev->hw.mac.phydev)) 2403 link_stat = mac_state & 2404 hdev->hw.mac.phydev->link; 2405 else 2406 link_stat = 0; 2407 2408 } else { 2409 link_stat = mac_state; 2410 } 2411 2412 return !!link_stat; 2413 } 2414 2415 static void hclge_update_link_status(struct hclge_dev *hdev) 2416 { 2417 struct hnae3_client *client = hdev->nic_client; 2418 struct hnae3_handle *handle; 2419 int state; 2420 int i; 2421 2422 if (!client) 2423 return; 2424 state = hclge_get_mac_phy_link(hdev); 2425 if (state != hdev->hw.mac.link) { 2426 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2427 handle = &hdev->vport[i].nic; 2428 client->ops->link_status_change(handle, state); 2429 } 2430 hdev->hw.mac.link = state; 2431 } 2432 } 2433 2434 static int hclge_update_speed_duplex(struct hclge_dev *hdev) 2435 { 2436 struct hclge_mac mac = hdev->hw.mac; 2437 u8 duplex; 2438 int speed; 2439 int ret; 2440 2441 /* get the speed and duplex as the autoneg result from the mac cmd when 2442 * the phy doesn't exist. 2443 */ 2444 if (mac.phydev || !mac.autoneg) 2445 return 0; 2446 2447 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); 2448 if (ret) { 2449 dev_err(&hdev->pdev->dev, 2450 "mac autoneg/speed/duplex query failed %d\n", ret); 2451 return ret; 2452 } 2453 2454 if ((mac.speed != speed) || (mac.duplex != duplex)) { 2455 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2456 if (ret) { 2457 dev_err(&hdev->pdev->dev, 2458 "mac speed/duplex config failed %d\n", ret); 2459 return ret; 2460 } 2461 } 2462 2463 return 0; 2464 } 2465 2466 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) 2467 { 2468 struct hclge_vport *vport = hclge_get_vport(handle); 2469 struct hclge_dev *hdev = vport->back; 2470 2471 return hclge_update_speed_duplex(hdev); 2472 } 2473 2474 static int hclge_get_status(struct hnae3_handle *handle) 2475 { 2476 struct hclge_vport *vport = hclge_get_vport(handle); 2477 struct hclge_dev *hdev = vport->back; 2478 2479 hclge_update_link_status(hdev); 2480 2481 return hdev->hw.mac.link; 2482 } 2483 2484 static void hclge_service_timer(struct timer_list *t) 2485 { 2486 struct hclge_dev *hdev = from_timer(hdev, t, service_timer); 2487 2488 mod_timer(&hdev->service_timer, jiffies + HZ); 2489 hdev->hw_stats.stats_timer++; 2490 hclge_task_schedule(hdev); 2491 } 2492 2493 static void hclge_service_complete(struct hclge_dev *hdev) 2494 { 2495 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); 2496 2497 /* Flush memory before next watchdog */ 2498 smp_mb__before_atomic(); 2499 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 2500 } 2501 2502 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 2503 { 2504 u32 rst_src_reg; 2505 u32 cmdq_src_reg; 2506 2507 /* fetch the events from their corresponding regs */ 2508 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); 2509 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 2510 2511 /* Assumption: If by any chance reset and mailbox events are reported 2512 * together then we will only process the reset event in this go and will 2513 * defer the processing of the mailbox events. Since we would not have 2514 * cleared the RX CMDQ event this time, we would receive another 2515 * interrupt from H/W just for the mailbox.
2516 */ 2517 2518 /* check for vector0 reset event sources */ 2519 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { 2520 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 2521 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2522 return HCLGE_VECTOR0_EVENT_RST; 2523 } 2524 2525 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { 2526 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); 2527 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2528 return HCLGE_VECTOR0_EVENT_RST; 2529 } 2530 2531 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { 2532 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 2533 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2534 return HCLGE_VECTOR0_EVENT_RST; 2535 } 2536 2537 /* check for vector0 mailbox(=CMDQ RX) event source */ 2538 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 2539 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 2540 *clearval = cmdq_src_reg; 2541 return HCLGE_VECTOR0_EVENT_MBX; 2542 } 2543 2544 return HCLGE_VECTOR0_EVENT_OTHER; 2545 } 2546 2547 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 2548 u32 regclr) 2549 { 2550 switch (event_type) { 2551 case HCLGE_VECTOR0_EVENT_RST: 2552 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 2553 break; 2554 case HCLGE_VECTOR0_EVENT_MBX: 2555 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 2556 break; 2557 } 2558 } 2559 2560 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) 2561 { 2562 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, 2563 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | 2564 BIT(HCLGE_VECTOR0_CORERESET_INT_B) | 2565 BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); 2566 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); 2567 } 2568 2569 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 2570 { 2571 writel(enable ? 1 : 0, vector->addr); 2572 } 2573 2574 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 2575 { 2576 struct hclge_dev *hdev = data; 2577 u32 event_cause; 2578 u32 clearval; 2579 2580 hclge_enable_vector(&hdev->misc_vector, false); 2581 event_cause = hclge_check_event_cause(hdev, &clearval); 2582 2583 /* vector 0 interrupt is shared with reset and mailbox source events.*/ 2584 switch (event_cause) { 2585 case HCLGE_VECTOR0_EVENT_RST: 2586 hclge_reset_task_schedule(hdev); 2587 break; 2588 case HCLGE_VECTOR0_EVENT_MBX: 2589 /* If we are here then, 2590 * 1. Either we are not handling any mbx task and we are not 2591 * scheduled as well 2592 * OR 2593 * 2. We could be handling a mbx task but nothing more is 2594 * scheduled. 2595 * In both cases, we should schedule mbx task as there are more 2596 * mbx messages reported by this interrupt. 
2597 */ 2598 hclge_mbx_task_schedule(hdev); 2599 break; 2600 default: 2601 dev_warn(&hdev->pdev->dev, 2602 "received unknown or unhandled event of vector0\n"); 2603 break; 2604 } 2605 2606 /* clear the source of interrupt if it is not cause by reset */ 2607 if (event_cause != HCLGE_VECTOR0_EVENT_RST) { 2608 hclge_clear_event_cause(hdev, event_cause, clearval); 2609 hclge_enable_vector(&hdev->misc_vector, true); 2610 } 2611 2612 return IRQ_HANDLED; 2613 } 2614 2615 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 2616 { 2617 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { 2618 dev_warn(&hdev->pdev->dev, 2619 "vector(vector_id %d) has been freed.\n", vector_id); 2620 return; 2621 } 2622 2623 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 2624 hdev->num_msi_left += 1; 2625 hdev->num_msi_used -= 1; 2626 } 2627 2628 static void hclge_get_misc_vector(struct hclge_dev *hdev) 2629 { 2630 struct hclge_misc_vector *vector = &hdev->misc_vector; 2631 2632 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 2633 2634 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 2635 hdev->vector_status[0] = 0; 2636 2637 hdev->num_msi_left -= 1; 2638 hdev->num_msi_used += 1; 2639 } 2640 2641 static int hclge_misc_irq_init(struct hclge_dev *hdev) 2642 { 2643 int ret; 2644 2645 hclge_get_misc_vector(hdev); 2646 2647 /* this would be explicitly freed in the end */ 2648 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 2649 0, "hclge_misc", hdev); 2650 if (ret) { 2651 hclge_free_vector(hdev, 0); 2652 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 2653 hdev->misc_vector.vector_irq); 2654 } 2655 2656 return ret; 2657 } 2658 2659 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 2660 { 2661 free_irq(hdev->misc_vector.vector_irq, hdev); 2662 hclge_free_vector(hdev, 0); 2663 } 2664 2665 static int hclge_notify_client(struct hclge_dev *hdev, 2666 enum hnae3_reset_notify_type type) 2667 { 2668 struct hnae3_client *client = hdev->nic_client; 2669 u16 i; 2670 2671 if (!client->ops->reset_notify) 2672 return -EOPNOTSUPP; 2673 2674 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2675 struct hnae3_handle *handle = &hdev->vport[i].nic; 2676 int ret; 2677 2678 ret = client->ops->reset_notify(handle, type); 2679 if (ret) 2680 return ret; 2681 } 2682 2683 return 0; 2684 } 2685 2686 static int hclge_reset_wait(struct hclge_dev *hdev) 2687 { 2688 #define HCLGE_RESET_WATI_MS 100 2689 #define HCLGE_RESET_WAIT_CNT 5 2690 u32 val, reg, reg_bit; 2691 u32 cnt = 0; 2692 2693 switch (hdev->reset_type) { 2694 case HNAE3_GLOBAL_RESET: 2695 reg = HCLGE_GLOBAL_RESET_REG; 2696 reg_bit = HCLGE_GLOBAL_RESET_BIT; 2697 break; 2698 case HNAE3_CORE_RESET: 2699 reg = HCLGE_GLOBAL_RESET_REG; 2700 reg_bit = HCLGE_CORE_RESET_BIT; 2701 break; 2702 case HNAE3_FUNC_RESET: 2703 reg = HCLGE_FUN_RST_ING; 2704 reg_bit = HCLGE_FUN_RST_ING_B; 2705 break; 2706 default: 2707 dev_err(&hdev->pdev->dev, 2708 "Wait for unsupported reset type: %d\n", 2709 hdev->reset_type); 2710 return -EINVAL; 2711 } 2712 2713 val = hclge_read_dev(&hdev->hw, reg); 2714 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 2715 msleep(HCLGE_RESET_WATI_MS); 2716 val = hclge_read_dev(&hdev->hw, reg); 2717 cnt++; 2718 } 2719 2720 if (cnt >= HCLGE_RESET_WAIT_CNT) { 2721 dev_warn(&hdev->pdev->dev, 2722 "Wait for reset timeout: %d\n", hdev->reset_type); 2723 return -EBUSY; 2724 } 2725 2726 return 0; 2727 } 2728 2729 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 2730 { 2731 
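/* Request a function-level reset for the function selected by func_id
 * via the HCLGE_OPC_CFG_RST_TRIGGER command; hclge_do_reset() passes 0
 * here to reset the PF itself.
 */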
struct hclge_desc desc; 2732 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 2733 int ret; 2734 2735 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 2736 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 2737 req->fun_reset_vfid = func_id; 2738 2739 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2740 if (ret) 2741 dev_err(&hdev->pdev->dev, 2742 "send function reset cmd fail, status =%d\n", ret); 2743 2744 return ret; 2745 } 2746 2747 static void hclge_do_reset(struct hclge_dev *hdev) 2748 { 2749 struct pci_dev *pdev = hdev->pdev; 2750 u32 val; 2751 2752 switch (hdev->reset_type) { 2753 case HNAE3_GLOBAL_RESET: 2754 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2755 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 2756 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2757 dev_info(&pdev->dev, "Global Reset requested\n"); 2758 break; 2759 case HNAE3_CORE_RESET: 2760 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2761 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1); 2762 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2763 dev_info(&pdev->dev, "Core Reset requested\n"); 2764 break; 2765 case HNAE3_FUNC_RESET: 2766 dev_info(&pdev->dev, "PF Reset requested\n"); 2767 hclge_func_reset_cmd(hdev, 0); 2768 /* schedule again to check later */ 2769 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 2770 hclge_reset_task_schedule(hdev); 2771 break; 2772 default: 2773 dev_warn(&pdev->dev, 2774 "Unsupported reset type: %d\n", hdev->reset_type); 2775 break; 2776 } 2777 } 2778 2779 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, 2780 unsigned long *addr) 2781 { 2782 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2783 2784 /* return the highest priority reset level amongst all */ 2785 if (test_bit(HNAE3_GLOBAL_RESET, addr)) 2786 rst_level = HNAE3_GLOBAL_RESET; 2787 else if (test_bit(HNAE3_CORE_RESET, addr)) 2788 rst_level = HNAE3_CORE_RESET; 2789 else if (test_bit(HNAE3_IMP_RESET, addr)) 2790 rst_level = HNAE3_IMP_RESET; 2791 else if (test_bit(HNAE3_FUNC_RESET, addr)) 2792 rst_level = HNAE3_FUNC_RESET; 2793 2794 /* now, clear all other resets */ 2795 clear_bit(HNAE3_GLOBAL_RESET, addr); 2796 clear_bit(HNAE3_CORE_RESET, addr); 2797 clear_bit(HNAE3_IMP_RESET, addr); 2798 clear_bit(HNAE3_FUNC_RESET, addr); 2799 2800 return rst_level; 2801 } 2802 2803 static void hclge_clear_reset_cause(struct hclge_dev *hdev) 2804 { 2805 u32 clearval = 0; 2806 2807 switch (hdev->reset_type) { 2808 case HNAE3_IMP_RESET: 2809 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2810 break; 2811 case HNAE3_GLOBAL_RESET: 2812 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2813 break; 2814 case HNAE3_CORE_RESET: 2815 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2816 break; 2817 default: 2818 dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d", 2819 hdev->reset_type); 2820 break; 2821 } 2822 2823 if (!clearval) 2824 return; 2825 2826 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); 2827 hclge_enable_vector(&hdev->misc_vector, true); 2828 } 2829 2830 static void hclge_reset(struct hclge_dev *hdev) 2831 { 2832 /* perform reset of the stack & ae device for a client */ 2833 2834 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2835 2836 if (!hclge_reset_wait(hdev)) { 2837 rtnl_lock(); 2838 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 2839 hclge_reset_ae_dev(hdev->ae_dev); 2840 hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 2841 rtnl_unlock(); 2842 2843 hclge_clear_reset_cause(hdev); 2844 } else { 2845 /* 
schedule again to check pending resets later */ 2846 set_bit(hdev->reset_type, &hdev->reset_pending); 2847 hclge_reset_task_schedule(hdev); 2848 } 2849 2850 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2851 } 2852 2853 static void hclge_reset_event(struct hnae3_handle *handle) 2854 { 2855 struct hclge_vport *vport = hclge_get_vport(handle); 2856 struct hclge_dev *hdev = vport->back; 2857 2858 /* check if this is a new reset request or we are here just because the 2859 * last reset attempt did not succeed and the watchdog hit us again. We 2860 * know it is a new request if the last reset request did not occur very 2861 * recently (watchdog timer = 5*HZ, so check after a sufficiently long 2862 * time, say 4*5*HZ). In case of a new request we reset the "reset level" to PF reset. 2863 */ 2864 if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) 2865 handle->reset_level = HNAE3_FUNC_RESET; 2866 2867 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d", 2868 handle->reset_level); 2869 2870 /* request reset & schedule reset task */ 2871 set_bit(handle->reset_level, &hdev->reset_request); 2872 hclge_reset_task_schedule(hdev); 2873 2874 if (handle->reset_level < HNAE3_GLOBAL_RESET) 2875 handle->reset_level++; 2876 2877 handle->last_reset_time = jiffies; 2878 } 2879 2880 static void hclge_reset_subtask(struct hclge_dev *hdev) 2881 { 2882 /* check if there is any ongoing reset in the hardware. This status can 2883 * be checked from reset_pending. If there is, we need to wait for the 2884 * hardware to complete the reset. 2885 * a. If we are able to figure out in reasonable time that hardware 2886 * has fully reset, then we can proceed with the driver and client 2887 * reset. 2888 * b. else, we can come back later to check this status so re-schedule 2889 * now.
2890 */ 2891 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); 2892 if (hdev->reset_type != HNAE3_NONE_RESET) 2893 hclge_reset(hdev); 2894 2895 /* check if we got any *new* reset requests to be honored */ 2896 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); 2897 if (hdev->reset_type != HNAE3_NONE_RESET) 2898 hclge_do_reset(hdev); 2899 2900 hdev->reset_type = HNAE3_NONE_RESET; 2901 } 2902 2903 static void hclge_reset_service_task(struct work_struct *work) 2904 { 2905 struct hclge_dev *hdev = 2906 container_of(work, struct hclge_dev, rst_service_task); 2907 2908 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2909 return; 2910 2911 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 2912 2913 hclge_reset_subtask(hdev); 2914 2915 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 2916 } 2917 2918 static void hclge_mailbox_service_task(struct work_struct *work) 2919 { 2920 struct hclge_dev *hdev = 2921 container_of(work, struct hclge_dev, mbx_service_task); 2922 2923 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 2924 return; 2925 2926 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 2927 2928 hclge_mbx_handler(hdev); 2929 2930 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 2931 } 2932 2933 static void hclge_service_task(struct work_struct *work) 2934 { 2935 struct hclge_dev *hdev = 2936 container_of(work, struct hclge_dev, service_task); 2937 2938 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { 2939 hclge_update_stats_for_all(hdev); 2940 hdev->hw_stats.stats_timer = 0; 2941 } 2942 2943 hclge_update_speed_duplex(hdev); 2944 hclge_update_link_status(hdev); 2945 hclge_service_complete(hdev); 2946 } 2947 2948 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 2949 { 2950 /* VF handle has no client */ 2951 if (!handle->client) 2952 return container_of(handle, struct hclge_vport, nic); 2953 else if (handle->client->type == HNAE3_CLIENT_ROCE) 2954 return container_of(handle, struct hclge_vport, roce); 2955 else 2956 return container_of(handle, struct hclge_vport, nic); 2957 } 2958 2959 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 2960 struct hnae3_vector_info *vector_info) 2961 { 2962 struct hclge_vport *vport = hclge_get_vport(handle); 2963 struct hnae3_vector_info *vector = vector_info; 2964 struct hclge_dev *hdev = vport->back; 2965 int alloc = 0; 2966 int i, j; 2967 2968 vector_num = min(hdev->num_msi_left, vector_num); 2969 2970 for (j = 0; j < vector_num; j++) { 2971 for (i = 1; i < hdev->num_msi; i++) { 2972 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 2973 vector->vector = pci_irq_vector(hdev->pdev, i); 2974 vector->io_addr = hdev->hw.io_base + 2975 HCLGE_VECTOR_REG_BASE + 2976 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 2977 vport->vport_id * 2978 HCLGE_VECTOR_VF_OFFSET; 2979 hdev->vector_status[i] = vport->vport_id; 2980 hdev->vector_irq[i] = vector->vector; 2981 2982 vector++; 2983 alloc++; 2984 2985 break; 2986 } 2987 } 2988 } 2989 hdev->num_msi_left -= alloc; 2990 hdev->num_msi_used += alloc; 2991 2992 return alloc; 2993 } 2994 2995 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 2996 { 2997 int i; 2998 2999 for (i = 0; i < hdev->num_msi; i++) 3000 if (vector == hdev->vector_irq[i]) 3001 return i; 3002 3003 return -EINVAL; 3004 } 3005 3006 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 3007 { 3008 struct hclge_vport *vport = hclge_get_vport(handle); 3009 struct hclge_dev *hdev = vport->back; 3010 int 
vector_id; 3011 3012 vector_id = hclge_get_vector_index(hdev, vector); 3013 if (vector_id < 0) { 3014 dev_err(&hdev->pdev->dev, 3015 "Get vector index fail. vector_id =%d\n", vector_id); 3016 return vector_id; 3017 } 3018 3019 hclge_free_vector(hdev, vector_id); 3020 3021 return 0; 3022 } 3023 3024 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 3025 { 3026 return HCLGE_RSS_KEY_SIZE; 3027 } 3028 3029 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 3030 { 3031 return HCLGE_RSS_IND_TBL_SIZE; 3032 } 3033 3034 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 3035 const u8 hfunc, const u8 *key) 3036 { 3037 struct hclge_rss_config_cmd *req; 3038 struct hclge_desc desc; 3039 int key_offset; 3040 int key_size; 3041 int ret; 3042 3043 req = (struct hclge_rss_config_cmd *)desc.data; 3044 3045 for (key_offset = 0; key_offset < 3; key_offset++) { 3046 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 3047 false); 3048 3049 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 3050 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 3051 3052 if (key_offset == 2) 3053 key_size = 3054 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; 3055 else 3056 key_size = HCLGE_RSS_HASH_KEY_NUM; 3057 3058 memcpy(req->hash_key, 3059 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 3060 3061 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3062 if (ret) { 3063 dev_err(&hdev->pdev->dev, 3064 "Configure RSS config fail, status = %d\n", 3065 ret); 3066 return ret; 3067 } 3068 } 3069 return 0; 3070 } 3071 3072 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) 3073 { 3074 struct hclge_rss_indirection_table_cmd *req; 3075 struct hclge_desc desc; 3076 int i, j; 3077 int ret; 3078 3079 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 3080 3081 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 3082 hclge_cmd_setup_basic_desc 3083 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 3084 3085 req->start_table_index = 3086 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 3087 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 3088 3089 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 3090 req->rss_result[j] = 3091 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 3092 3093 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3094 if (ret) { 3095 dev_err(&hdev->pdev->dev, 3096 "Configure rss indir table fail,status = %d\n", 3097 ret); 3098 return ret; 3099 } 3100 } 3101 return 0; 3102 } 3103 3104 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 3105 u16 *tc_size, u16 *tc_offset) 3106 { 3107 struct hclge_rss_tc_mode_cmd *req; 3108 struct hclge_desc desc; 3109 int ret; 3110 int i; 3111 3112 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 3113 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 3114 3115 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3116 u16 mode = 0; 3117 3118 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 3119 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, 3120 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 3121 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 3122 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 3123 3124 req->rss_tc_mode[i] = cpu_to_le16(mode); 3125 } 3126 3127 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3128 if (ret) { 3129 dev_err(&hdev->pdev->dev, 3130 "Configure rss tc mode fail, status = %d\n", ret); 3131 return ret; 3132 } 3133 3134 return 0; 3135 } 3136 3137 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 3138 { 3139 struct hclge_rss_input_tuple_cmd *req; 3140 struct hclge_desc desc; 3141 int 
ret; 3142 3143 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3144 3145 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3146 3147 /* Get the tuple cfg from the PF */ 3148 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en; 3149 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en; 3150 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en; 3151 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en; 3152 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en; 3153 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en; 3154 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en; 3155 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en; 3156 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3157 if (ret) { 3158 dev_err(&hdev->pdev->dev, 3159 "Configure rss input fail, status = %d\n", ret); 3160 return ret; 3161 } 3162 3163 return 0; 3164 } 3165 3166 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 3167 u8 *key, u8 *hfunc) 3168 { 3169 struct hclge_vport *vport = hclge_get_vport(handle); 3170 int i; 3171 3172 /* Get hash algorithm */ 3173 if (hfunc) 3174 *hfunc = vport->rss_algo; 3175 3176 /* Get the RSS Key required by the user */ 3177 if (key) 3178 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 3179 3180 /* Get indirection table */ 3181 if (indir) 3182 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3183 indir[i] = vport->rss_indirection_tbl[i]; 3184 3185 return 0; 3186 } 3187 3188 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 3189 const u8 *key, const u8 hfunc) 3190 { 3191 struct hclge_vport *vport = hclge_get_vport(handle); 3192 struct hclge_dev *hdev = vport->back; 3193 u8 hash_algo; 3194 int ret, i; 3195 3196 /* Set the RSS Hash Key if specified by the user */ 3197 if (key) { 3198 3199 if (hfunc == ETH_RSS_HASH_TOP || 3200 hfunc == ETH_RSS_HASH_NO_CHANGE) 3201 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3202 else 3203 return -EINVAL; 3204 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 3205 if (ret) 3206 return ret; 3207 3208 /* Update the shadow RSS key with the user specified key */ 3209 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 3210 vport->rss_algo = hash_algo; 3211 } 3212 3213 /* Update the shadow RSS table with user specified qids */ 3214 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3215 vport->rss_indirection_tbl[i] = indir[i]; 3216 3217 /* Update the hardware */ 3218 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl); 3219 } 3220 3221 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 3222 { 3223 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ?
HCLGE_S_PORT_BIT : 0; 3224 3225 if (nfc->data & RXH_L4_B_2_3) 3226 hash_sets |= HCLGE_D_PORT_BIT; 3227 else 3228 hash_sets &= ~HCLGE_D_PORT_BIT; 3229 3230 if (nfc->data & RXH_IP_SRC) 3231 hash_sets |= HCLGE_S_IP_BIT; 3232 else 3233 hash_sets &= ~HCLGE_S_IP_BIT; 3234 3235 if (nfc->data & RXH_IP_DST) 3236 hash_sets |= HCLGE_D_IP_BIT; 3237 else 3238 hash_sets &= ~HCLGE_D_IP_BIT; 3239 3240 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 3241 hash_sets |= HCLGE_V_TAG_BIT; 3242 3243 return hash_sets; 3244 } 3245 3246 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 3247 struct ethtool_rxnfc *nfc) 3248 { 3249 struct hclge_vport *vport = hclge_get_vport(handle); 3250 struct hclge_dev *hdev = vport->back; 3251 struct hclge_rss_input_tuple_cmd *req; 3252 struct hclge_desc desc; 3253 u8 tuple_sets; 3254 int ret; 3255 3256 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 3257 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 3258 return -EINVAL; 3259 3260 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3261 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3262 3263 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en; 3264 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en; 3265 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en; 3266 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en; 3267 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en; 3268 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en; 3269 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en; 3270 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en; 3271 3272 tuple_sets = hclge_get_rss_hash_bits(nfc); 3273 switch (nfc->flow_type) { 3274 case TCP_V4_FLOW: 3275 req->ipv4_tcp_en = tuple_sets; 3276 break; 3277 case TCP_V6_FLOW: 3278 req->ipv6_tcp_en = tuple_sets; 3279 break; 3280 case UDP_V4_FLOW: 3281 req->ipv4_udp_en = tuple_sets; 3282 break; 3283 case UDP_V6_FLOW: 3284 req->ipv6_udp_en = tuple_sets; 3285 break; 3286 case SCTP_V4_FLOW: 3287 req->ipv4_sctp_en = tuple_sets; 3288 break; 3289 case SCTP_V6_FLOW: 3290 if ((nfc->data & RXH_L4_B_0_1) || 3291 (nfc->data & RXH_L4_B_2_3)) 3292 return -EINVAL; 3293 3294 req->ipv6_sctp_en = tuple_sets; 3295 break; 3296 case IPV4_FLOW: 3297 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3298 break; 3299 case IPV6_FLOW: 3300 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3301 break; 3302 default: 3303 return -EINVAL; 3304 } 3305 3306 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3307 if (ret) { 3308 dev_err(&hdev->pdev->dev, 3309 "Set rss tuple fail, status = %d\n", ret); 3310 return ret; 3311 } 3312 3313 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en; 3314 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en; 3315 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en; 3316 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en; 3317 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en; 3318 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en; 3319 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en; 3320 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; 3321 return 0; 3322 } 3323 3324 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 3325 struct ethtool_rxnfc *nfc) 3326 { 3327 struct hclge_vport *vport = hclge_get_vport(handle); 3328 u8 tuple_sets; 3329 3330 nfc->data = 0; 3331 3332 switch (nfc->flow_type) { 3333 case TCP_V4_FLOW: 3334 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en; 3335 break; 3336 case UDP_V4_FLOW: 3337 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en; 3338 break; 3339 
case TCP_V6_FLOW: 3340 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en; 3341 break; 3342 case UDP_V6_FLOW: 3343 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en; 3344 break; 3345 case SCTP_V4_FLOW: 3346 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en; 3347 break; 3348 case SCTP_V6_FLOW: 3349 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en; 3350 break; 3351 case IPV4_FLOW: 3352 case IPV6_FLOW: 3353 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 3354 break; 3355 default: 3356 return -EINVAL; 3357 } 3358 3359 if (!tuple_sets) 3360 return 0; 3361 3362 if (tuple_sets & HCLGE_D_PORT_BIT) 3363 nfc->data |= RXH_L4_B_2_3; 3364 if (tuple_sets & HCLGE_S_PORT_BIT) 3365 nfc->data |= RXH_L4_B_0_1; 3366 if (tuple_sets & HCLGE_D_IP_BIT) 3367 nfc->data |= RXH_IP_DST; 3368 if (tuple_sets & HCLGE_S_IP_BIT) 3369 nfc->data |= RXH_IP_SRC; 3370 3371 return 0; 3372 } 3373 3374 static int hclge_get_tc_size(struct hnae3_handle *handle) 3375 { 3376 struct hclge_vport *vport = hclge_get_vport(handle); 3377 struct hclge_dev *hdev = vport->back; 3378 3379 return hdev->rss_size_max; 3380 } 3381 3382 int hclge_rss_init_hw(struct hclge_dev *hdev) 3383 { 3384 struct hclge_vport *vport = hdev->vport; 3385 u8 *rss_indir = vport[0].rss_indirection_tbl; 3386 u16 rss_size = vport[0].alloc_rss_size; 3387 u8 *key = vport[0].rss_hash_key; 3388 u8 hfunc = vport[0].rss_algo; 3389 u16 tc_offset[HCLGE_MAX_TC_NUM]; 3390 u16 tc_valid[HCLGE_MAX_TC_NUM]; 3391 u16 tc_size[HCLGE_MAX_TC_NUM]; 3392 u16 roundup_size; 3393 int i, ret; 3394 3395 ret = hclge_set_rss_indir_table(hdev, rss_indir); 3396 if (ret) 3397 return ret; 3398 3399 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 3400 if (ret) 3401 return ret; 3402 3403 ret = hclge_set_rss_input_tuple(hdev); 3404 if (ret) 3405 return ret; 3406 3407 /* Each TC has the same queue size, and the tc_size set to hardware is 3408 * the log2 of the roundup power of two of rss_size; the actual queue 3409 * size is limited by the indirection table.
3410 */ 3411 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 3412 dev_err(&hdev->pdev->dev, 3413 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 3414 rss_size); 3415 return -EINVAL; 3416 } 3417 3418 roundup_size = roundup_pow_of_two(rss_size); 3419 roundup_size = ilog2(roundup_size); 3420 3421 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3422 tc_valid[i] = 0; 3423 3424 if (!(hdev->hw_tc_map & BIT(i))) 3425 continue; 3426 3427 tc_valid[i] = 1; 3428 tc_size[i] = roundup_size; 3429 tc_offset[i] = rss_size * i; 3430 } 3431 3432 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 3433 } 3434 3435 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) 3436 { 3437 struct hclge_vport *vport = hdev->vport; 3438 int i, j; 3439 3440 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 3441 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3442 vport[j].rss_indirection_tbl[i] = 3443 i % vport[j].alloc_rss_size; 3444 } 3445 } 3446 3447 static void hclge_rss_init_cfg(struct hclge_dev *hdev) 3448 { 3449 struct hclge_vport *vport = hdev->vport; 3450 int i; 3451 3452 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 3453 vport[i].rss_tuple_sets.ipv4_tcp_en = 3454 HCLGE_RSS_INPUT_TUPLE_OTHER; 3455 vport[i].rss_tuple_sets.ipv4_udp_en = 3456 HCLGE_RSS_INPUT_TUPLE_OTHER; 3457 vport[i].rss_tuple_sets.ipv4_sctp_en = 3458 HCLGE_RSS_INPUT_TUPLE_SCTP; 3459 vport[i].rss_tuple_sets.ipv4_fragment_en = 3460 HCLGE_RSS_INPUT_TUPLE_OTHER; 3461 vport[i].rss_tuple_sets.ipv6_tcp_en = 3462 HCLGE_RSS_INPUT_TUPLE_OTHER; 3463 vport[i].rss_tuple_sets.ipv6_udp_en = 3464 HCLGE_RSS_INPUT_TUPLE_OTHER; 3465 vport[i].rss_tuple_sets.ipv6_sctp_en = 3466 HCLGE_RSS_INPUT_TUPLE_SCTP; 3467 vport[i].rss_tuple_sets.ipv6_fragment_en = 3468 HCLGE_RSS_INPUT_TUPLE_OTHER; 3469 3470 vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3471 3472 netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); 3473 } 3474 3475 hclge_rss_indir_init_cfg(hdev); 3476 } 3477 3478 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 3479 int vector_id, bool en, 3480 struct hnae3_ring_chain_node *ring_chain) 3481 { 3482 struct hclge_dev *hdev = vport->back; 3483 struct hnae3_ring_chain_node *node; 3484 struct hclge_desc desc; 3485 struct hclge_ctrl_vector_chain_cmd *req 3486 = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 3487 enum hclge_cmd_status status; 3488 enum hclge_opcode_type op; 3489 u16 tqp_type_and_id; 3490 int i; 3491 3492 op = en ? 
HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 3493 hclge_cmd_setup_basic_desc(&desc, op, false); 3494 req->int_vector_id = vector_id; 3495 3496 i = 0; 3497 for (node = ring_chain; node; node = node->next) { 3498 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 3499 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 3500 HCLGE_INT_TYPE_S, 3501 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3502 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 3503 HCLGE_TQP_ID_S, node->tqp_index); 3504 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 3505 HCLGE_INT_GL_IDX_S, 3506 hnae3_get_field(node->int_gl_idx, 3507 HNAE3_RING_GL_IDX_M, 3508 HNAE3_RING_GL_IDX_S)); 3509 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 3510 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 3511 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 3512 req->vfid = vport->vport_id; 3513 3514 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3515 if (status) { 3516 dev_err(&hdev->pdev->dev, 3517 "Map TQP fail, status is %d.\n", 3518 status); 3519 return -EIO; 3520 } 3521 i = 0; 3522 3523 hclge_cmd_setup_basic_desc(&desc, 3524 op, 3525 false); 3526 req->int_vector_id = vector_id; 3527 } 3528 } 3529 3530 if (i > 0) { 3531 req->int_cause_num = i; 3532 req->vfid = vport->vport_id; 3533 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3534 if (status) { 3535 dev_err(&hdev->pdev->dev, 3536 "Map TQP fail, status is %d.\n", status); 3537 return -EIO; 3538 } 3539 } 3540 3541 return 0; 3542 } 3543 3544 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, 3545 int vector, 3546 struct hnae3_ring_chain_node *ring_chain) 3547 { 3548 struct hclge_vport *vport = hclge_get_vport(handle); 3549 struct hclge_dev *hdev = vport->back; 3550 int vector_id; 3551 3552 vector_id = hclge_get_vector_index(hdev, vector); 3553 if (vector_id < 0) { 3554 dev_err(&hdev->pdev->dev, 3555 "Get vector index fail. vector_id =%d\n", vector_id); 3556 return vector_id; 3557 } 3558 3559 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 3560 } 3561 3562 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, 3563 int vector, 3564 struct hnae3_ring_chain_node *ring_chain) 3565 { 3566 struct hclge_vport *vport = hclge_get_vport(handle); 3567 struct hclge_dev *hdev = vport->back; 3568 int vector_id, ret; 3569 3570 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 3571 return 0; 3572 3573 vector_id = hclge_get_vector_index(hdev, vector); 3574 if (vector_id < 0) { 3575 dev_err(&handle->pdev->dev, 3576 "Get vector index fail. ret =%d\n", vector_id); 3577 return vector_id; 3578 } 3579 3580 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 3581 if (ret) 3582 dev_err(&handle->pdev->dev, 3583 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 3584 vector_id, 3585 ret); 3586 3587 return ret; 3588 } 3589 3590 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 3591 struct hclge_promisc_param *param) 3592 { 3593 struct hclge_promisc_cfg_cmd *req; 3594 struct hclge_desc desc; 3595 int ret; 3596 3597 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 3598 3599 req = (struct hclge_promisc_cfg_cmd *)desc.data; 3600 req->vf_id = param->vf_id; 3601 3602 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on 3603 * pdev revision(0x20), new revision support them. The 3604 * value of this two fields will not return error when driver 3605 * send command to fireware in revision(0x20). 
3606 */ 3607 req->flag = (param->enable << HCLGE_PROMISC_EN_B) | 3608 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; 3609 3610 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3611 if (ret) { 3612 dev_err(&hdev->pdev->dev, 3613 "Set promisc mode fail, status is %d.\n", ret); 3614 return ret; 3615 } 3616 return 0; 3617 } 3618 3619 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, 3620 bool en_mc, bool en_bc, int vport_id) 3621 { 3622 if (!param) 3623 return; 3624 3625 memset(param, 0, sizeof(struct hclge_promisc_param)); 3626 if (en_uc) 3627 param->enable = HCLGE_PROMISC_EN_UC; 3628 if (en_mc) 3629 param->enable |= HCLGE_PROMISC_EN_MC; 3630 if (en_bc) 3631 param->enable |= HCLGE_PROMISC_EN_BC; 3632 param->vf_id = vport_id; 3633 } 3634 3635 static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 3636 bool en_mc_pmc) 3637 { 3638 struct hclge_vport *vport = hclge_get_vport(handle); 3639 struct hclge_dev *hdev = vport->back; 3640 struct hclge_promisc_param param; 3641 3642 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true, 3643 vport->vport_id); 3644 hclge_cmd_set_promisc_mode(hdev, &param); 3645 } 3646 3647 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 3648 { 3649 struct hclge_desc desc; 3650 struct hclge_config_mac_mode_cmd *req = 3651 (struct hclge_config_mac_mode_cmd *)desc.data; 3652 u32 loop_en = 0; 3653 int ret; 3654 3655 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 3656 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); 3657 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); 3658 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); 3659 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); 3660 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); 3661 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); 3662 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 3663 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); 3664 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); 3665 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); 3666 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); 3667 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); 3668 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); 3669 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); 3670 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3671 3672 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3673 if (ret) 3674 dev_err(&hdev->pdev->dev, 3675 "mac enable fail, ret =%d.\n", ret); 3676 } 3677 3678 static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) 3679 { 3680 struct hclge_config_mac_mode_cmd *req; 3681 struct hclge_desc desc; 3682 u32 loop_en; 3683 int ret; 3684 3685 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 3686 /* 1 Read out the MAC mode config at first */ 3687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 3688 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3689 if (ret) { 3690 dev_err(&hdev->pdev->dev, 3691 "mac loopback get fail, ret =%d.\n", ret); 3692 return ret; 3693 } 3694 3695 /* 2 Then setup the loopback flag */ 3696 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 3697 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ?
1 : 0); 3698 3699 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3700 3701 /* 3 Config mac work mode with loopback flag 3702 * and its original configure parameters 3703 */ 3704 hclge_cmd_reuse_desc(&desc, false); 3705 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3706 if (ret) 3707 dev_err(&hdev->pdev->dev, 3708 "mac loopback set fail, ret =%d.\n", ret); 3709 return ret; 3710 } 3711 3712 static int hclge_set_loopback(struct hnae3_handle *handle, 3713 enum hnae3_loop loop_mode, bool en) 3714 { 3715 struct hclge_vport *vport = hclge_get_vport(handle); 3716 struct hclge_dev *hdev = vport->back; 3717 int ret; 3718 3719 switch (loop_mode) { 3720 case HNAE3_MAC_INTER_LOOP_MAC: 3721 ret = hclge_set_mac_loopback(hdev, en); 3722 break; 3723 default: 3724 ret = -ENOTSUPP; 3725 dev_err(&hdev->pdev->dev, 3726 "loop_mode %d is not supported\n", loop_mode); 3727 break; 3728 } 3729 3730 return ret; 3731 } 3732 3733 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, 3734 int stream_id, bool enable) 3735 { 3736 struct hclge_desc desc; 3737 struct hclge_cfg_com_tqp_queue_cmd *req = 3738 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 3739 int ret; 3740 3741 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 3742 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); 3743 req->stream_id = cpu_to_le16(stream_id); 3744 req->enable |= enable << HCLGE_TQP_ENABLE_B; 3745 3746 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3747 if (ret) 3748 dev_err(&hdev->pdev->dev, 3749 "Tqp enable fail, status =%d.\n", ret); 3750 return ret; 3751 } 3752 3753 static void hclge_reset_tqp_stats(struct hnae3_handle *handle) 3754 { 3755 struct hclge_vport *vport = hclge_get_vport(handle); 3756 struct hnae3_queue *queue; 3757 struct hclge_tqp *tqp; 3758 int i; 3759 3760 for (i = 0; i < vport->alloc_tqps; i++) { 3761 queue = handle->kinfo.tqp[i]; 3762 tqp = container_of(queue, struct hclge_tqp, q); 3763 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 3764 } 3765 } 3766 3767 static int hclge_ae_start(struct hnae3_handle *handle) 3768 { 3769 struct hclge_vport *vport = hclge_get_vport(handle); 3770 struct hclge_dev *hdev = vport->back; 3771 int i, ret; 3772 3773 for (i = 0; i < vport->alloc_tqps; i++) 3774 hclge_tqp_enable(hdev, i, 0, true); 3775 3776 /* mac enable */ 3777 hclge_cfg_mac_mode(hdev, true); 3778 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 3779 mod_timer(&hdev->service_timer, jiffies + HZ); 3780 hdev->hw.mac.link = 0; 3781 3782 /* reset tqp stats */ 3783 hclge_reset_tqp_stats(handle); 3784 3785 ret = hclge_mac_start_phy(hdev); 3786 if (ret) 3787 return ret; 3788 3789 return 0; 3790 } 3791 3792 static void hclge_ae_stop(struct hnae3_handle *handle) 3793 { 3794 struct hclge_vport *vport = hclge_get_vport(handle); 3795 struct hclge_dev *hdev = vport->back; 3796 int i; 3797 3798 del_timer_sync(&hdev->service_timer); 3799 cancel_work_sync(&hdev->service_task); 3800 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 3801 3802 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { 3803 hclge_mac_stop_phy(hdev); 3804 return; 3805 } 3806 3807 for (i = 0; i < vport->alloc_tqps; i++) 3808 hclge_tqp_enable(hdev, i, 0, false); 3809 3810 /* Mac disable */ 3811 hclge_cfg_mac_mode(hdev, false); 3812 3813 hclge_mac_stop_phy(hdev); 3814 3815 /* reset tqp stats */ 3816 hclge_reset_tqp_stats(handle); 3817 del_timer_sync(&hdev->service_timer); 3818 cancel_work_sync(&hdev->service_task); 3819 hclge_update_link_status(hdev); 3820 } 3821 3822 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 
3823 u16 cmdq_resp, u8 resp_code, 3824 enum hclge_mac_vlan_tbl_opcode op) 3825 { 3826 struct hclge_dev *hdev = vport->back; 3827 int return_status = -EIO; 3828 3829 if (cmdq_resp) { 3830 dev_err(&hdev->pdev->dev, 3831 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", 3832 cmdq_resp); 3833 return -EIO; 3834 } 3835 3836 if (op == HCLGE_MAC_VLAN_ADD) { 3837 if ((!resp_code) || (resp_code == 1)) { 3838 return_status = 0; 3839 } else if (resp_code == 2) { 3840 return_status = -ENOSPC; 3841 dev_err(&hdev->pdev->dev, 3842 "add mac addr failed for uc_overflow.\n"); 3843 } else if (resp_code == 3) { 3844 return_status = -ENOSPC; 3845 dev_err(&hdev->pdev->dev, 3846 "add mac addr failed for mc_overflow.\n"); 3847 } else { 3848 dev_err(&hdev->pdev->dev, 3849 "add mac addr failed for undefined, code=%d.\n", 3850 resp_code); 3851 } 3852 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 3853 if (!resp_code) { 3854 return_status = 0; 3855 } else if (resp_code == 1) { 3856 return_status = -ENOENT; 3857 dev_dbg(&hdev->pdev->dev, 3858 "remove mac addr failed for miss.\n"); 3859 } else { 3860 dev_err(&hdev->pdev->dev, 3861 "remove mac addr failed for undefined, code=%d.\n", 3862 resp_code); 3863 } 3864 } else if (op == HCLGE_MAC_VLAN_LKUP) { 3865 if (!resp_code) { 3866 return_status = 0; 3867 } else if (resp_code == 1) { 3868 return_status = -ENOENT; 3869 dev_dbg(&hdev->pdev->dev, 3870 "lookup mac addr failed for miss.\n"); 3871 } else { 3872 dev_err(&hdev->pdev->dev, 3873 "lookup mac addr failed for undefined, code=%d.\n", 3874 resp_code); 3875 } 3876 } else { 3877 return_status = -EINVAL; 3878 dev_err(&hdev->pdev->dev, 3879 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", 3880 op); 3881 } 3882 3883 return return_status; 3884 } 3885 3886 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 3887 { 3888 int word_num; 3889 int bit_num; 3890 3891 if (vfid > 255 || vfid < 0) 3892 return -EIO; 3893 3894 if (vfid >= 0 && vfid <= 191) { 3895 word_num = vfid / 32; 3896 bit_num = vfid % 32; 3897 if (clr) 3898 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3899 else 3900 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 3901 } else { 3902 word_num = (vfid - 192) / 32; 3903 bit_num = vfid % 32; 3904 if (clr) 3905 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3906 else 3907 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 3908 } 3909 3910 return 0; 3911 } 3912 3913 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 3914 { 3915 #define HCLGE_DESC_NUMBER 3 3916 #define HCLGE_FUNC_NUMBER_PER_DESC 6 3917 int i, j; 3918 3919 for (i = 0; i < HCLGE_DESC_NUMBER; i++) 3920 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 3921 if (desc[i].data[j]) 3922 return false; 3923 3924 return true; 3925 } 3926 3927 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 3928 const u8 *addr) 3929 { 3930 const unsigned char *mac_addr = addr; 3931 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 3932 (mac_addr[0]) | (mac_addr[1] << 8); 3933 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 3934 3935 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 3936 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 3937 } 3938 3939 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, 3940 const u8 *addr) 3941 { 3942 u16 high_val = addr[1] | (addr[0] << 8); 3943 struct hclge_dev *hdev = vport->back; 3944 u32 rsh = 4 - hdev->mta_mac_sel_type; 3945 u16 ret_val = (high_val >> rsh) & 0xfff; 3946 3947 return ret_val; 3948 } 3949 
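/* Explanatory note on the MTA (multicast table approximation) filter
 * handled by the functions below; the numbers here are derived from the
 * code above, not from the hardware manual. hclge_get_mac_addr_to_mta_index()
 * builds a 16-bit value from the two high bytes of the multicast address
 * and right-shifts it by (4 - mta_mac_sel_type), so the selector chooses
 * which 12-bit window (the & 0xfff mask) is used as the table index.
 * As a rough worked example, with mta_mac_sel_type == 0 the address
 * 01:00:5e:xx:xx:xx gives high_val = 0x0100 and
 * index = (0x0100 >> 4) & 0xfff = 0x010.
 * hclge_set_mta_filter_mode() programs that selector globally,
 * hclge_cfg_func_mta_filter() enables the filter per function, and
 * hclge_set_mta_table_item() sets or clears a single entry while
 * mirroring its state in vport->mta_shadow.
 */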
3950 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 3951 enum hclge_mta_dmac_sel_type mta_mac_sel, 3952 bool enable) 3953 { 3954 struct hclge_mta_filter_mode_cmd *req; 3955 struct hclge_desc desc; 3956 int ret; 3957 3958 req = (struct hclge_mta_filter_mode_cmd *)desc.data; 3959 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); 3960 3961 hnae3_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, 3962 enable); 3963 hnae3_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, 3964 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); 3965 3966 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3967 if (ret) { 3968 dev_err(&hdev->pdev->dev, 3969 "Config mat filter mode failed for cmd_send, ret =%d.\n", 3970 ret); 3971 return ret; 3972 } 3973 3974 return 0; 3975 } 3976 3977 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, 3978 u8 func_id, 3979 bool enable) 3980 { 3981 struct hclge_cfg_func_mta_filter_cmd *req; 3982 struct hclge_desc desc; 3983 int ret; 3984 3985 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; 3986 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); 3987 3988 hnae3_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, 3989 enable); 3990 req->function_id = func_id; 3991 3992 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3993 if (ret) { 3994 dev_err(&hdev->pdev->dev, 3995 "Config func_id enable failed for cmd_send, ret =%d.\n", 3996 ret); 3997 return ret; 3998 } 3999 4000 return 0; 4001 } 4002 4003 static int hclge_set_mta_table_item(struct hclge_vport *vport, 4004 u16 idx, 4005 bool enable) 4006 { 4007 struct hclge_dev *hdev = vport->back; 4008 struct hclge_cfg_func_mta_item_cmd *req; 4009 struct hclge_desc desc; 4010 u16 item_idx = 0; 4011 int ret; 4012 4013 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; 4014 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); 4015 hnae3_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); 4016 4017 hnae3_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, 4018 HCLGE_CFG_MTA_ITEM_IDX_S, idx); 4019 req->item_idx = cpu_to_le16(item_idx); 4020 4021 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4022 if (ret) { 4023 dev_err(&hdev->pdev->dev, 4024 "Config mta table item failed for cmd_send, ret =%d.\n", 4025 ret); 4026 return ret; 4027 } 4028 4029 if (enable) 4030 set_bit(idx, vport->mta_shadow); 4031 else 4032 clear_bit(idx, vport->mta_shadow); 4033 4034 return 0; 4035 } 4036 4037 static int hclge_update_mta_status(struct hnae3_handle *handle) 4038 { 4039 unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; 4040 struct hclge_vport *vport = hclge_get_vport(handle); 4041 struct net_device *netdev = handle->kinfo.netdev; 4042 struct netdev_hw_addr *ha; 4043 u16 tbl_idx; 4044 4045 memset(mta_status, 0, sizeof(mta_status)); 4046 4047 /* update mta_status from mc addr list */ 4048 netdev_for_each_mc_addr(ha, netdev) { 4049 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr); 4050 set_bit(tbl_idx, mta_status); 4051 } 4052 4053 return hclge_update_mta_status_common(vport, mta_status, 4054 0, HCLGE_MTA_TBL_SIZE, true); 4055 } 4056 4057 int hclge_update_mta_status_common(struct hclge_vport *vport, 4058 unsigned long *status, 4059 u16 idx, 4060 u16 count, 4061 bool update_filter) 4062 { 4063 struct hclge_dev *hdev = vport->back; 4064 u16 update_max = idx + count; 4065 u16 check_max; 4066 int ret = 0; 4067 bool used; 4068 u16 i; 4069 4070 /* setup mta check range */ 4071 if (update_filter) { 4072 i = 0; 4073 check_max = HCLGE_MTA_TBL_SIZE; 4074 } else { 4075 i = idx; 4076 check_max = 
update_max; 4077 } 4078 4079 used = false; 4080 /* check and update all mta item */ 4081 for (; i < check_max; i++) { 4082 /* ignore unused item */ 4083 if (!test_bit(i, vport->mta_shadow)) 4084 continue; 4085 4086 /* if i in update range then update it */ 4087 if (i >= idx && i < update_max) 4088 if (!test_bit(i - idx, status)) 4089 hclge_set_mta_table_item(vport, i, false); 4090 4091 if (!used && test_bit(i, vport->mta_shadow)) 4092 used = true; 4093 } 4094 4095 /* no longer use mta, disable it */ 4096 if (vport->accept_mta_mc && update_filter && !used) { 4097 ret = hclge_cfg_func_mta_filter(hdev, 4098 vport->vport_id, 4099 false); 4100 if (ret) 4101 dev_err(&hdev->pdev->dev, 4102 "disable func mta filter fail ret=%d\n", 4103 ret); 4104 else 4105 vport->accept_mta_mc = false; 4106 } 4107 4108 return ret; 4109 } 4110 4111 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 4112 struct hclge_mac_vlan_tbl_entry_cmd *req) 4113 { 4114 struct hclge_dev *hdev = vport->back; 4115 struct hclge_desc desc; 4116 u8 resp_code; 4117 u16 retval; 4118 int ret; 4119 4120 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 4121 4122 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4123 4124 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4125 if (ret) { 4126 dev_err(&hdev->pdev->dev, 4127 "del mac addr failed for cmd_send, ret =%d.\n", 4128 ret); 4129 return ret; 4130 } 4131 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4132 retval = le16_to_cpu(desc.retval); 4133 4134 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4135 HCLGE_MAC_VLAN_REMOVE); 4136 } 4137 4138 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 4139 struct hclge_mac_vlan_tbl_entry_cmd *req, 4140 struct hclge_desc *desc, 4141 bool is_mc) 4142 { 4143 struct hclge_dev *hdev = vport->back; 4144 u8 resp_code; 4145 u16 retval; 4146 int ret; 4147 4148 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 4149 if (is_mc) { 4150 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4151 memcpy(desc[0].data, 4152 req, 4153 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4154 hclge_cmd_setup_basic_desc(&desc[1], 4155 HCLGE_OPC_MAC_VLAN_ADD, 4156 true); 4157 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4158 hclge_cmd_setup_basic_desc(&desc[2], 4159 HCLGE_OPC_MAC_VLAN_ADD, 4160 true); 4161 ret = hclge_cmd_send(&hdev->hw, desc, 3); 4162 } else { 4163 memcpy(desc[0].data, 4164 req, 4165 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4166 ret = hclge_cmd_send(&hdev->hw, desc, 1); 4167 } 4168 if (ret) { 4169 dev_err(&hdev->pdev->dev, 4170 "lookup mac addr failed for cmd_send, ret =%d.\n", 4171 ret); 4172 return ret; 4173 } 4174 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 4175 retval = le16_to_cpu(desc[0].retval); 4176 4177 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 4178 HCLGE_MAC_VLAN_LKUP); 4179 } 4180 4181 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 4182 struct hclge_mac_vlan_tbl_entry_cmd *req, 4183 struct hclge_desc *mc_desc) 4184 { 4185 struct hclge_dev *hdev = vport->back; 4186 int cfg_status; 4187 u8 resp_code; 4188 u16 retval; 4189 int ret; 4190 4191 if (!mc_desc) { 4192 struct hclge_desc desc; 4193 4194 hclge_cmd_setup_basic_desc(&desc, 4195 HCLGE_OPC_MAC_VLAN_ADD, 4196 false); 4197 memcpy(desc.data, req, 4198 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4199 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4200 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4201 retval = le16_to_cpu(desc.retval); 4202 4203 
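		/* The command-queue return value (retval) only says whether the
		 * firmware executed the command; the per-entry resp_code in
		 * data[0] additionally distinguishes success from unicast or
		 * multicast table overflow. Both are folded into a single errno
		 * by the hclge_get_mac_vlan_cmd_status() call that follows.
		 */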
cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 4204 resp_code, 4205 HCLGE_MAC_VLAN_ADD); 4206 } else { 4207 hclge_cmd_reuse_desc(&mc_desc[0], false); 4208 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4209 hclge_cmd_reuse_desc(&mc_desc[1], false); 4210 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4211 hclge_cmd_reuse_desc(&mc_desc[2], false); 4212 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 4213 memcpy(mc_desc[0].data, req, 4214 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 4215 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 4216 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 4217 retval = le16_to_cpu(mc_desc[0].retval); 4218 4219 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 4220 resp_code, 4221 HCLGE_MAC_VLAN_ADD); 4222 } 4223 4224 if (ret) { 4225 dev_err(&hdev->pdev->dev, 4226 "add mac addr failed for cmd_send, ret =%d.\n", 4227 ret); 4228 return ret; 4229 } 4230 4231 return cfg_status; 4232 } 4233 4234 static int hclge_add_uc_addr(struct hnae3_handle *handle, 4235 const unsigned char *addr) 4236 { 4237 struct hclge_vport *vport = hclge_get_vport(handle); 4238 4239 return hclge_add_uc_addr_common(vport, addr); 4240 } 4241 4242 int hclge_add_uc_addr_common(struct hclge_vport *vport, 4243 const unsigned char *addr) 4244 { 4245 struct hclge_dev *hdev = vport->back; 4246 struct hclge_mac_vlan_tbl_entry_cmd req; 4247 struct hclge_desc desc; 4248 u16 egress_port = 0; 4249 int ret; 4250 4251 /* mac addr check */ 4252 if (is_zero_ether_addr(addr) || 4253 is_broadcast_ether_addr(addr) || 4254 is_multicast_ether_addr(addr)) { 4255 dev_err(&hdev->pdev->dev, 4256 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", 4257 addr, 4258 is_zero_ether_addr(addr), 4259 is_broadcast_ether_addr(addr), 4260 is_multicast_ether_addr(addr)); 4261 return -EINVAL; 4262 } 4263 4264 memset(&req, 0, sizeof(req)); 4265 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4266 4267 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 4268 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 4269 4270 req.egress_port = cpu_to_le16(egress_port); 4271 4272 hclge_prepare_mac_addr(&req, addr); 4273 4274 /* Lookup the mac address in the mac_vlan table, and add 4275 * it if the entry is inexistent. Repeated unicast entry 4276 * is not allowed in the mac vlan table. 4277 */ 4278 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 4279 if (ret == -ENOENT) 4280 return hclge_add_mac_vlan_tbl(vport, &req, NULL); 4281 4282 /* check if we just hit the duplicate */ 4283 if (!ret) 4284 ret = -EINVAL; 4285 4286 dev_err(&hdev->pdev->dev, 4287 "PF failed to add unicast entry(%pM) in the MAC table\n", 4288 addr); 4289 4290 return ret; 4291 } 4292 4293 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 4294 const unsigned char *addr) 4295 { 4296 struct hclge_vport *vport = hclge_get_vport(handle); 4297 4298 return hclge_rm_uc_addr_common(vport, addr); 4299 } 4300 4301 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 4302 const unsigned char *addr) 4303 { 4304 struct hclge_dev *hdev = vport->back; 4305 struct hclge_mac_vlan_tbl_entry_cmd req; 4306 int ret; 4307 4308 /* mac addr check */ 4309 if (is_zero_ether_addr(addr) || 4310 is_broadcast_ether_addr(addr) || 4311 is_multicast_ether_addr(addr)) { 4312 dev_dbg(&hdev->pdev->dev, 4313 "Remove mac err! 
invalid mac:%pM.\n", 4314 addr); 4315 return -EINVAL; 4316 } 4317 4318 memset(&req, 0, sizeof(req)); 4319 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4320 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4321 hclge_prepare_mac_addr(&req, addr); 4322 ret = hclge_remove_mac_vlan_tbl(vport, &req); 4323 4324 return ret; 4325 } 4326 4327 static int hclge_add_mc_addr(struct hnae3_handle *handle, 4328 const unsigned char *addr) 4329 { 4330 struct hclge_vport *vport = hclge_get_vport(handle); 4331 4332 return hclge_add_mc_addr_common(vport, addr); 4333 } 4334 4335 int hclge_add_mc_addr_common(struct hclge_vport *vport, 4336 const unsigned char *addr) 4337 { 4338 struct hclge_dev *hdev = vport->back; 4339 struct hclge_mac_vlan_tbl_entry_cmd req; 4340 struct hclge_desc desc[3]; 4341 u16 tbl_idx; 4342 int status; 4343 4344 /* mac addr check */ 4345 if (!is_multicast_ether_addr(addr)) { 4346 dev_err(&hdev->pdev->dev, 4347 "Add mc mac err! invalid mac:%pM.\n", 4348 addr); 4349 return -EINVAL; 4350 } 4351 memset(&req, 0, sizeof(req)); 4352 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4353 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4354 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4355 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4356 hclge_prepare_mac_addr(&req, addr); 4357 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4358 if (!status) { 4359 /* This mac addr exist, update VFID for it */ 4360 hclge_update_desc_vfid(desc, vport->vport_id, false); 4361 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4362 } else { 4363 /* This mac addr do not exist, add new entry for it */ 4364 memset(desc[0].data, 0, sizeof(desc[0].data)); 4365 memset(desc[1].data, 0, sizeof(desc[0].data)); 4366 memset(desc[2].data, 0, sizeof(desc[0].data)); 4367 hclge_update_desc_vfid(desc, vport->vport_id, false); 4368 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4369 } 4370 4371 /* If mc mac vlan table is full, use MTA table */ 4372 if (status == -ENOSPC) { 4373 if (!vport->accept_mta_mc) { 4374 status = hclge_cfg_func_mta_filter(hdev, 4375 vport->vport_id, 4376 true); 4377 if (status) { 4378 dev_err(&hdev->pdev->dev, 4379 "set mta filter mode fail ret=%d\n", 4380 status); 4381 return status; 4382 } 4383 vport->accept_mta_mc = true; 4384 } 4385 4386 /* Set MTA table for this MAC address */ 4387 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 4388 status = hclge_set_mta_table_item(vport, tbl_idx, true); 4389 } 4390 4391 return status; 4392 } 4393 4394 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 4395 const unsigned char *addr) 4396 { 4397 struct hclge_vport *vport = hclge_get_vport(handle); 4398 4399 return hclge_rm_mc_addr_common(vport, addr); 4400 } 4401 4402 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 4403 const unsigned char *addr) 4404 { 4405 struct hclge_dev *hdev = vport->back; 4406 struct hclge_mac_vlan_tbl_entry_cmd req; 4407 enum hclge_cmd_status status; 4408 struct hclge_desc desc[3]; 4409 4410 /* mac addr check */ 4411 if (!is_multicast_ether_addr(addr)) { 4412 dev_dbg(&hdev->pdev->dev, 4413 "Remove mc mac err! 
invalid mac:%pM.\n", 4414 addr); 4415 return -EINVAL; 4416 } 4417 4418 memset(&req, 0, sizeof(req)); 4419 hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4420 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4421 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4422 hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4423 hclge_prepare_mac_addr(&req, addr); 4424 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4425 if (!status) { 4426 /* This mac addr exist, remove this handle's VFID for it */ 4427 hclge_update_desc_vfid(desc, vport->vport_id, true); 4428 4429 if (hclge_is_all_function_id_zero(desc)) 4430 /* All the vfid is zero, so need to delete this entry */ 4431 status = hclge_remove_mac_vlan_tbl(vport, &req); 4432 else 4433 /* Not all the vfid is zero, update the vfid */ 4434 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4435 4436 } else { 4437 /* Maybe this mac address is in mta table, but it cannot be 4438 * deleted here because an entry of mta represents an address 4439 * range rather than a specific address. the delete action to 4440 * all entries will take effect in update_mta_status called by 4441 * hns3_nic_set_rx_mode. 4442 */ 4443 status = 0; 4444 } 4445 4446 return status; 4447 } 4448 4449 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 4450 u16 cmdq_resp, u8 resp_code) 4451 { 4452 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 4453 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 4454 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 4455 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 4456 4457 int return_status; 4458 4459 if (cmdq_resp) { 4460 dev_err(&hdev->pdev->dev, 4461 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", 4462 cmdq_resp); 4463 return -EIO; 4464 } 4465 4466 switch (resp_code) { 4467 case HCLGE_ETHERTYPE_SUCCESS_ADD: 4468 case HCLGE_ETHERTYPE_ALREADY_ADD: 4469 return_status = 0; 4470 break; 4471 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 4472 dev_err(&hdev->pdev->dev, 4473 "add mac ethertype failed for manager table overflow.\n"); 4474 return_status = -EIO; 4475 break; 4476 case HCLGE_ETHERTYPE_KEY_CONFLICT: 4477 dev_err(&hdev->pdev->dev, 4478 "add mac ethertype failed for key conflict.\n"); 4479 return_status = -EIO; 4480 break; 4481 default: 4482 dev_err(&hdev->pdev->dev, 4483 "add mac ethertype failed for undefined, code=%d.\n", 4484 resp_code); 4485 return_status = -EIO; 4486 } 4487 4488 return return_status; 4489 } 4490 4491 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 4492 const struct hclge_mac_mgr_tbl_entry_cmd *req) 4493 { 4494 struct hclge_desc desc; 4495 u8 resp_code; 4496 u16 retval; 4497 int ret; 4498 4499 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 4500 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 4501 4502 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4503 if (ret) { 4504 dev_err(&hdev->pdev->dev, 4505 "add mac ethertype failed for cmd_send, ret =%d.\n", 4506 ret); 4507 return ret; 4508 } 4509 4510 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 4511 retval = le16_to_cpu(desc.retval); 4512 4513 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 4514 } 4515 4516 static int init_mgr_tbl(struct hclge_dev *hdev) 4517 { 4518 int ret; 4519 int i; 4520 4521 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 4522 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 4523 if (ret) { 4524 dev_err(&hdev->pdev->dev, 4525 "add mac ethertype failed, ret =%d.\n", 4526 ret); 4527 return ret; 4528 } 4529 } 4530 4531 return 0; 4532 
} 4533 4534 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 4535 { 4536 struct hclge_vport *vport = hclge_get_vport(handle); 4537 struct hclge_dev *hdev = vport->back; 4538 4539 ether_addr_copy(p, hdev->hw.mac.mac_addr); 4540 } 4541 4542 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, 4543 bool is_first) 4544 { 4545 const unsigned char *new_addr = (const unsigned char *)p; 4546 struct hclge_vport *vport = hclge_get_vport(handle); 4547 struct hclge_dev *hdev = vport->back; 4548 int ret; 4549 4550 /* mac addr check */ 4551 if (is_zero_ether_addr(new_addr) || 4552 is_broadcast_ether_addr(new_addr) || 4553 is_multicast_ether_addr(new_addr)) { 4554 dev_err(&hdev->pdev->dev, 4555 "Change uc mac err! invalid mac:%p.\n", 4556 new_addr); 4557 return -EINVAL; 4558 } 4559 4560 if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) 4561 dev_warn(&hdev->pdev->dev, 4562 "remove old uc mac address fail.\n"); 4563 4564 ret = hclge_add_uc_addr(handle, new_addr); 4565 if (ret) { 4566 dev_err(&hdev->pdev->dev, 4567 "add uc mac address fail, ret =%d.\n", 4568 ret); 4569 4570 if (!is_first && 4571 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr)) 4572 dev_err(&hdev->pdev->dev, 4573 "restore uc mac address fail.\n"); 4574 4575 return -EIO; 4576 } 4577 4578 ret = hclge_pause_addr_cfg(hdev, new_addr); 4579 if (ret) { 4580 dev_err(&hdev->pdev->dev, 4581 "configure mac pause address fail, ret =%d.\n", 4582 ret); 4583 return -EIO; 4584 } 4585 4586 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 4587 4588 return 0; 4589 } 4590 4591 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 4592 bool filter_en) 4593 { 4594 struct hclge_vlan_filter_ctrl_cmd *req; 4595 struct hclge_desc desc; 4596 int ret; 4597 4598 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 4599 4600 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 4601 req->vlan_type = vlan_type; 4602 req->vlan_fe = filter_en; 4603 4604 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4605 if (ret) { 4606 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 4607 ret); 4608 return ret; 4609 } 4610 4611 return 0; 4612 } 4613 4614 #define HCLGE_FILTER_TYPE_VF 0 4615 #define HCLGE_FILTER_TYPE_PORT 1 4616 4617 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 4618 { 4619 struct hclge_vport *vport = hclge_get_vport(handle); 4620 struct hclge_dev *hdev = vport->back; 4621 4622 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); 4623 } 4624 4625 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 4626 bool is_kill, u16 vlan, u8 qos, 4627 __be16 proto) 4628 { 4629 #define HCLGE_MAX_VF_BYTES 16 4630 struct hclge_vlan_filter_vf_cfg_cmd *req0; 4631 struct hclge_vlan_filter_vf_cfg_cmd *req1; 4632 struct hclge_desc desc[2]; 4633 u8 vf_byte_val; 4634 u8 vf_byte_off; 4635 int ret; 4636 4637 hclge_cmd_setup_basic_desc(&desc[0], 4638 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4639 hclge_cmd_setup_basic_desc(&desc[1], 4640 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4641 4642 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4643 4644 vf_byte_off = vfid / 8; 4645 vf_byte_val = 1 << (vfid % 8); 4646 4647 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 4648 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 4649 4650 req0->vlan_id = cpu_to_le16(vlan); 4651 req0->vlan_cfg = is_kill; 4652 4653 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 4654 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 4655 else 4656 req1->vf_bitmap[vf_byte_off 
- HCLGE_MAX_VF_BYTES] = vf_byte_val; 4657 4658 ret = hclge_cmd_send(&hdev->hw, desc, 2); 4659 if (ret) { 4660 dev_err(&hdev->pdev->dev, 4661 "Send vf vlan command fail, ret =%d.\n", 4662 ret); 4663 return ret; 4664 } 4665 4666 if (!is_kill) { 4667 #define HCLGE_VF_VLAN_NO_ENTRY 2 4668 if (!req0->resp_code || req0->resp_code == 1) 4669 return 0; 4670 4671 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { 4672 dev_warn(&hdev->pdev->dev, 4673 "vf vlan table is full, vf vlan filter is disabled\n"); 4674 return 0; 4675 } 4676 4677 dev_err(&hdev->pdev->dev, 4678 "Add vf vlan filter fail, ret =%d.\n", 4679 req0->resp_code); 4680 } else { 4681 if (!req0->resp_code) 4682 return 0; 4683 4684 dev_err(&hdev->pdev->dev, 4685 "Kill vf vlan filter fail, ret =%d.\n", 4686 req0->resp_code); 4687 } 4688 4689 return -EIO; 4690 } 4691 4692 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 4693 u16 vlan_id, bool is_kill) 4694 { 4695 struct hclge_vlan_filter_pf_cfg_cmd *req; 4696 struct hclge_desc desc; 4697 u8 vlan_offset_byte_val; 4698 u8 vlan_offset_byte; 4699 u8 vlan_offset_160; 4700 int ret; 4701 4702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 4703 4704 vlan_offset_160 = vlan_id / 160; 4705 vlan_offset_byte = (vlan_id % 160) / 8; 4706 vlan_offset_byte_val = 1 << (vlan_id % 8); 4707 4708 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 4709 req->vlan_offset = vlan_offset_160; 4710 req->vlan_cfg = is_kill; 4711 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 4712 4713 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4714 if (ret) 4715 dev_err(&hdev->pdev->dev, 4716 "port vlan command, send fail, ret =%d.\n", ret); 4717 return ret; 4718 } 4719 4720 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 4721 u16 vport_id, u16 vlan_id, u8 qos, 4722 bool is_kill) 4723 { 4724 u16 vport_idx, vport_num = 0; 4725 int ret; 4726 4727 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 4728 0, proto); 4729 if (ret) { 4730 dev_err(&hdev->pdev->dev, 4731 "Set %d vport vlan filter config fail, ret =%d.\n", 4732 vport_id, ret); 4733 return ret; 4734 } 4735 4736 /* vlan 0 may be added twice when 8021q module is enabled */ 4737 if (!is_kill && !vlan_id && 4738 test_bit(vport_id, hdev->vlan_table[vlan_id])) 4739 return 0; 4740 4741 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 4742 dev_err(&hdev->pdev->dev, 4743 "Add port vlan failed, vport %d is already in vlan %d\n", 4744 vport_id, vlan_id); 4745 return -EINVAL; 4746 } 4747 4748 if (is_kill && 4749 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 4750 dev_err(&hdev->pdev->dev, 4751 "Delete port vlan failed, vport %d is not in vlan %d\n", 4752 vport_id, vlan_id); 4753 return -EINVAL; 4754 } 4755 4756 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) 4757 vport_num++; 4758 4759 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 4760 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 4761 is_kill); 4762 4763 return ret; 4764 } 4765 4766 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 4767 u16 vlan_id, bool is_kill) 4768 { 4769 struct hclge_vport *vport = hclge_get_vport(handle); 4770 struct hclge_dev *hdev = vport->back; 4771 4772 return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, 4773 0, is_kill); 4774 } 4775 4776 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 4777 u16 vlan, u8 qos, __be16 proto) 4778 { 4779 struct hclge_vport *vport 
= hclge_get_vport(handle); 4780 struct hclge_dev *hdev = vport->back; 4781 4782 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) 4783 return -EINVAL; 4784 if (proto != htons(ETH_P_8021Q)) 4785 return -EPROTONOSUPPORT; 4786 4787 return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); 4788 } 4789 4790 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 4791 { 4792 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 4793 struct hclge_vport_vtag_tx_cfg_cmd *req; 4794 struct hclge_dev *hdev = vport->back; 4795 struct hclge_desc desc; 4796 int status; 4797 4798 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 4799 4800 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; 4801 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 4802 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 4803 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 4804 vcfg->accept_tag1 ? 1 : 0); 4805 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 4806 vcfg->accept_untag1 ? 1 : 0); 4807 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 4808 vcfg->accept_tag2 ? 1 : 0); 4809 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 4810 vcfg->accept_untag2 ? 1 : 0); 4811 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 4812 vcfg->insert_tag1_en ? 1 : 0); 4813 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 4814 vcfg->insert_tag2_en ? 1 : 0); 4815 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 4816 4817 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4818 req->vf_bitmap[req->vf_offset] = 4819 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4820 4821 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4822 if (status) 4823 dev_err(&hdev->pdev->dev, 4824 "Send port txvlan cfg command fail, ret =%d\n", 4825 status); 4826 4827 return status; 4828 } 4829 4830 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 4831 { 4832 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 4833 struct hclge_vport_vtag_rx_cfg_cmd *req; 4834 struct hclge_dev *hdev = vport->back; 4835 struct hclge_desc desc; 4836 int status; 4837 4838 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 4839 4840 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 4841 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 4842 vcfg->strip_tag1_en ? 1 : 0); 4843 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 4844 vcfg->strip_tag2_en ? 1 : 0); 4845 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 4846 vcfg->vlan1_vlan_prionly ? 1 : 0); 4847 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 4848 vcfg->vlan2_vlan_prionly ? 
1 : 0); 4849 4850 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4851 req->vf_bitmap[req->vf_offset] = 4852 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4853 4854 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4855 if (status) 4856 dev_err(&hdev->pdev->dev, 4857 "Send port rxvlan cfg command fail, ret =%d\n", 4858 status); 4859 4860 return status; 4861 } 4862 4863 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 4864 { 4865 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 4866 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 4867 struct hclge_desc desc; 4868 int status; 4869 4870 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 4871 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 4872 rx_req->ot_fst_vlan_type = 4873 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 4874 rx_req->ot_sec_vlan_type = 4875 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 4876 rx_req->in_fst_vlan_type = 4877 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 4878 rx_req->in_sec_vlan_type = 4879 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 4880 4881 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4882 if (status) { 4883 dev_err(&hdev->pdev->dev, 4884 "Send rxvlan protocol type command fail, ret =%d\n", 4885 status); 4886 return status; 4887 } 4888 4889 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 4890 4891 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; 4892 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 4893 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 4894 4895 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4896 if (status) 4897 dev_err(&hdev->pdev->dev, 4898 "Send txvlan protocol type command fail, ret =%d\n", 4899 status); 4900 4901 return status; 4902 } 4903 4904 static int hclge_init_vlan_config(struct hclge_dev *hdev) 4905 { 4906 #define HCLGE_DEF_VLAN_TYPE 0x8100 4907 4908 struct hnae3_handle *handle; 4909 struct hclge_vport *vport; 4910 int ret; 4911 int i; 4912 4913 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); 4914 if (ret) 4915 return ret; 4916 4917 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); 4918 if (ret) 4919 return ret; 4920 4921 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4922 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4923 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4924 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4925 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; 4926 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; 4927 4928 ret = hclge_set_vlan_protocol_type(hdev); 4929 if (ret) 4930 return ret; 4931 4932 for (i = 0; i < hdev->num_alloc_vport; i++) { 4933 vport = &hdev->vport[i]; 4934 vport->txvlan_cfg.accept_tag1 = true; 4935 vport->txvlan_cfg.accept_untag1 = true; 4936 4937 /* accept_tag2 and accept_untag2 are not supported on 4938 * pdev revision(0x20), new revision support them. The 4939 * value of this two fields will not return error when driver 4940 * send command to fireware in revision(0x20). 4941 * This two fields can not configured by user. 
4942 */ 4943 vport->txvlan_cfg.accept_tag2 = true; 4944 vport->txvlan_cfg.accept_untag2 = true; 4945 4946 vport->txvlan_cfg.insert_tag1_en = false; 4947 vport->txvlan_cfg.insert_tag2_en = false; 4948 vport->txvlan_cfg.default_tag1 = 0; 4949 vport->txvlan_cfg.default_tag2 = 0; 4950 4951 ret = hclge_set_vlan_tx_offload_cfg(vport); 4952 if (ret) 4953 return ret; 4954 4955 vport->rxvlan_cfg.strip_tag1_en = false; 4956 vport->rxvlan_cfg.strip_tag2_en = true; 4957 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4958 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4959 4960 ret = hclge_set_vlan_rx_offload_cfg(vport); 4961 if (ret) 4962 return ret; 4963 } 4964 4965 handle = &hdev->vport[0].nic; 4966 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 4967 } 4968 4969 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 4970 { 4971 struct hclge_vport *vport = hclge_get_vport(handle); 4972 4973 vport->rxvlan_cfg.strip_tag1_en = false; 4974 vport->rxvlan_cfg.strip_tag2_en = enable; 4975 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4976 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4977 4978 return hclge_set_vlan_rx_offload_cfg(vport); 4979 } 4980 4981 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) 4982 { 4983 struct hclge_config_max_frm_size_cmd *req; 4984 struct hclge_desc desc; 4985 int max_frm_size; 4986 int ret; 4987 4988 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 4989 4990 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 4991 max_frm_size > HCLGE_MAC_MAX_FRAME) 4992 return -EINVAL; 4993 4994 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 4995 4996 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 4997 4998 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 4999 req->max_frm_size = cpu_to_le16(max_frm_size); 5000 5001 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5002 if (ret) { 5003 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); 5004 return ret; 5005 } 5006 5007 hdev->mps = max_frm_size; 5008 5009 return 0; 5010 } 5011 5012 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 5013 { 5014 struct hclge_vport *vport = hclge_get_vport(handle); 5015 struct hclge_dev *hdev = vport->back; 5016 int ret; 5017 5018 ret = hclge_set_mac_mtu(hdev, new_mtu); 5019 if (ret) { 5020 dev_err(&hdev->pdev->dev, 5021 "Change mtu fail, ret =%d\n", ret); 5022 return ret; 5023 } 5024 5025 ret = hclge_buffer_alloc(hdev); 5026 if (ret) 5027 dev_err(&hdev->pdev->dev, 5028 "Allocate buffer fail, ret =%d\n", ret); 5029 5030 return ret; 5031 } 5032 5033 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, 5034 bool enable) 5035 { 5036 struct hclge_reset_tqp_queue_cmd *req; 5037 struct hclge_desc desc; 5038 int ret; 5039 5040 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 5041 5042 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 5043 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 5044 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); 5045 5046 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5047 if (ret) { 5048 dev_err(&hdev->pdev->dev, 5049 "Send tqp reset cmd error, status =%d\n", ret); 5050 return ret; 5051 } 5052 5053 return 0; 5054 } 5055 5056 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 5057 { 5058 struct hclge_reset_tqp_queue_cmd *req; 5059 struct hclge_desc desc; 5060 int ret; 5061 5062 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 5063 5064 req = (struct hclge_reset_tqp_queue_cmd 
*)desc.data; 5065 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 5066 5067 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5068 if (ret) { 5069 dev_err(&hdev->pdev->dev, 5070 "Get reset status error, status =%d\n", ret); 5071 return ret; 5072 } 5073 5074 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 5075 } 5076 5077 static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, 5078 u16 queue_id) 5079 { 5080 struct hnae3_queue *queue; 5081 struct hclge_tqp *tqp; 5082 5083 queue = handle->kinfo.tqp[queue_id]; 5084 tqp = container_of(queue, struct hclge_tqp, q); 5085 5086 return tqp->index; 5087 } 5088 5089 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 5090 { 5091 struct hclge_vport *vport = hclge_get_vport(handle); 5092 struct hclge_dev *hdev = vport->back; 5093 int reset_try_times = 0; 5094 int reset_status; 5095 u16 queue_gid; 5096 int ret; 5097 5098 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 5099 return; 5100 5101 queue_gid = hclge_covert_handle_qid_global(handle, queue_id); 5102 5103 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 5104 if (ret) { 5105 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); 5106 return; 5107 } 5108 5109 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 5110 if (ret) { 5111 dev_warn(&hdev->pdev->dev, 5112 "Send reset tqp cmd fail, ret = %d\n", ret); 5113 return; 5114 } 5115 5116 reset_try_times = 0; 5117 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 5118 /* Wait for tqp hw reset */ 5119 msleep(20); 5120 reset_status = hclge_get_reset_status(hdev, queue_gid); 5121 if (reset_status) 5122 break; 5123 } 5124 5125 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 5126 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 5127 return; 5128 } 5129 5130 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 5131 if (ret) { 5132 dev_warn(&hdev->pdev->dev, 5133 "Deassert the soft reset fail, ret = %d\n", ret); 5134 return; 5135 } 5136 } 5137 5138 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) 5139 { 5140 struct hclge_dev *hdev = vport->back; 5141 int reset_try_times = 0; 5142 int reset_status; 5143 u16 queue_gid; 5144 int ret; 5145 5146 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); 5147 5148 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); 5149 if (ret) { 5150 dev_warn(&hdev->pdev->dev, 5151 "Send reset tqp cmd fail, ret = %d\n", ret); 5152 return; 5153 } 5154 5155 reset_try_times = 0; 5156 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 5157 /* Wait for tqp hw reset */ 5158 msleep(20); 5159 reset_status = hclge_get_reset_status(hdev, queue_gid); 5160 if (reset_status) 5161 break; 5162 } 5163 5164 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 5165 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); 5166 return; 5167 } 5168 5169 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false); 5170 if (ret) 5171 dev_warn(&hdev->pdev->dev, 5172 "Deassert the soft reset fail, ret = %d\n", ret); 5173 } 5174 5175 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 5176 { 5177 struct hclge_vport *vport = hclge_get_vport(handle); 5178 struct hclge_dev *hdev = vport->back; 5179 5180 return hdev->fw_version; 5181 } 5182 5183 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, 5184 u32 *flowctrl_adv) 5185 { 5186 struct hclge_vport *vport = hclge_get_vport(handle); 5187 struct hclge_dev *hdev = vport->back; 5188 struct phy_device *phydev = hdev->hw.mac.phydev; 5189 5190 if (!phydev) 5191 return; 5192 5193 *flowctrl_adv |= 
(phydev->advertising & ADVERTISED_Pause) | 5194 (phydev->advertising & ADVERTISED_Asym_Pause); 5195 } 5196 5197 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5198 { 5199 struct phy_device *phydev = hdev->hw.mac.phydev; 5200 5201 if (!phydev) 5202 return; 5203 5204 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); 5205 5206 if (rx_en) 5207 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 5208 5209 if (tx_en) 5210 phydev->advertising ^= ADVERTISED_Asym_Pause; 5211 } 5212 5213 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 5214 { 5215 int ret; 5216 5217 if (rx_en && tx_en) 5218 hdev->fc_mode_last_time = HCLGE_FC_FULL; 5219 else if (rx_en && !tx_en) 5220 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 5221 else if (!rx_en && tx_en) 5222 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 5223 else 5224 hdev->fc_mode_last_time = HCLGE_FC_NONE; 5225 5226 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 5227 return 0; 5228 5229 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 5230 if (ret) { 5231 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", 5232 ret); 5233 return ret; 5234 } 5235 5236 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 5237 5238 return 0; 5239 } 5240 5241 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 5242 { 5243 struct phy_device *phydev = hdev->hw.mac.phydev; 5244 u16 remote_advertising = 0; 5245 u16 local_advertising = 0; 5246 u32 rx_pause, tx_pause; 5247 u8 flowctl; 5248 5249 if (!phydev->link || !phydev->autoneg) 5250 return 0; 5251 5252 if (phydev->advertising & ADVERTISED_Pause) 5253 local_advertising = ADVERTISE_PAUSE_CAP; 5254 5255 if (phydev->advertising & ADVERTISED_Asym_Pause) 5256 local_advertising |= ADVERTISE_PAUSE_ASYM; 5257 5258 if (phydev->pause) 5259 remote_advertising = LPA_PAUSE_CAP; 5260 5261 if (phydev->asym_pause) 5262 remote_advertising |= LPA_PAUSE_ASYM; 5263 5264 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 5265 remote_advertising); 5266 tx_pause = flowctl & FLOW_CTRL_TX; 5267 rx_pause = flowctl & FLOW_CTRL_RX; 5268 5269 if (phydev->duplex == HCLGE_MAC_HALF) { 5270 tx_pause = 0; 5271 rx_pause = 0; 5272 } 5273 5274 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 5275 } 5276 5277 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 5278 u32 *rx_en, u32 *tx_en) 5279 { 5280 struct hclge_vport *vport = hclge_get_vport(handle); 5281 struct hclge_dev *hdev = vport->back; 5282 5283 *auto_neg = hclge_get_autoneg(handle); 5284 5285 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 5286 *rx_en = 0; 5287 *tx_en = 0; 5288 return; 5289 } 5290 5291 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 5292 *rx_en = 1; 5293 *tx_en = 0; 5294 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 5295 *tx_en = 1; 5296 *rx_en = 0; 5297 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 5298 *rx_en = 1; 5299 *tx_en = 1; 5300 } else { 5301 *rx_en = 0; 5302 *tx_en = 0; 5303 } 5304 } 5305 5306 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 5307 u32 rx_en, u32 tx_en) 5308 { 5309 struct hclge_vport *vport = hclge_get_vport(handle); 5310 struct hclge_dev *hdev = vport->back; 5311 struct phy_device *phydev = hdev->hw.mac.phydev; 5312 u32 fc_autoneg; 5313 5314 fc_autoneg = hclge_get_autoneg(handle); 5315 if (auto_neg != fc_autoneg) { 5316 dev_info(&hdev->pdev->dev, 5317 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 5318 return -EOPNOTSUPP; 5319 } 5320 5321 if (hdev->tm_info.fc_mode == 
HCLGE_FC_PFC) { 5322 dev_info(&hdev->pdev->dev, 5323 "Priority flow control enabled. Cannot set link flow control.\n"); 5324 return -EOPNOTSUPP; 5325 } 5326 5327 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 5328 5329 if (!fc_autoneg) 5330 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 5331 5332 /* Only support flow control negotiation for netdev with 5333 * phy attached for now. 5334 */ 5335 if (!phydev) 5336 return -EOPNOTSUPP; 5337 5338 return phy_start_aneg(phydev); 5339 } 5340 5341 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 5342 u8 *auto_neg, u32 *speed, u8 *duplex) 5343 { 5344 struct hclge_vport *vport = hclge_get_vport(handle); 5345 struct hclge_dev *hdev = vport->back; 5346 5347 if (speed) 5348 *speed = hdev->hw.mac.speed; 5349 if (duplex) 5350 *duplex = hdev->hw.mac.duplex; 5351 if (auto_neg) 5352 *auto_neg = hdev->hw.mac.autoneg; 5353 } 5354 5355 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) 5356 { 5357 struct hclge_vport *vport = hclge_get_vport(handle); 5358 struct hclge_dev *hdev = vport->back; 5359 5360 if (media_type) 5361 *media_type = hdev->hw.mac.media_type; 5362 } 5363 5364 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 5365 u8 *tp_mdix_ctrl, u8 *tp_mdix) 5366 { 5367 struct hclge_vport *vport = hclge_get_vport(handle); 5368 struct hclge_dev *hdev = vport->back; 5369 struct phy_device *phydev = hdev->hw.mac.phydev; 5370 int mdix_ctrl, mdix, retval, is_resolved; 5371 5372 if (!phydev) { 5373 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5374 *tp_mdix = ETH_TP_MDI_INVALID; 5375 return; 5376 } 5377 5378 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 5379 5380 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 5381 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 5382 HCLGE_PHY_MDIX_CTRL_S); 5383 5384 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 5385 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 5386 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 5387 5388 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 5389 5390 switch (mdix_ctrl) { 5391 case 0x0: 5392 *tp_mdix_ctrl = ETH_TP_MDI; 5393 break; 5394 case 0x1: 5395 *tp_mdix_ctrl = ETH_TP_MDI_X; 5396 break; 5397 case 0x3: 5398 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 5399 break; 5400 default: 5401 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 5402 break; 5403 } 5404 5405 if (!is_resolved) 5406 *tp_mdix = ETH_TP_MDI_INVALID; 5407 else if (mdix) 5408 *tp_mdix = ETH_TP_MDI_X; 5409 else 5410 *tp_mdix = ETH_TP_MDI; 5411 } 5412 5413 static int hclge_init_client_instance(struct hnae3_client *client, 5414 struct hnae3_ae_dev *ae_dev) 5415 { 5416 struct hclge_dev *hdev = ae_dev->priv; 5417 struct hclge_vport *vport; 5418 int i, ret; 5419 5420 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5421 vport = &hdev->vport[i]; 5422 5423 switch (client->type) { 5424 case HNAE3_CLIENT_KNIC: 5425 5426 hdev->nic_client = client; 5427 vport->nic.client = client; 5428 ret = client->ops->init_instance(&vport->nic); 5429 if (ret) 5430 return ret; 5431 5432 if (hdev->roce_client && 5433 hnae3_dev_roce_supported(hdev)) { 5434 struct hnae3_client *rc = hdev->roce_client; 5435 5436 ret = hclge_init_roce_base_info(vport); 5437 if (ret) 5438 return ret; 5439 5440 ret = rc->ops->init_instance(&vport->roce); 5441 if (ret) 5442 return ret; 5443 } 5444 5445 break; 5446 case HNAE3_CLIENT_UNIC: 5447 hdev->nic_client = client; 5448 vport->nic.client = client; 5449 5450 ret = client->ops->init_instance(&vport->nic); 5451 if (ret) 5452 return ret; 5453 5454 
break; 5455 case HNAE3_CLIENT_ROCE: 5456 if (hnae3_dev_roce_supported(hdev)) { 5457 hdev->roce_client = client; 5458 vport->roce.client = client; 5459 } 5460 5461 if (hdev->roce_client && hdev->nic_client) { 5462 ret = hclge_init_roce_base_info(vport); 5463 if (ret) 5464 return ret; 5465 5466 ret = client->ops->init_instance(&vport->roce); 5467 if (ret) 5468 return ret; 5469 } 5470 } 5471 } 5472 5473 return 0; 5474 } 5475 5476 static void hclge_uninit_client_instance(struct hnae3_client *client, 5477 struct hnae3_ae_dev *ae_dev) 5478 { 5479 struct hclge_dev *hdev = ae_dev->priv; 5480 struct hclge_vport *vport; 5481 int i; 5482 5483 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 5484 vport = &hdev->vport[i]; 5485 if (hdev->roce_client) { 5486 hdev->roce_client->ops->uninit_instance(&vport->roce, 5487 0); 5488 hdev->roce_client = NULL; 5489 vport->roce.client = NULL; 5490 } 5491 if (client->type == HNAE3_CLIENT_ROCE) 5492 return; 5493 if (client->ops->uninit_instance) { 5494 client->ops->uninit_instance(&vport->nic, 0); 5495 hdev->nic_client = NULL; 5496 vport->nic.client = NULL; 5497 } 5498 } 5499 } 5500 5501 static int hclge_pci_init(struct hclge_dev *hdev) 5502 { 5503 struct pci_dev *pdev = hdev->pdev; 5504 struct hclge_hw *hw; 5505 int ret; 5506 5507 ret = pci_enable_device(pdev); 5508 if (ret) { 5509 dev_err(&pdev->dev, "failed to enable PCI device\n"); 5510 return ret; 5511 } 5512 5513 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 5514 if (ret) { 5515 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 5516 if (ret) { 5517 dev_err(&pdev->dev, 5518 "can't set consistent PCI DMA"); 5519 goto err_disable_device; 5520 } 5521 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 5522 } 5523 5524 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 5525 if (ret) { 5526 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 5527 goto err_disable_device; 5528 } 5529 5530 pci_set_master(pdev); 5531 hw = &hdev->hw; 5532 hw->io_base = pcim_iomap(pdev, 2, 0); 5533 if (!hw->io_base) { 5534 dev_err(&pdev->dev, "Can't map configuration register space\n"); 5535 ret = -ENOMEM; 5536 goto err_clr_master; 5537 } 5538 5539 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 5540 5541 return 0; 5542 err_clr_master: 5543 pci_clear_master(pdev); 5544 pci_release_regions(pdev); 5545 err_disable_device: 5546 pci_disable_device(pdev); 5547 5548 return ret; 5549 } 5550 5551 static void hclge_pci_uninit(struct hclge_dev *hdev) 5552 { 5553 struct pci_dev *pdev = hdev->pdev; 5554 5555 pcim_iounmap(pdev, hdev->hw.io_base); 5556 pci_free_irq_vectors(pdev); 5557 pci_clear_master(pdev); 5558 pci_release_mem_regions(pdev); 5559 pci_disable_device(pdev); 5560 } 5561 5562 static void hclge_state_init(struct hclge_dev *hdev) 5563 { 5564 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 5565 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5566 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 5567 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 5568 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 5569 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 5570 } 5571 5572 static void hclge_state_uninit(struct hclge_dev *hdev) 5573 { 5574 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5575 5576 if (hdev->service_timer.function) 5577 del_timer_sync(&hdev->service_timer); 5578 if (hdev->service_task.func) 5579 cancel_work_sync(&hdev->service_task); 5580 if (hdev->rst_service_task.func) 5581 cancel_work_sync(&hdev->rst_service_task); 5582 if (hdev->mbx_service_task.func) 5583 
cancel_work_sync(&hdev->mbx_service_task); 5584 } 5585 5586 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 5587 { 5588 struct pci_dev *pdev = ae_dev->pdev; 5589 struct hclge_dev *hdev; 5590 int ret; 5591 5592 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 5593 if (!hdev) { 5594 ret = -ENOMEM; 5595 goto out; 5596 } 5597 5598 hdev->pdev = pdev; 5599 hdev->ae_dev = ae_dev; 5600 hdev->reset_type = HNAE3_NONE_RESET; 5601 hdev->reset_request = 0; 5602 hdev->reset_pending = 0; 5603 ae_dev->priv = hdev; 5604 5605 ret = hclge_pci_init(hdev); 5606 if (ret) { 5607 dev_err(&pdev->dev, "PCI init failed\n"); 5608 goto out; 5609 } 5610 5611 /* Firmware command queue initialize */ 5612 ret = hclge_cmd_queue_init(hdev); 5613 if (ret) { 5614 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 5615 goto err_pci_uninit; 5616 } 5617 5618 /* Firmware command initialize */ 5619 ret = hclge_cmd_init(hdev); 5620 if (ret) 5621 goto err_cmd_uninit; 5622 5623 ret = hclge_get_cap(hdev); 5624 if (ret) { 5625 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5626 ret); 5627 goto err_cmd_uninit; 5628 } 5629 5630 ret = hclge_configure(hdev); 5631 if (ret) { 5632 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5633 goto err_cmd_uninit; 5634 } 5635 5636 ret = hclge_init_msi(hdev); 5637 if (ret) { 5638 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 5639 goto err_cmd_uninit; 5640 } 5641 5642 ret = hclge_misc_irq_init(hdev); 5643 if (ret) { 5644 dev_err(&pdev->dev, 5645 "Misc IRQ(vector0) init error, ret = %d.\n", 5646 ret); 5647 goto err_msi_uninit; 5648 } 5649 5650 ret = hclge_alloc_tqps(hdev); 5651 if (ret) { 5652 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 5653 goto err_msi_irq_uninit; 5654 } 5655 5656 ret = hclge_alloc_vport(hdev); 5657 if (ret) { 5658 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 5659 goto err_msi_irq_uninit; 5660 } 5661 5662 ret = hclge_map_tqp(hdev); 5663 if (ret) { 5664 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5665 goto err_msi_irq_uninit; 5666 } 5667 5668 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 5669 ret = hclge_mac_mdio_config(hdev); 5670 if (ret) { 5671 dev_err(&hdev->pdev->dev, 5672 "mdio config fail ret=%d\n", ret); 5673 goto err_msi_irq_uninit; 5674 } 5675 } 5676 5677 ret = hclge_mac_init(hdev); 5678 if (ret) { 5679 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5680 goto err_mdiobus_unreg; 5681 } 5682 5683 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5684 if (ret) { 5685 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5686 goto err_mdiobus_unreg; 5687 } 5688 5689 ret = hclge_init_vlan_config(hdev); 5690 if (ret) { 5691 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5692 goto err_mdiobus_unreg; 5693 } 5694 5695 ret = hclge_tm_schd_init(hdev); 5696 if (ret) { 5697 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 5698 goto err_mdiobus_unreg; 5699 } 5700 5701 hclge_rss_init_cfg(hdev); 5702 ret = hclge_rss_init_hw(hdev); 5703 if (ret) { 5704 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5705 goto err_mdiobus_unreg; 5706 } 5707 5708 ret = init_mgr_tbl(hdev); 5709 if (ret) { 5710 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 5711 goto err_mdiobus_unreg; 5712 } 5713 5714 hclge_dcb_ops_set(hdev); 5715 5716 timer_setup(&hdev->service_timer, hclge_service_timer, 0); 5717 INIT_WORK(&hdev->service_task, hclge_service_task); 5718 INIT_WORK(&hdev->rst_service_task, 
hclge_reset_service_task); 5719 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); 5720 5721 hclge_clear_all_event_cause(hdev); 5722 5723 /* Enable MISC vector(vector0) */ 5724 hclge_enable_vector(&hdev->misc_vector, true); 5725 5726 hclge_state_init(hdev); 5727 5728 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); 5729 return 0; 5730 5731 err_mdiobus_unreg: 5732 if (hdev->hw.mac.phydev) 5733 mdiobus_unregister(hdev->hw.mac.mdio_bus); 5734 err_msi_irq_uninit: 5735 hclge_misc_irq_uninit(hdev); 5736 err_msi_uninit: 5737 pci_free_irq_vectors(pdev); 5738 err_cmd_uninit: 5739 hclge_destroy_cmd_queue(&hdev->hw); 5740 err_pci_uninit: 5741 pcim_iounmap(pdev, hdev->hw.io_base); 5742 pci_clear_master(pdev); 5743 pci_release_regions(pdev); 5744 pci_disable_device(pdev); 5745 out: 5746 return ret; 5747 } 5748 5749 static void hclge_stats_clear(struct hclge_dev *hdev) 5750 { 5751 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); 5752 } 5753 5754 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 5755 { 5756 struct hclge_dev *hdev = ae_dev->priv; 5757 struct pci_dev *pdev = ae_dev->pdev; 5758 int ret; 5759 5760 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5761 5762 hclge_stats_clear(hdev); 5763 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 5764 5765 ret = hclge_cmd_init(hdev); 5766 if (ret) { 5767 dev_err(&pdev->dev, "Cmd queue init failed\n"); 5768 return ret; 5769 } 5770 5771 ret = hclge_get_cap(hdev); 5772 if (ret) { 5773 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5774 ret); 5775 return ret; 5776 } 5777 5778 ret = hclge_configure(hdev); 5779 if (ret) { 5780 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5781 return ret; 5782 } 5783 5784 ret = hclge_map_tqp(hdev); 5785 if (ret) { 5786 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5787 return ret; 5788 } 5789 5790 ret = hclge_mac_init(hdev); 5791 if (ret) { 5792 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5793 return ret; 5794 } 5795 5796 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5797 if (ret) { 5798 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5799 return ret; 5800 } 5801 5802 ret = hclge_init_vlan_config(hdev); 5803 if (ret) { 5804 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5805 return ret; 5806 } 5807 5808 ret = hclge_tm_init_hw(hdev); 5809 if (ret) { 5810 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 5811 return ret; 5812 } 5813 5814 ret = hclge_rss_init_hw(hdev); 5815 if (ret) { 5816 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5817 return ret; 5818 } 5819 5820 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 5821 HCLGE_DRIVER_NAME); 5822 5823 return 0; 5824 } 5825 5826 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 5827 { 5828 struct hclge_dev *hdev = ae_dev->priv; 5829 struct hclge_mac *mac = &hdev->hw.mac; 5830 5831 hclge_state_uninit(hdev); 5832 5833 if (mac->phydev) 5834 mdiobus_unregister(mac->mdio_bus); 5835 5836 /* Disable MISC vector(vector0) */ 5837 hclge_enable_vector(&hdev->misc_vector, false); 5838 synchronize_irq(hdev->misc_vector.vector_irq); 5839 5840 hclge_destroy_cmd_queue(&hdev->hw); 5841 hclge_misc_irq_uninit(hdev); 5842 hclge_pci_uninit(hdev); 5843 ae_dev->priv = NULL; 5844 } 5845 5846 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 5847 { 5848 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 5849 struct hclge_vport *vport = hclge_get_vport(handle); 5850 struct hclge_dev *hdev = vport->back; 5851 5852 
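	/* The maximum combined channel count reported to ethtool is bounded
	 * both by the RSS capability (rss_size_max per TC) and by the number
	 * of TQPs this PF actually owns.
	 */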
static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = vport->alloc_tqps;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *free_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 temp_tqps = 0;
	int i;

	for (i = 0; i < hdev->num_tqps; i++) {
		if (!hdev->htqp[i].alloced)
			temp_tqps++;
	}
	*free_tqps = temp_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static void hclge_release_tqp(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);

		tqp->q.handle = NULL;
		tqp->q.tqp_index = 0;
		tqp->alloced = false;
	}

	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
	kinfo->tqp = NULL;
}

static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int cur_rss_size = kinfo->rss_size;
	int cur_tqps = kinfo->num_tqps;
	u16 tc_offset[HCLGE_MAX_TC_NUM];
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 tc_size[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	int ret, i;

	hclge_release_tqp(vport);

	ret = hclge_knic_setup(vport, new_tqps_num);
	if (ret) {
		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_map_tqp_to_vport(hdev, vport);
	if (ret) {
		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->num_tc);

	return ret;
}

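/*
 * Register dump helpers.  The firmware first reports how many 32-bit and
 * 64-bit registers exist (hclge_get_regs_num()), and the two readers below
 * fetch them over the command queue.  Note the asymmetry for the first
 * descriptor: it keeps its command header, so only its data words carry
 * register values, which is why the 32-bit reader sizes and walks the
 * descriptors with "regs_num + 2" / "DATANUM - 2" and the 64-bit reader
 * with "regs_num + 1" / "DATANUM - 1", while every following descriptor is
 * reused in full as raw data.
 */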
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}

static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}

static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return -EOPNOTSUPP;
	}

	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}

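/*
 * ethtool register dump entry points: hclge_get_regs_len() sizes the buffer
 * (all 32-bit registers followed by all 64-bit ones) and hclge_get_regs()
 * fills it in that same order, tagging the dump with the running firmware
 * version.
 */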
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int ret;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}

	data = (u32 *)data + regs_num_32_bit;
	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
				    data);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static void hclge_get_port_type(struct hnae3_handle *handle,
				u8 *port_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	switch (media_type) {
	case HNAE3_MEDIA_TYPE_FIBER:
		*port_type = PORT_FIBRE;
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		*port_type = PORT_TP;
		break;
	case HNAE3_MEDIA_TYPE_UNKNOWN:
	default:
		*port_type = PORT_OTHER;
		break;
	}
}

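/*
 * hnae3 algorithm glue: hclge_ops is the ae_ops vtable handed to the hnae3
 * framework, and ae_algo below ties it to the PCI IDs in ae_algo_pci_tbl so
 * that matching devices are driven by this PF module.
 */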
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.update_speed_duplex_h = hclge_update_speed_duplex_h,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss_indir_size = hclge_get_rss_indir_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.update_mta_status = hclge_update_mta_status,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_flowctrl_adv = hclge_get_flowctrl_adv,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.get_port_type = hclge_get_port_type,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);