/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define HCLGE_NAME "hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))

static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
				     enum hclge_mta_dmac_sel_type mta_mac_sel,
				     bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"Mac Loopback test",
	"Serdes Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = {
	{"igu_rx_oversize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)},
	{"igu_rx_undersize_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)},
	{"igu_rx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)},
	{"igu_rx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)},
	{"igu_rx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)},
	{"igu_rx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)},
	{"egu_tx_out_all_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)},
	{"egu_tx_uni_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)},
	{"egu_tx_multi_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)},
	{"egu_tx_broad_pkt",
		HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)},
	{"ssu_ppp_mac_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)},
	{"ssu_ppp_host_key_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)},
	{"ppp_ssu_mac_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)},
	{"ppp_ssu_host_rlt_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)},
	{"ssu_tx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)},
	{"ssu_tx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)},
	{"ssu_rx_in_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)},
	{"ssu_rx_out_num",
		HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)}
};

static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = {
	{"igu_rx_err_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)},
	{"igu_rx_no_eof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)},
	{"igu_rx_no_sof_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)},
	{"egu_tx_1588_pkt",
		HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)},
	{"ssu_full_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)},
	{"ssu_part_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)},
	{"ppp_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)},
	{"ppp_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)},
	{"ssu_key_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)},
	{"pkt_curr_buf_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)},
	{"qcn_fb_rcv_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)},
	{"qcn_fb_drop_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)},
	{"qcn_fb_invaild_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)},
	{"rx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)},
	{"rx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)},
	{"rx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)},
	{"rx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)},
	{"rx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)},
	{"rx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)},
	{"rx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)},
	{"rx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)},
	{"rx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)},
	{"rx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)},
	{"rx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)},
	{"rx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)},
	{"rx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)},
	{"rx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)},
	{"rx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)},
	{"rx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)},
	{"tx_packet_tc0_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)},
	{"tx_packet_tc1_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)},
	{"tx_packet_tc2_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)},
	{"tx_packet_tc3_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)},
	{"tx_packet_tc4_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)},
	{"tx_packet_tc5_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)},
	{"tx_packet_tc6_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)},
	{"tx_packet_tc7_in_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)},
	{"tx_packet_tc0_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)},
	{"tx_packet_tc1_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)},
	{"tx_packet_tc2_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)},
	{"tx_packet_tc3_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)},
	{"tx_packet_tc4_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)},
	{"tx_packet_tc5_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)},
	{"tx_packet_tc6_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)},
	{"tx_packet_tc7_out_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)},
	{"pkt_curr_buf_tc0_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)},
	{"pkt_curr_buf_tc1_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)},
	{"pkt_curr_buf_tc2_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)},
	{"pkt_curr_buf_tc3_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)},
	{"pkt_curr_buf_tc4_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)},
	{"pkt_curr_buf_tc5_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)},
	{"pkt_curr_buf_tc6_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)},
	{"pkt_curr_buf_tc7_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)},
	{"mb_uncopy_num",
		HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)},
	{"lo_pri_unicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)},
	{"hi_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)},
	{"lo_pri_multicast_rlt_drop_num",
		HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)},
	{"rx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)},
	{"tx_oq_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)},
	{"nic_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)},
	{"roc_l2_err_drop_pkt_cnt",
		HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)}
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_overrsize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_max_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},

	{"mac_trans_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
	{"mac_trans_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
	{"mac_trans_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
	{"mac_trans_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
	{"mac_trans_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
	{"mac_trans_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
	{"mac_rcv_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
	{"mac_rcv_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
	{"mac_rcv_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
	{"mac_rcv_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
	{"mac_rcv_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
	{"mac_rcv_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
};

static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_64_BIT_CMD_NUM 5
#define HCLGE_64_BIT_RTN_DATANUM 4
	u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
	struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_RTN_DATANUM - 1;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats)
{
	stats->pkt_curr_buf_cnt = 0;
	stats->pkt_curr_buf_tc0_cnt = 0;
	stats->pkt_curr_buf_tc1_cnt = 0;
	stats->pkt_curr_buf_tc2_cnt = 0;
	stats->pkt_curr_buf_tc3_cnt = 0;
	stats->pkt_curr_buf_tc4_cnt = 0;
	stats->pkt_curr_buf_tc5_cnt = 0;
	stats->pkt_curr_buf_tc6_cnt = 0;
	stats->pkt_curr_buf_tc7_cnt = 0;
}

static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_32_BIT_CMD_NUM 8
#define HCLGE_32_BIT_RTN_DATANUM 8

	struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
	struct hclge_32_bit_stats *all_32_bit_stats;
	__le32 *desc_data;
	int i, k, n;
	u64 *data;
	int ret;

	all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats;
	data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt);

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	hclge_reset_partial_32bit_counter(all_32_bit_stats);
	for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			__le16 *desc_data_16bit;

			all_32_bit_stats->igu_rx_err_pkt +=
				le32_to_cpu(desc[i].data[0]);

			desc_data_16bit = (__le16 *)&desc[i].data[1];
			all_32_bit_stats->igu_rx_no_eof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data_16bit++;
			all_32_bit_stats->igu_rx_no_sof_pkt +=
				le16_to_cpu(*desc_data_16bit);

			desc_data = &desc[i].data[2];
			n = HCLGE_32_BIT_RTN_DATANUM - 4;
		} else {
			desc_data = (__le32 *)&desc[i];
			n = HCLGE_32_BIT_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le32_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

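/* Like the 64-bit and 32-bit helpers above, the MAC counters come back as a
 * chain of descriptors filled by one hclge_cmd_send() call: the first
 * descriptor contributes only part of its data area as counter words, every
 * following descriptor is read in full (its header space is treated as
 * counter data as well), and each word is accumulated into hdev->hw_stats.
 */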
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 17
#define HCLGE_RTN_DATA_NUM 4

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RTN_DATA_NUM - 2;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RTN_DATA_NUM;
		}
		for (k = 0; k < n; k++) {
			*data++ += le64_to_cpu(*desc_data);
			desc_data++;
		}
	}

	return 0;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[4]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN,
			 strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
				 struct net_device_stats *net_stats)
{
	net_stats->tx_dropped = 0;
	net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;

	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;

	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;

	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
	net_stats->rx_length_errors =
		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
	net_stats->rx_length_errors +=
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
	net_stats->rx_over_errors =
		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
	int status;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_32_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 32 bit stats fail, status = %d.\n",
			status);

	status = hclge_64_bit_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update 64 bit stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	hclge_update_netstat(hw_stats, net_stats);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS 0x7

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK;
		} else {
			count = -EOPNOTSUPP;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			ARRAY_SIZE(g_all_32bit_stats_string) +
			ARRAY_SIZE(g_all_64bit_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle,
			      u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_mac_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_32bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_32bit_stats_string,
					   size,
					   p);
		size = ARRAY_SIZE(g_all_64bit_stats_string);
		p = hclge_comm_get_strings(stringset,
					   g_all_64bit_stats_string,
					   size,
					   p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
				 g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string),
				 data);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats,
				 g_all_32bit_stats_string,
				 ARRAY_SIZE(g_all_32bit_stats_string),
				 p);
	p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats,
				 g_all_64bit_stats_string,
				 ARRAY_SIZE(g_all_64bit_stats_string),
				 p);
	p = hclge_tqps_get_stats(handle, p);
}

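/* Record whether this PF is the main PF. -EINVAL here means firmware has not
 * yet reported the function state as done; hclge_query_function_status()
 * below polls until some state is reported before calling this.
 */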
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n",
				ret);

			return ret;
		}

		/* Check if PF reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < 5);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
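		 * The total computed below therefore reserves
		 * HCLGE_ROCE_VECTOR_OFFSET NIC slots ahead of the RoCE ones.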
		 */
		hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
	} else {
		hdev->num_msi =
		hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
			       HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					     HCLGE_CFG_VMDQ_M,
					     HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
				     HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]),
					   HCLGE_CFG_TQP_DESC_N_M,
					   HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]),
				       HCLGE_CFG_PHY_ADDR_M,
				       HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_MEDIA_TP_M,
					 HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_RX_BUF_LEN_M,
					 HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_MAC_ADDR_H_M,
					   HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_DEFAULT_SPEED_M,
					    HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
					   HCLGE_CFG_RSS_SIZE_M,
					   HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	int i, ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
			       HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* length must be in units of 4 bytes when sent to hardware */
		hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
			       HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

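	/* Both config descriptors are fetched in a single command batch and
	 * then unpacked by hclge_parse_cfg() once the send succeeds.
	 */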
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);
	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource error %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	int ret, i;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = hdev->tc_max;

	/* Currently non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
			    int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
		       HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
			1 << HCLGE_TQP_MAP_EN_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_assign_tqp(struct hclge_vport *vport,
			    struct hnae3_queue **tqp, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = num_tqps;

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, ret;

	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
	kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
		return -EINVAL;
	}

	return 0;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
	/* this would be initialized later */
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
		ret = hclge_knic_setup(vport, num_tqps);
		if (ret) {
			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
				ret);
			return ret;
		}
	} else {
		hclge_unic_setup(vport, num_tqps);
	}

	return 0;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport)
		num_vport = hdev->num_tqps;

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

#ifdef CONFIG_PCI_IOV
	/* Enable SRIOV */
	if (hdev->num_req_vfs) {
		dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
			 hdev->num_req_vfs);
		ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs);
		if (ret) {
			hdev->num_alloc_vfs = 0;
			dev_err(&pdev->dev, "SRIOV enable failed %d\n",
				ret);
			return ret;
		}
	}
	hdev->num_alloc_vfs = hdev->num_req_vfs;
#endif

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"tx buffer alloc failed %d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_tc_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

static int hclge_get_pfc_enable_num(struct hclge_dev *hdev)
{
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	int i, cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std;
	int tc_num, pfc_enable_num;
	u32 shared_buf;
	u32 rx_priv;
	int i;

	tc_num = hclge_get_tc_num(hdev);
	pfc_enable_num = hclge_get_pfc_enable_num(hdev);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
	else
		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;

	shared_buf_tc = pfc_enable_num * hdev->mps +
			(tc_num - pfc_enable_num) * hdev->mps / 2 +
			hdev->mps;
	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all <= rx_priv + shared_std)
		return false;

	shared_buf = rx_all - rx_priv;
	buf_alloc->s_buf.buf_size = shared_buf;
	buf_alloc->s_buf.self.high = shared_buf;
	buf_alloc->s_buf.self.low = 2 * hdev->mps;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		if ((hdev->hw_tc_map & BIT(i)) &&
		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
		} else {
			buf_alloc->s_buf.tc_thrd[i].low = 0;
			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
		}
	}

	return true;
}

static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_size;

	total_size = hdev->pkt_buf_size;

	/* alloc tx buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		if (total_size < HCLGE_DEFAULT_TX_BUF)
			return -ENOMEM;

		if (hdev->hw_tc_map & BIT(i))
			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
		else
			priv->tx_buf_size = 0;

		total_size -= priv->tx_buf_size;
	}

	return 0;
}

/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 rx_all = hdev->pkt_buf_size;
	int no_pfc_priv_num, pfc_priv_num;
	struct hclge_priv_buf *priv;
	int i;

	rx_all -= hclge_get_tx_buff_alloced(buf_alloc);

	/* When DCB is not supported, rx private
	 * buffer is not allocated.
	 */
	if (!hnae3_dev_dcb_supported(hdev)) {
		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
			return -ENOMEM;

		return 0;
	}

	/* step 1, try to alloc private buffer for all enabled tc */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i)) {
			priv->enable = 1;
			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
				priv->wl.low = hdev->mps;
				priv->wl.high = priv->wl.low + hdev->mps;
				priv->buf_size = priv->wl.high +
						HCLGE_DEFAULT_DV;
			} else {
				priv->wl.low = 0;
				priv->wl.high = 2 * hdev->mps;
				priv->buf_size = priv->wl.high;
			}
		} else {
			priv->enable = 0;
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 2, try to decrease the buffer size of
	 * no pfc TC's private buffer
	 */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];

		priv->enable = 0;
		priv->wl.low = 0;
		priv->wl.high = 0;
		priv->buf_size = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		priv->enable = 1;

		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
			priv->wl.low = 128;
			priv->wl.high = priv->wl.low + hdev->mps;
			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
		} else {
			priv->wl.low = 0;
			priv->wl.high = hdev->mps;
			priv->buf_size = priv->wl.high;
		}
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 3, try to reduce the number of pfc disabled TCs,
	 * which have private buffer
	 */
	/* get the total no pfc enable TC number, which have private buffer */
	no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);

	/* let the last one be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the no pfc TC private buffer */
			priv->wl.low = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			priv->enable = 0;
			no_pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    no_pfc_priv_num == 0)
			break;
	}

	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	/* step 4, try to reduce the number of pfc enabled TCs
	 * which have private buffer.
	 */
	pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);

	/* let the last one be cleared first */
	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
		priv = &buf_alloc->priv_buf[i];

		if (hdev->hw_tc_map & BIT(i) &&
		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of pfc TC with private buffer */
			priv->wl.low = 0;
			priv->enable = 0;
			priv->wl.high = 0;
			priv->buf_size = 0;
			pfc_priv_num--;
		}

		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
		    pfc_priv_num == 0)
			break;
	}
	if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
		return 0;

	return -ENOMEM;
}

static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_buff_cmd *req;
	struct hclge_desc desc;
	int ret;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
	req = (struct hclge_rx_priv_buff_cmd *)desc.data;

	/* Alloc private buffer TCs */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];

		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
		return ret;
	}

	return 0;
}

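/* A non-zero waterline or threshold must also carry its enable bit when
 * written to hardware; HCLGE_PRIV_ENABLE() below yields that bit value.
 */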
#define HCLGE_PRIV_ENABLE(a)   ((a) > 0 ? 1 : 0)

static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_rx_priv_wl_buf *req;
	struct hclge_priv_buf *priv;
	struct hclge_desc desc[2];
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
					   false);
		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;

			priv = &buf_alloc->priv_buf[idx];
			req->tc_wl[j].high =
				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
			req->tc_wl[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
		return ret;
	}
	return 0;
}

static int hclge_common_thrd_config(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
	struct hclge_rx_com_thrd *req;
	struct hclge_desc desc[2];
	struct hclge_tc_thrd *tc;
	int i, j;
	int ret;

	for (i = 0; i < 2; i++) {
		hclge_cmd_setup_basic_desc(&desc[i],
					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
		req = (struct hclge_rx_com_thrd *)&desc[i].data;

		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);

		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];

			req->com_thrd[j].high =
				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].high |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) <<
					    HCLGE_RX_PRIV_EN_B);
			req->com_thrd[j].low =
				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
			req->com_thrd[j].low |=
				cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) <<
					    HCLGE_RX_PRIV_EN_B);
		}
	}

	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
		return ret;
	}
	return 0;
}

static int hclge_common_wl_config(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
	struct hclge_rx_com_wl *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);

	req = (struct hclge_rx_com_wl *)desc.data;
	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |=
		cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
			    HCLGE_RX_PRIV_EN_B);

	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
HCLGE_RX_PRIV_EN_B); 1873 1874 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1875 if (ret) { 1876 dev_err(&hdev->pdev->dev, 1877 "common waterline config cmd failed %d\n", ret); 1878 return ret; 1879 } 1880 1881 return 0; 1882 } 1883 1884 int hclge_buffer_alloc(struct hclge_dev *hdev) 1885 { 1886 struct hclge_pkt_buf_alloc *pkt_buf; 1887 int ret; 1888 1889 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 1890 if (!pkt_buf) 1891 return -ENOMEM; 1892 1893 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 1894 if (ret) { 1895 dev_err(&hdev->pdev->dev, 1896 "could not calc tx buffer size for all TCs %d\n", ret); 1897 goto out; 1898 } 1899 1900 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 1901 if (ret) { 1902 dev_err(&hdev->pdev->dev, 1903 "could not alloc tx buffers %d\n", ret); 1904 goto out; 1905 } 1906 1907 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 1908 if (ret) { 1909 dev_err(&hdev->pdev->dev, 1910 "could not calc rx priv buffer size for all TCs %d\n", 1911 ret); 1912 goto out; 1913 } 1914 1915 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 1916 if (ret) { 1917 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 1918 ret); 1919 goto out; 1920 } 1921 1922 if (hnae3_dev_dcb_supported(hdev)) { 1923 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 1924 if (ret) { 1925 dev_err(&hdev->pdev->dev, 1926 "could not configure rx private waterline %d\n", 1927 ret); 1928 goto out; 1929 } 1930 1931 ret = hclge_common_thrd_config(hdev, pkt_buf); 1932 if (ret) { 1933 dev_err(&hdev->pdev->dev, 1934 "could not configure common threshold %d\n", 1935 ret); 1936 goto out; 1937 } 1938 } 1939 1940 ret = hclge_common_wl_config(hdev, pkt_buf); 1941 if (ret) 1942 dev_err(&hdev->pdev->dev, 1943 "could not configure common waterline %d\n", ret); 1944 1945 out: 1946 kfree(pkt_buf); 1947 return ret; 1948 } 1949 1950 static int hclge_init_roce_base_info(struct hclge_vport *vport) 1951 { 1952 struct hnae3_handle *roce = &vport->roce; 1953 struct hnae3_handle *nic = &vport->nic; 1954 1955 roce->rinfo.num_vectors = vport->back->num_roce_msi; 1956 1957 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || 1958 vport->back->num_msi_left == 0) 1959 return -EINVAL; 1960 1961 roce->rinfo.base_vector = vport->back->roce_base_vector; 1962 1963 roce->rinfo.netdev = nic->kinfo.netdev; 1964 roce->rinfo.roce_io_base = vport->back->hw.io_base; 1965 1966 roce->pdev = nic->pdev; 1967 roce->ae_algo = nic->ae_algo; 1968 roce->numa_node_mask = nic->numa_node_mask; 1969 1970 return 0; 1971 } 1972 1973 static int hclge_init_msi(struct hclge_dev *hdev) 1974 { 1975 struct pci_dev *pdev = hdev->pdev; 1976 int vectors; 1977 int i; 1978 1979 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, 1980 PCI_IRQ_MSI | PCI_IRQ_MSIX); 1981 if (vectors < 0) { 1982 dev_err(&pdev->dev, 1983 "failed(%d) to allocate MSI/MSI-X vectors\n", 1984 vectors); 1985 return vectors; 1986 } 1987 if (vectors < hdev->num_msi) 1988 dev_warn(&hdev->pdev->dev, 1989 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", 1990 hdev->num_msi, vectors); 1991 1992 hdev->num_msi = vectors; 1993 hdev->num_msi_left = vectors; 1994 hdev->base_msi_vector = pdev->irq; 1995 hdev->roce_base_vector = hdev->base_msi_vector + 1996 HCLGE_ROCE_VECTOR_OFFSET; 1997 1998 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 1999 sizeof(u16), GFP_KERNEL); 2000 if (!hdev->vector_status) { 2001 pci_free_irq_vectors(pdev); 2002 return -ENOMEM; 2003 } 2004 2005 for (i = 0; i < hdev->num_msi; i++) 2006 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2007 2008 hdev->vector_irq = 
devm_kcalloc(&pdev->dev, hdev->num_msi, 2009 sizeof(int), GFP_KERNEL); 2010 if (!hdev->vector_irq) { 2011 pci_free_irq_vectors(pdev); 2012 return -ENOMEM; 2013 } 2014 2015 return 0; 2016 } 2017 2018 static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) 2019 { 2020 struct hclge_mac *mac = &hdev->hw.mac; 2021 2022 if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) 2023 mac->duplex = (u8)duplex; 2024 else 2025 mac->duplex = HCLGE_MAC_FULL; 2026 2027 mac->speed = speed; 2028 } 2029 2030 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) 2031 { 2032 struct hclge_config_mac_speed_dup_cmd *req; 2033 struct hclge_desc desc; 2034 int ret; 2035 2036 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2037 2038 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2039 2040 hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); 2041 2042 switch (speed) { 2043 case HCLGE_MAC_SPEED_10M: 2044 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2045 HCLGE_CFG_SPEED_S, 6); 2046 break; 2047 case HCLGE_MAC_SPEED_100M: 2048 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2049 HCLGE_CFG_SPEED_S, 7); 2050 break; 2051 case HCLGE_MAC_SPEED_1G: 2052 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2053 HCLGE_CFG_SPEED_S, 0); 2054 break; 2055 case HCLGE_MAC_SPEED_10G: 2056 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2057 HCLGE_CFG_SPEED_S, 1); 2058 break; 2059 case HCLGE_MAC_SPEED_25G: 2060 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2061 HCLGE_CFG_SPEED_S, 2); 2062 break; 2063 case HCLGE_MAC_SPEED_40G: 2064 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2065 HCLGE_CFG_SPEED_S, 3); 2066 break; 2067 case HCLGE_MAC_SPEED_50G: 2068 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2069 HCLGE_CFG_SPEED_S, 4); 2070 break; 2071 case HCLGE_MAC_SPEED_100G: 2072 hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, 2073 HCLGE_CFG_SPEED_S, 5); 2074 break; 2075 default: 2076 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2077 return -EINVAL; 2078 } 2079 2080 hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2081 1); 2082 2083 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2084 if (ret) { 2085 dev_err(&hdev->pdev->dev, 2086 "mac speed/duplex config cmd failed %d.\n", ret); 2087 return ret; 2088 } 2089 2090 hclge_check_speed_dup(hdev, duplex, speed); 2091 2092 return 0; 2093 } 2094 2095 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2096 u8 duplex) 2097 { 2098 struct hclge_vport *vport = hclge_get_vport(handle); 2099 struct hclge_dev *hdev = vport->back; 2100 2101 return hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2102 } 2103 2104 static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, 2105 u8 *duplex) 2106 { 2107 struct hclge_query_an_speed_dup_cmd *req; 2108 struct hclge_desc desc; 2109 int speed_tmp; 2110 int ret; 2111 2112 req = (struct hclge_query_an_speed_dup_cmd *)desc.data; 2113 2114 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); 2115 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2116 if (ret) { 2117 dev_err(&hdev->pdev->dev, 2118 "mac speed/autoneg/duplex query cmd failed %d\n", 2119 ret); 2120 return ret; 2121 } 2122 2123 *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); 2124 speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, 2125 HCLGE_QUERY_SPEED_S); 2126 2127 ret = hclge_parse_speed(speed_tmp, speed); 2128 if (ret) { 2129 dev_err(&hdev->pdev->dev, 2130 "could not parse 
speed(=%d), %d\n", speed_tmp, ret); 2131 return -EIO; 2132 } 2133 2134 return 0; 2135 } 2136 2137 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2138 { 2139 struct hclge_config_auto_neg_cmd *req; 2140 struct hclge_desc desc; 2141 u32 flag = 0; 2142 int ret; 2143 2144 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2145 2146 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2147 hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); 2148 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2149 2150 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2151 if (ret) { 2152 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2153 ret); 2154 return ret; 2155 } 2156 2157 return 0; 2158 } 2159 2160 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2161 { 2162 struct hclge_vport *vport = hclge_get_vport(handle); 2163 struct hclge_dev *hdev = vport->back; 2164 2165 return hclge_set_autoneg_en(hdev, enable); 2166 } 2167 2168 static int hclge_get_autoneg(struct hnae3_handle *handle) 2169 { 2170 struct hclge_vport *vport = hclge_get_vport(handle); 2171 struct hclge_dev *hdev = vport->back; 2172 struct phy_device *phydev = hdev->hw.mac.phydev; 2173 2174 if (phydev) 2175 return phydev->autoneg; 2176 2177 return hdev->hw.mac.autoneg; 2178 } 2179 2180 static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, 2181 bool mask_vlan, 2182 u8 *mac_mask) 2183 { 2184 struct hclge_mac_vlan_mask_entry_cmd *req; 2185 struct hclge_desc desc; 2186 int status; 2187 2188 req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; 2189 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); 2190 2191 hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, 2192 mask_vlan ? 1 : 0); 2193 ether_addr_copy(req->mac_mask, mac_mask); 2194 2195 status = hclge_cmd_send(&hdev->hw, &desc, 1); 2196 if (status) 2197 dev_err(&hdev->pdev->dev, 2198 "Config mac_vlan_mask failed for cmd_send, ret =%d\n", 2199 status); 2200 2201 return status; 2202 } 2203 2204 static int hclge_mac_init(struct hclge_dev *hdev) 2205 { 2206 struct hclge_mac *mac = &hdev->hw.mac; 2207 u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 2208 int ret; 2209 2210 ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); 2211 if (ret) { 2212 dev_err(&hdev->pdev->dev, 2213 "Config mac speed dup fail ret=%d\n", ret); 2214 return ret; 2215 } 2216 2217 mac->link = 0; 2218 2219 /* Initialize the MTA table work mode */ 2220 hdev->accept_mta_mc = true; 2221 hdev->enable_mta = true; 2222 hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; 2223 2224 ret = hclge_set_mta_filter_mode(hdev, 2225 hdev->mta_mac_sel_type, 2226 hdev->enable_mta); 2227 if (ret) { 2228 dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", 2229 ret); 2230 return ret; 2231 } 2232 2233 ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); 2234 if (ret) { 2235 dev_err(&hdev->pdev->dev, 2236 "set mta filter mode fail ret=%d\n", ret); 2237 return ret; 2238 } 2239 2240 ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); 2241 if (ret) 2242 dev_err(&hdev->pdev->dev, 2243 "set default mac_vlan_mask fail ret=%d\n", ret); 2244 2245 return ret; 2246 } 2247 2248 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2249 { 2250 if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) 2251 schedule_work(&hdev->mbx_service_task); 2252 } 2253 2254 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 2255 { 2256 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 2257 
		schedule_work(&hdev->rst_service_task);
}

static void hclge_task_schedule(struct hclge_dev *hdev)
{
	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
		(void)schedule_work(&hdev->service_task);
}

static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
	struct hclge_link_status_cmd *req;
	struct hclge_desc desc;
	int link_status;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}

	req = (struct hclge_link_status_cmd *)desc.data;
	link_status = req->status & HCLGE_LINK_STATUS;

	return !!link_status;
}

static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
{
	int mac_state;
	int link_stat;

	mac_state = hclge_get_mac_link_status(hdev);

	if (hdev->hw.mac.phydev) {
		if (!genphy_read_status(hdev->hw.mac.phydev))
			link_stat = mac_state &
				hdev->hw.mac.phydev->link;
		else
			link_stat = 0;
	} else {
		link_stat = mac_state;
	}

	return !!link_stat;
}

static void hclge_update_link_status(struct hclge_dev *hdev)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle;
	int state;
	int i;

	if (!client)
		return;
	state = hclge_get_mac_phy_link(hdev);
	if (state != hdev->hw.mac.link) {
		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
			handle = &hdev->vport[i].nic;
			client->ops->link_status_change(handle, state);
		}
		hdev->hw.mac.link = state;
	}
}

static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
	struct hclge_mac mac = hdev->hw.mac;
	u8 duplex;
	int speed;
	int ret;

	/* get the speed and duplex as the autoneg result from the MAC
	 * command when the PHY doesn't exist.
2338 */ 2339 if (mac.phydev || !mac.autoneg) 2340 return 0; 2341 2342 ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); 2343 if (ret) { 2344 dev_err(&hdev->pdev->dev, 2345 "mac autoneg/speed/duplex query failed %d\n", ret); 2346 return ret; 2347 } 2348 2349 if ((mac.speed != speed) || (mac.duplex != duplex)) { 2350 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); 2351 if (ret) { 2352 dev_err(&hdev->pdev->dev, 2353 "mac speed/duplex config failed %d\n", ret); 2354 return ret; 2355 } 2356 } 2357 2358 return 0; 2359 } 2360 2361 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) 2362 { 2363 struct hclge_vport *vport = hclge_get_vport(handle); 2364 struct hclge_dev *hdev = vport->back; 2365 2366 return hclge_update_speed_duplex(hdev); 2367 } 2368 2369 static int hclge_get_status(struct hnae3_handle *handle) 2370 { 2371 struct hclge_vport *vport = hclge_get_vport(handle); 2372 struct hclge_dev *hdev = vport->back; 2373 2374 hclge_update_link_status(hdev); 2375 2376 return hdev->hw.mac.link; 2377 } 2378 2379 static void hclge_service_timer(struct timer_list *t) 2380 { 2381 struct hclge_dev *hdev = from_timer(hdev, t, service_timer); 2382 2383 mod_timer(&hdev->service_timer, jiffies + HZ); 2384 hclge_task_schedule(hdev); 2385 } 2386 2387 static void hclge_service_complete(struct hclge_dev *hdev) 2388 { 2389 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); 2390 2391 /* Flush memory before next watchdog */ 2392 smp_mb__before_atomic(); 2393 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); 2394 } 2395 2396 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 2397 { 2398 u32 rst_src_reg; 2399 u32 cmdq_src_reg; 2400 2401 /* fetch the events from their corresponding regs */ 2402 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); 2403 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 2404 2405 /* Assumption: If by any chance reset and mailbox events are reported 2406 * together then we will only process reset event in this go and will 2407 * defer the processing of the mailbox events. Since, we would have not 2408 * cleared RX CMDQ event this time we would receive again another 2409 * interrupt from H/W just for the mailbox. 
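	 * In short: the reset event is handled with priority here, and the
	 * deferred mailbox event is not lost; because its CMDQ RX source bit
	 * is left uncleared, hardware will raise another interrupt for it.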
2410 */ 2411 2412 /* check for vector0 reset event sources */ 2413 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { 2414 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 2415 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 2416 return HCLGE_VECTOR0_EVENT_RST; 2417 } 2418 2419 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { 2420 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); 2421 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); 2422 return HCLGE_VECTOR0_EVENT_RST; 2423 } 2424 2425 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { 2426 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 2427 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 2428 return HCLGE_VECTOR0_EVENT_RST; 2429 } 2430 2431 /* check for vector0 mailbox(=CMDQ RX) event source */ 2432 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 2433 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 2434 *clearval = cmdq_src_reg; 2435 return HCLGE_VECTOR0_EVENT_MBX; 2436 } 2437 2438 return HCLGE_VECTOR0_EVENT_OTHER; 2439 } 2440 2441 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 2442 u32 regclr) 2443 { 2444 switch (event_type) { 2445 case HCLGE_VECTOR0_EVENT_RST: 2446 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 2447 break; 2448 case HCLGE_VECTOR0_EVENT_MBX: 2449 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 2450 break; 2451 } 2452 } 2453 2454 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 2455 { 2456 writel(enable ? 1 : 0, vector->addr); 2457 } 2458 2459 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 2460 { 2461 struct hclge_dev *hdev = data; 2462 u32 event_cause; 2463 u32 clearval; 2464 2465 hclge_enable_vector(&hdev->misc_vector, false); 2466 event_cause = hclge_check_event_cause(hdev, &clearval); 2467 2468 /* vector 0 interrupt is shared with reset and mailbox source events.*/ 2469 switch (event_cause) { 2470 case HCLGE_VECTOR0_EVENT_RST: 2471 hclge_reset_task_schedule(hdev); 2472 break; 2473 case HCLGE_VECTOR0_EVENT_MBX: 2474 /* If we are here then, 2475 * 1. Either we are not handling any mbx task and we are not 2476 * scheduled as well 2477 * OR 2478 * 2. We could be handling a mbx task but nothing more is 2479 * scheduled. 2480 * In both cases, we should schedule mbx task as there are more 2481 * mbx messages reported by this interrupt. 
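		 * The scheduled work runs hclge_mailbox_service_task(), which
		 * calls hclge_mbx_handler() to process the pending mailbox
		 * messages.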
2482 */ 2483 hclge_mbx_task_schedule(hdev); 2484 2485 default: 2486 dev_dbg(&hdev->pdev->dev, 2487 "received unknown or unhandled event of vector0\n"); 2488 break; 2489 } 2490 2491 /* we should clear the source of interrupt */ 2492 hclge_clear_event_cause(hdev, event_cause, clearval); 2493 hclge_enable_vector(&hdev->misc_vector, true); 2494 2495 return IRQ_HANDLED; 2496 } 2497 2498 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 2499 { 2500 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 2501 hdev->num_msi_left += 1; 2502 hdev->num_msi_used -= 1; 2503 } 2504 2505 static void hclge_get_misc_vector(struct hclge_dev *hdev) 2506 { 2507 struct hclge_misc_vector *vector = &hdev->misc_vector; 2508 2509 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 2510 2511 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 2512 hdev->vector_status[0] = 0; 2513 2514 hdev->num_msi_left -= 1; 2515 hdev->num_msi_used += 1; 2516 } 2517 2518 static int hclge_misc_irq_init(struct hclge_dev *hdev) 2519 { 2520 int ret; 2521 2522 hclge_get_misc_vector(hdev); 2523 2524 /* this would be explicitly freed in the end */ 2525 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 2526 0, "hclge_misc", hdev); 2527 if (ret) { 2528 hclge_free_vector(hdev, 0); 2529 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 2530 hdev->misc_vector.vector_irq); 2531 } 2532 2533 return ret; 2534 } 2535 2536 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 2537 { 2538 free_irq(hdev->misc_vector.vector_irq, hdev); 2539 hclge_free_vector(hdev, 0); 2540 } 2541 2542 static int hclge_notify_client(struct hclge_dev *hdev, 2543 enum hnae3_reset_notify_type type) 2544 { 2545 struct hnae3_client *client = hdev->nic_client; 2546 u16 i; 2547 2548 if (!client->ops->reset_notify) 2549 return -EOPNOTSUPP; 2550 2551 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 2552 struct hnae3_handle *handle = &hdev->vport[i].nic; 2553 int ret; 2554 2555 ret = client->ops->reset_notify(handle, type); 2556 if (ret) 2557 return ret; 2558 } 2559 2560 return 0; 2561 } 2562 2563 static int hclge_reset_wait(struct hclge_dev *hdev) 2564 { 2565 #define HCLGE_RESET_WATI_MS 100 2566 #define HCLGE_RESET_WAIT_CNT 5 2567 u32 val, reg, reg_bit; 2568 u32 cnt = 0; 2569 2570 switch (hdev->reset_type) { 2571 case HNAE3_GLOBAL_RESET: 2572 reg = HCLGE_GLOBAL_RESET_REG; 2573 reg_bit = HCLGE_GLOBAL_RESET_BIT; 2574 break; 2575 case HNAE3_CORE_RESET: 2576 reg = HCLGE_GLOBAL_RESET_REG; 2577 reg_bit = HCLGE_CORE_RESET_BIT; 2578 break; 2579 case HNAE3_FUNC_RESET: 2580 reg = HCLGE_FUN_RST_ING; 2581 reg_bit = HCLGE_FUN_RST_ING_B; 2582 break; 2583 default: 2584 dev_err(&hdev->pdev->dev, 2585 "Wait for unsupported reset type: %d\n", 2586 hdev->reset_type); 2587 return -EINVAL; 2588 } 2589 2590 val = hclge_read_dev(&hdev->hw, reg); 2591 while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 2592 msleep(HCLGE_RESET_WATI_MS); 2593 val = hclge_read_dev(&hdev->hw, reg); 2594 cnt++; 2595 } 2596 2597 if (cnt >= HCLGE_RESET_WAIT_CNT) { 2598 dev_warn(&hdev->pdev->dev, 2599 "Wait for reset timeout: %d\n", hdev->reset_type); 2600 return -EBUSY; 2601 } 2602 2603 return 0; 2604 } 2605 2606 static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 2607 { 2608 struct hclge_desc desc; 2609 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 2610 int ret; 2611 2612 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 2613 hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); 2614 
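	/* Request a function-level reset only: the MAC reset bit above stays
	 * cleared, the function reset bit below is set, and fun_reset_vfid
	 * selects which function to reset.
	 */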
hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 2615 req->fun_reset_vfid = func_id; 2616 2617 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2618 if (ret) 2619 dev_err(&hdev->pdev->dev, 2620 "send function reset cmd fail, status =%d\n", ret); 2621 2622 return ret; 2623 } 2624 2625 static void hclge_do_reset(struct hclge_dev *hdev) 2626 { 2627 struct pci_dev *pdev = hdev->pdev; 2628 u32 val; 2629 2630 switch (hdev->reset_type) { 2631 case HNAE3_GLOBAL_RESET: 2632 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2633 hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 2634 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2635 dev_info(&pdev->dev, "Global Reset requested\n"); 2636 break; 2637 case HNAE3_CORE_RESET: 2638 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 2639 hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); 2640 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 2641 dev_info(&pdev->dev, "Core Reset requested\n"); 2642 break; 2643 case HNAE3_FUNC_RESET: 2644 dev_info(&pdev->dev, "PF Reset requested\n"); 2645 hclge_func_reset_cmd(hdev, 0); 2646 /* schedule again to check later */ 2647 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 2648 hclge_reset_task_schedule(hdev); 2649 break; 2650 default: 2651 dev_warn(&pdev->dev, 2652 "Unsupported reset type: %d\n", hdev->reset_type); 2653 break; 2654 } 2655 } 2656 2657 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, 2658 unsigned long *addr) 2659 { 2660 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 2661 2662 /* return the highest priority reset level amongst all */ 2663 if (test_bit(HNAE3_GLOBAL_RESET, addr)) 2664 rst_level = HNAE3_GLOBAL_RESET; 2665 else if (test_bit(HNAE3_CORE_RESET, addr)) 2666 rst_level = HNAE3_CORE_RESET; 2667 else if (test_bit(HNAE3_IMP_RESET, addr)) 2668 rst_level = HNAE3_IMP_RESET; 2669 else if (test_bit(HNAE3_FUNC_RESET, addr)) 2670 rst_level = HNAE3_FUNC_RESET; 2671 2672 /* now, clear all other resets */ 2673 clear_bit(HNAE3_GLOBAL_RESET, addr); 2674 clear_bit(HNAE3_CORE_RESET, addr); 2675 clear_bit(HNAE3_IMP_RESET, addr); 2676 clear_bit(HNAE3_FUNC_RESET, addr); 2677 2678 return rst_level; 2679 } 2680 2681 static void hclge_reset(struct hclge_dev *hdev) 2682 { 2683 /* perform reset of the stack & ae device for a client */ 2684 2685 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2686 2687 if (!hclge_reset_wait(hdev)) { 2688 rtnl_lock(); 2689 hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 2690 hclge_reset_ae_dev(hdev->ae_dev); 2691 hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 2692 rtnl_unlock(); 2693 } else { 2694 /* schedule again to check pending resets later */ 2695 set_bit(hdev->reset_type, &hdev->reset_pending); 2696 hclge_reset_task_schedule(hdev); 2697 } 2698 2699 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2700 } 2701 2702 static void hclge_reset_event(struct hnae3_handle *handle, 2703 enum hnae3_reset_type reset) 2704 { 2705 struct hclge_vport *vport = hclge_get_vport(handle); 2706 struct hclge_dev *hdev = vport->back; 2707 2708 dev_info(&hdev->pdev->dev, 2709 "Receive reset event , reset_type is %d", reset); 2710 2711 switch (reset) { 2712 case HNAE3_FUNC_RESET: 2713 case HNAE3_CORE_RESET: 2714 case HNAE3_GLOBAL_RESET: 2715 /* request reset & schedule reset task */ 2716 set_bit(reset, &hdev->reset_request); 2717 hclge_reset_task_schedule(hdev); 2718 break; 2719 default: 2720 dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset); 2721 break; 2722 } 2723 } 2724 2725 static void hclge_reset_subtask(struct hclge_dev *hdev) 2726 { 2727 
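	/* Deal with resets that are already pending in hardware first, then
	 * trigger any newly requested resets.
	 */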
/* check if there is any ongoing reset in the hardware. This status can 2728 * be checked from reset_pending. If there is then, we need to wait for 2729 * hardware to complete reset. 2730 * a. If we are able to figure out in reasonable time that hardware 2731 * has fully resetted then, we can proceed with driver, client 2732 * reset. 2733 * b. else, we can come back later to check this status so re-sched 2734 * now. 2735 */ 2736 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); 2737 if (hdev->reset_type != HNAE3_NONE_RESET) 2738 hclge_reset(hdev); 2739 2740 /* check if we got any *new* reset requests to be honored */ 2741 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request); 2742 if (hdev->reset_type != HNAE3_NONE_RESET) 2743 hclge_do_reset(hdev); 2744 2745 hdev->reset_type = HNAE3_NONE_RESET; 2746 } 2747 2748 static void hclge_reset_service_task(struct work_struct *work) 2749 { 2750 struct hclge_dev *hdev = 2751 container_of(work, struct hclge_dev, rst_service_task); 2752 2753 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2754 return; 2755 2756 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 2757 2758 hclge_reset_subtask(hdev); 2759 2760 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 2761 } 2762 2763 static void hclge_mailbox_service_task(struct work_struct *work) 2764 { 2765 struct hclge_dev *hdev = 2766 container_of(work, struct hclge_dev, mbx_service_task); 2767 2768 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 2769 return; 2770 2771 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 2772 2773 hclge_mbx_handler(hdev); 2774 2775 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 2776 } 2777 2778 static void hclge_service_task(struct work_struct *work) 2779 { 2780 struct hclge_dev *hdev = 2781 container_of(work, struct hclge_dev, service_task); 2782 2783 hclge_update_speed_duplex(hdev); 2784 hclge_update_link_status(hdev); 2785 hclge_update_stats_for_all(hdev); 2786 hclge_service_complete(hdev); 2787 } 2788 2789 static void hclge_disable_sriov(struct hclge_dev *hdev) 2790 { 2791 /* If our VFs are assigned we cannot shut down SR-IOV 2792 * without causing issues, so just leave the hardware 2793 * available but disabled 2794 */ 2795 if (pci_vfs_assigned(hdev->pdev)) { 2796 dev_warn(&hdev->pdev->dev, 2797 "disabling driver while VFs are assigned\n"); 2798 return; 2799 } 2800 2801 pci_disable_sriov(hdev->pdev); 2802 } 2803 2804 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 2805 { 2806 /* VF handle has no client */ 2807 if (!handle->client) 2808 return container_of(handle, struct hclge_vport, nic); 2809 else if (handle->client->type == HNAE3_CLIENT_ROCE) 2810 return container_of(handle, struct hclge_vport, roce); 2811 else 2812 return container_of(handle, struct hclge_vport, nic); 2813 } 2814 2815 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 2816 struct hnae3_vector_info *vector_info) 2817 { 2818 struct hclge_vport *vport = hclge_get_vport(handle); 2819 struct hnae3_vector_info *vector = vector_info; 2820 struct hclge_dev *hdev = vport->back; 2821 int alloc = 0; 2822 int i, j; 2823 2824 vector_num = min(hdev->num_msi_left, vector_num); 2825 2826 for (j = 0; j < vector_num; j++) { 2827 for (i = 1; i < hdev->num_msi; i++) { 2828 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 2829 vector->vector = pci_irq_vector(hdev->pdev, i); 2830 vector->io_addr = hdev->hw.io_base + 2831 HCLGE_VECTOR_REG_BASE + 2832 (i - 1) * HCLGE_VECTOR_REG_OFFSET + 2833 vport->vport_id 
* 2834 HCLGE_VECTOR_VF_OFFSET; 2835 hdev->vector_status[i] = vport->vport_id; 2836 hdev->vector_irq[i] = vector->vector; 2837 2838 vector++; 2839 alloc++; 2840 2841 break; 2842 } 2843 } 2844 } 2845 hdev->num_msi_left -= alloc; 2846 hdev->num_msi_used += alloc; 2847 2848 return alloc; 2849 } 2850 2851 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 2852 { 2853 int i; 2854 2855 for (i = 0; i < hdev->num_msi; i++) 2856 if (vector == hdev->vector_irq[i]) 2857 return i; 2858 2859 return -EINVAL; 2860 } 2861 2862 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) 2863 { 2864 return HCLGE_RSS_KEY_SIZE; 2865 } 2866 2867 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) 2868 { 2869 return HCLGE_RSS_IND_TBL_SIZE; 2870 } 2871 2872 static int hclge_get_rss_algo(struct hclge_dev *hdev) 2873 { 2874 struct hclge_rss_config_cmd *req; 2875 struct hclge_desc desc; 2876 int rss_hash_algo; 2877 int ret; 2878 2879 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true); 2880 2881 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2882 if (ret) { 2883 dev_err(&hdev->pdev->dev, 2884 "Get link status error, status =%d\n", ret); 2885 return ret; 2886 } 2887 2888 req = (struct hclge_rss_config_cmd *)desc.data; 2889 rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK); 2890 2891 if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ) 2892 return ETH_RSS_HASH_TOP; 2893 2894 return -EINVAL; 2895 } 2896 2897 static int hclge_set_rss_algo_key(struct hclge_dev *hdev, 2898 const u8 hfunc, const u8 *key) 2899 { 2900 struct hclge_rss_config_cmd *req; 2901 struct hclge_desc desc; 2902 int key_offset; 2903 int key_size; 2904 int ret; 2905 2906 req = (struct hclge_rss_config_cmd *)desc.data; 2907 2908 for (key_offset = 0; key_offset < 3; key_offset++) { 2909 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, 2910 false); 2911 2912 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); 2913 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); 2914 2915 if (key_offset == 2) 2916 key_size = 2917 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; 2918 else 2919 key_size = HCLGE_RSS_HASH_KEY_NUM; 2920 2921 memcpy(req->hash_key, 2922 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); 2923 2924 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2925 if (ret) { 2926 dev_err(&hdev->pdev->dev, 2927 "Configure RSS config fail, status = %d\n", 2928 ret); 2929 return ret; 2930 } 2931 } 2932 return 0; 2933 } 2934 2935 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) 2936 { 2937 struct hclge_rss_indirection_table_cmd *req; 2938 struct hclge_desc desc; 2939 int i, j; 2940 int ret; 2941 2942 req = (struct hclge_rss_indirection_table_cmd *)desc.data; 2943 2944 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { 2945 hclge_cmd_setup_basic_desc 2946 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); 2947 2948 req->start_table_index = 2949 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); 2950 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); 2951 2952 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 2953 req->rss_result[j] = 2954 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; 2955 2956 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2957 if (ret) { 2958 dev_err(&hdev->pdev->dev, 2959 "Configure rss indir table fail,status = %d\n", 2960 ret); 2961 return ret; 2962 } 2963 } 2964 return 0; 2965 } 2966 2967 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, 2968 u16 *tc_size, u16 *tc_offset) 2969 { 2970 struct hclge_rss_tc_mode_cmd *req; 2971 struct 
hclge_desc desc; 2972 int ret; 2973 int i; 2974 2975 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); 2976 req = (struct hclge_rss_tc_mode_cmd *)desc.data; 2977 2978 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2979 u16 mode = 0; 2980 2981 hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); 2982 hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, 2983 HCLGE_RSS_TC_SIZE_S, tc_size[i]); 2984 hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, 2985 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); 2986 2987 req->rss_tc_mode[i] = cpu_to_le16(mode); 2988 } 2989 2990 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2991 if (ret) { 2992 dev_err(&hdev->pdev->dev, 2993 "Configure rss tc mode fail, status = %d\n", ret); 2994 return ret; 2995 } 2996 2997 return 0; 2998 } 2999 3000 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) 3001 { 3002 struct hclge_rss_input_tuple_cmd *req; 3003 struct hclge_desc desc; 3004 int ret; 3005 3006 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); 3007 3008 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3009 req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3010 req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3011 req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; 3012 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3013 req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3014 req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3015 req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; 3016 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3017 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3018 if (ret) { 3019 dev_err(&hdev->pdev->dev, 3020 "Configure rss input fail, status = %d\n", ret); 3021 return ret; 3022 } 3023 3024 return 0; 3025 } 3026 3027 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 3028 u8 *key, u8 *hfunc) 3029 { 3030 struct hclge_vport *vport = hclge_get_vport(handle); 3031 struct hclge_dev *hdev = vport->back; 3032 int i; 3033 3034 /* Get hash algorithm */ 3035 if (hfunc) 3036 *hfunc = hclge_get_rss_algo(hdev); 3037 3038 /* Get the RSS Key required by the user */ 3039 if (key) 3040 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); 3041 3042 /* Get indirect table */ 3043 if (indir) 3044 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3045 indir[i] = vport->rss_indirection_tbl[i]; 3046 3047 return 0; 3048 } 3049 3050 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 3051 const u8 *key, const u8 hfunc) 3052 { 3053 struct hclge_vport *vport = hclge_get_vport(handle); 3054 struct hclge_dev *hdev = vport->back; 3055 u8 hash_algo; 3056 int ret, i; 3057 3058 /* Set the RSS Hash Key if specififed by the user */ 3059 if (key) { 3060 /* Update the shadow RSS key with user specified qids */ 3061 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 3062 3063 if (hfunc == ETH_RSS_HASH_TOP || 3064 hfunc == ETH_RSS_HASH_NO_CHANGE) 3065 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3066 else 3067 return -EINVAL; 3068 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 3069 if (ret) 3070 return ret; 3071 } 3072 3073 /* Update the shadow RSS table with user specified qids */ 3074 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 3075 vport->rss_indirection_tbl[i] = indir[i]; 3076 3077 /* Update the hardware */ 3078 ret = hclge_set_rss_indir_table(hdev, indir); 3079 return ret; 3080 } 3081 3082 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) 3083 { 3084 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? 
HCLGE_S_PORT_BIT : 0; 3085 3086 if (nfc->data & RXH_L4_B_2_3) 3087 hash_sets |= HCLGE_D_PORT_BIT; 3088 else 3089 hash_sets &= ~HCLGE_D_PORT_BIT; 3090 3091 if (nfc->data & RXH_IP_SRC) 3092 hash_sets |= HCLGE_S_IP_BIT; 3093 else 3094 hash_sets &= ~HCLGE_S_IP_BIT; 3095 3096 if (nfc->data & RXH_IP_DST) 3097 hash_sets |= HCLGE_D_IP_BIT; 3098 else 3099 hash_sets &= ~HCLGE_D_IP_BIT; 3100 3101 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) 3102 hash_sets |= HCLGE_V_TAG_BIT; 3103 3104 return hash_sets; 3105 } 3106 3107 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 3108 struct ethtool_rxnfc *nfc) 3109 { 3110 struct hclge_vport *vport = hclge_get_vport(handle); 3111 struct hclge_dev *hdev = vport->back; 3112 struct hclge_rss_input_tuple_cmd *req; 3113 struct hclge_desc desc; 3114 u8 tuple_sets; 3115 int ret; 3116 3117 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 3118 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 3119 return -EINVAL; 3120 3121 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3122 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); 3123 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3124 if (ret) { 3125 dev_err(&hdev->pdev->dev, 3126 "Read rss tuple fail, status = %d\n", ret); 3127 return ret; 3128 } 3129 3130 hclge_cmd_reuse_desc(&desc, false); 3131 3132 tuple_sets = hclge_get_rss_hash_bits(nfc); 3133 switch (nfc->flow_type) { 3134 case TCP_V4_FLOW: 3135 req->ipv4_tcp_en = tuple_sets; 3136 break; 3137 case TCP_V6_FLOW: 3138 req->ipv6_tcp_en = tuple_sets; 3139 break; 3140 case UDP_V4_FLOW: 3141 req->ipv4_udp_en = tuple_sets; 3142 break; 3143 case UDP_V6_FLOW: 3144 req->ipv6_udp_en = tuple_sets; 3145 break; 3146 case SCTP_V4_FLOW: 3147 req->ipv4_sctp_en = tuple_sets; 3148 break; 3149 case SCTP_V6_FLOW: 3150 if ((nfc->data & RXH_L4_B_0_1) || 3151 (nfc->data & RXH_L4_B_2_3)) 3152 return -EINVAL; 3153 3154 req->ipv6_sctp_en = tuple_sets; 3155 break; 3156 case IPV4_FLOW: 3157 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3158 break; 3159 case IPV6_FLOW: 3160 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; 3161 break; 3162 default: 3163 return -EINVAL; 3164 } 3165 3166 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3167 if (ret) 3168 dev_err(&hdev->pdev->dev, 3169 "Set rss tuple fail, status = %d\n", ret); 3170 3171 return ret; 3172 } 3173 3174 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 3175 struct ethtool_rxnfc *nfc) 3176 { 3177 struct hclge_vport *vport = hclge_get_vport(handle); 3178 struct hclge_dev *hdev = vport->back; 3179 struct hclge_rss_input_tuple_cmd *req; 3180 struct hclge_desc desc; 3181 u8 tuple_sets; 3182 int ret; 3183 3184 nfc->data = 0; 3185 3186 req = (struct hclge_rss_input_tuple_cmd *)desc.data; 3187 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); 3188 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3189 if (ret) { 3190 dev_err(&hdev->pdev->dev, 3191 "Read rss tuple fail, status = %d\n", ret); 3192 return ret; 3193 } 3194 3195 switch (nfc->flow_type) { 3196 case TCP_V4_FLOW: 3197 tuple_sets = req->ipv4_tcp_en; 3198 break; 3199 case UDP_V4_FLOW: 3200 tuple_sets = req->ipv4_udp_en; 3201 break; 3202 case TCP_V6_FLOW: 3203 tuple_sets = req->ipv6_tcp_en; 3204 break; 3205 case UDP_V6_FLOW: 3206 tuple_sets = req->ipv6_udp_en; 3207 break; 3208 case SCTP_V4_FLOW: 3209 tuple_sets = req->ipv4_sctp_en; 3210 break; 3211 case SCTP_V6_FLOW: 3212 tuple_sets = req->ipv6_sctp_en; 3213 break; 3214 case IPV4_FLOW: 3215 case IPV6_FLOW: 3216 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; 3217 break; 3218 default: 3219 
return -EINVAL; 3220 } 3221 3222 if (!tuple_sets) 3223 return 0; 3224 3225 if (tuple_sets & HCLGE_D_PORT_BIT) 3226 nfc->data |= RXH_L4_B_2_3; 3227 if (tuple_sets & HCLGE_S_PORT_BIT) 3228 nfc->data |= RXH_L4_B_0_1; 3229 if (tuple_sets & HCLGE_D_IP_BIT) 3230 nfc->data |= RXH_IP_DST; 3231 if (tuple_sets & HCLGE_S_IP_BIT) 3232 nfc->data |= RXH_IP_SRC; 3233 3234 return 0; 3235 } 3236 3237 static int hclge_get_tc_size(struct hnae3_handle *handle) 3238 { 3239 struct hclge_vport *vport = hclge_get_vport(handle); 3240 struct hclge_dev *hdev = vport->back; 3241 3242 return hdev->rss_size_max; 3243 } 3244 3245 int hclge_rss_init_hw(struct hclge_dev *hdev) 3246 { 3247 const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 3248 struct hclge_vport *vport = hdev->vport; 3249 u16 tc_offset[HCLGE_MAX_TC_NUM]; 3250 u8 rss_key[HCLGE_RSS_KEY_SIZE]; 3251 u16 tc_valid[HCLGE_MAX_TC_NUM]; 3252 u16 tc_size[HCLGE_MAX_TC_NUM]; 3253 u32 *rss_indir = NULL; 3254 u16 rss_size = 0, roundup_size; 3255 const u8 *key; 3256 int i, ret, j; 3257 3258 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 3259 if (!rss_indir) 3260 return -ENOMEM; 3261 3262 /* Get default RSS key */ 3263 netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE); 3264 3265 /* Initialize RSS indirect table for each vport */ 3266 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { 3267 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { 3268 vport[j].rss_indirection_tbl[i] = 3269 i % vport[j].alloc_rss_size; 3270 3271 /* vport 0 is for PF */ 3272 if (j != 0) 3273 continue; 3274 3275 rss_size = vport[j].alloc_rss_size; 3276 rss_indir[i] = vport[j].rss_indirection_tbl[i]; 3277 } 3278 } 3279 ret = hclge_set_rss_indir_table(hdev, rss_indir); 3280 if (ret) 3281 goto err; 3282 3283 key = rss_key; 3284 ret = hclge_set_rss_algo_key(hdev, hfunc, key); 3285 if (ret) 3286 goto err; 3287 3288 ret = hclge_set_rss_input_tuple(hdev); 3289 if (ret) 3290 goto err; 3291 3292 /* Each TC have the same queue size, and tc_size set to hardware is 3293 * the log2 of roundup power of two of rss_size, the acutal queue 3294 * size is limited by indirection table. 3295 */ 3296 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { 3297 dev_err(&hdev->pdev->dev, 3298 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 3299 rss_size); 3300 ret = -EINVAL; 3301 goto err; 3302 } 3303 3304 roundup_size = roundup_pow_of_two(rss_size); 3305 roundup_size = ilog2(roundup_size); 3306 3307 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 3308 tc_valid[i] = 0; 3309 3310 if (!(hdev->hw_tc_map & BIT(i))) 3311 continue; 3312 3313 tc_valid[i] = 1; 3314 tc_size[i] = roundup_size; 3315 tc_offset[i] = rss_size * i; 3316 } 3317 3318 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 3319 3320 err: 3321 kfree(rss_indir); 3322 3323 return ret; 3324 } 3325 3326 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 3327 int vector_id, bool en, 3328 struct hnae3_ring_chain_node *ring_chain) 3329 { 3330 struct hclge_dev *hdev = vport->back; 3331 struct hnae3_ring_chain_node *node; 3332 struct hclge_desc desc; 3333 struct hclge_ctrl_vector_chain_cmd *req 3334 = (struct hclge_ctrl_vector_chain_cmd *)desc.data; 3335 enum hclge_cmd_status status; 3336 enum hclge_opcode_type op; 3337 u16 tqp_type_and_id; 3338 int i; 3339 3340 op = en ? 
HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 3341 hclge_cmd_setup_basic_desc(&desc, op, false); 3342 req->int_vector_id = vector_id; 3343 3344 i = 0; 3345 for (node = ring_chain; node; node = node->next) { 3346 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 3347 hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 3348 HCLGE_INT_TYPE_S, 3349 hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); 3350 hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 3351 HCLGE_TQP_ID_S, node->tqp_index); 3352 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 3353 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 3354 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 3355 req->vfid = vport->vport_id; 3356 3357 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3358 if (status) { 3359 dev_err(&hdev->pdev->dev, 3360 "Map TQP fail, status is %d.\n", 3361 status); 3362 return -EIO; 3363 } 3364 i = 0; 3365 3366 hclge_cmd_setup_basic_desc(&desc, 3367 op, 3368 false); 3369 req->int_vector_id = vector_id; 3370 } 3371 } 3372 3373 if (i > 0) { 3374 req->int_cause_num = i; 3375 req->vfid = vport->vport_id; 3376 status = hclge_cmd_send(&hdev->hw, &desc, 1); 3377 if (status) { 3378 dev_err(&hdev->pdev->dev, 3379 "Map TQP fail, status is %d.\n", status); 3380 return -EIO; 3381 } 3382 } 3383 3384 return 0; 3385 } 3386 3387 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, 3388 int vector, 3389 struct hnae3_ring_chain_node *ring_chain) 3390 { 3391 struct hclge_vport *vport = hclge_get_vport(handle); 3392 struct hclge_dev *hdev = vport->back; 3393 int vector_id; 3394 3395 vector_id = hclge_get_vector_index(hdev, vector); 3396 if (vector_id < 0) { 3397 dev_err(&hdev->pdev->dev, 3398 "Get vector index fail. vector_id =%d\n", vector_id); 3399 return vector_id; 3400 } 3401 3402 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 3403 } 3404 3405 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, 3406 int vector, 3407 struct hnae3_ring_chain_node *ring_chain) 3408 { 3409 struct hclge_vport *vport = hclge_get_vport(handle); 3410 struct hclge_dev *hdev = vport->back; 3411 int vector_id, ret; 3412 3413 vector_id = hclge_get_vector_index(hdev, vector); 3414 if (vector_id < 0) { 3415 dev_err(&handle->pdev->dev, 3416 "Get vector index fail. ret =%d\n", vector_id); 3417 return vector_id; 3418 } 3419 3420 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 3421 if (ret) { 3422 dev_err(&handle->pdev->dev, 3423 "Unmap ring from vector fail. 
vectorid=%d, ret =%d\n", 3424 vector_id, 3425 ret); 3426 return ret; 3427 } 3428 3429 /* Free this MSIX or MSI vector */ 3430 hclge_free_vector(hdev, vector_id); 3431 3432 return 0; 3433 } 3434 3435 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, 3436 struct hclge_promisc_param *param) 3437 { 3438 struct hclge_promisc_cfg_cmd *req; 3439 struct hclge_desc desc; 3440 int ret; 3441 3442 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 3443 3444 req = (struct hclge_promisc_cfg_cmd *)desc.data; 3445 req->vf_id = param->vf_id; 3446 req->flag = (param->enable << HCLGE_PROMISC_EN_B); 3447 3448 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3449 if (ret) { 3450 dev_err(&hdev->pdev->dev, 3451 "Set promisc mode fail, status is %d.\n", ret); 3452 return ret; 3453 } 3454 return 0; 3455 } 3456 3457 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, 3458 bool en_mc, bool en_bc, int vport_id) 3459 { 3460 if (!param) 3461 return; 3462 3463 memset(param, 0, sizeof(struct hclge_promisc_param)); 3464 if (en_uc) 3465 param->enable = HCLGE_PROMISC_EN_UC; 3466 if (en_mc) 3467 param->enable |= HCLGE_PROMISC_EN_MC; 3468 if (en_bc) 3469 param->enable |= HCLGE_PROMISC_EN_BC; 3470 param->vf_id = vport_id; 3471 } 3472 3473 static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) 3474 { 3475 struct hclge_vport *vport = hclge_get_vport(handle); 3476 struct hclge_dev *hdev = vport->back; 3477 struct hclge_promisc_param param; 3478 3479 hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); 3480 hclge_cmd_set_promisc_mode(hdev, ¶m); 3481 } 3482 3483 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 3484 { 3485 struct hclge_desc desc; 3486 struct hclge_config_mac_mode_cmd *req = 3487 (struct hclge_config_mac_mode_cmd *)desc.data; 3488 u32 loop_en = 0; 3489 int ret; 3490 3491 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 3492 hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); 3493 hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); 3494 hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); 3495 hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); 3496 hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); 3497 hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); 3498 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 3499 hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); 3500 hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); 3501 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); 3502 hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); 3503 hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); 3504 hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); 3505 hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); 3506 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3507 3508 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3509 if (ret) 3510 dev_err(&hdev->pdev->dev, 3511 "mac enable fail, ret =%d.\n", ret); 3512 } 3513 3514 static int hclge_set_loopback(struct hnae3_handle *handle, 3515 enum hnae3_loop loop_mode, bool en) 3516 { 3517 struct hclge_vport *vport = hclge_get_vport(handle); 3518 struct hclge_config_mac_mode_cmd *req; 3519 struct hclge_dev *hdev = vport->back; 3520 struct hclge_desc desc; 3521 u32 loop_en; 3522 int ret; 3523 3524 switch (loop_mode) { 3525 case HNAE3_MAC_INTER_LOOP_MAC: 3526 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 3527 /* 1 Read out the MAC mode config at first */ 3528 hclge_cmd_setup_basic_desc(&desc, 3529 HCLGE_OPC_CONFIG_MAC_MODE, 3530 true); 3531 ret = 
hclge_cmd_send(&hdev->hw, &desc, 1); 3532 if (ret) { 3533 dev_err(&hdev->pdev->dev, 3534 "mac loopback get fail, ret =%d.\n", 3535 ret); 3536 return ret; 3537 } 3538 3539 /* 2 Then setup the loopback flag */ 3540 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 3541 if (en) 3542 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1); 3543 else 3544 hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); 3545 3546 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 3547 3548 /* 3 Config mac work mode with loopback flag 3549 * and its original configure parameters 3550 */ 3551 hclge_cmd_reuse_desc(&desc, false); 3552 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3553 if (ret) 3554 dev_err(&hdev->pdev->dev, 3555 "mac loopback set fail, ret =%d.\n", ret); 3556 break; 3557 default: 3558 ret = -ENOTSUPP; 3559 dev_err(&hdev->pdev->dev, 3560 "loop_mode %d is not supported\n", loop_mode); 3561 break; 3562 } 3563 3564 return ret; 3565 } 3566 3567 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, 3568 int stream_id, bool enable) 3569 { 3570 struct hclge_desc desc; 3571 struct hclge_cfg_com_tqp_queue_cmd *req = 3572 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 3573 int ret; 3574 3575 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 3576 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); 3577 req->stream_id = cpu_to_le16(stream_id); 3578 req->enable |= enable << HCLGE_TQP_ENABLE_B; 3579 3580 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3581 if (ret) 3582 dev_err(&hdev->pdev->dev, 3583 "Tqp enable fail, status =%d.\n", ret); 3584 return ret; 3585 } 3586 3587 static void hclge_reset_tqp_stats(struct hnae3_handle *handle) 3588 { 3589 struct hclge_vport *vport = hclge_get_vport(handle); 3590 struct hnae3_queue *queue; 3591 struct hclge_tqp *tqp; 3592 int i; 3593 3594 for (i = 0; i < vport->alloc_tqps; i++) { 3595 queue = handle->kinfo.tqp[i]; 3596 tqp = container_of(queue, struct hclge_tqp, q); 3597 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); 3598 } 3599 } 3600 3601 static int hclge_ae_start(struct hnae3_handle *handle) 3602 { 3603 struct hclge_vport *vport = hclge_get_vport(handle); 3604 struct hclge_dev *hdev = vport->back; 3605 int i, queue_id, ret; 3606 3607 for (i = 0; i < vport->alloc_tqps; i++) { 3608 /* todo clear interrupt */ 3609 /* ring enable */ 3610 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); 3611 if (queue_id < 0) { 3612 dev_warn(&hdev->pdev->dev, 3613 "Get invalid queue id, ignore it\n"); 3614 continue; 3615 } 3616 3617 hclge_tqp_enable(hdev, queue_id, 0, true); 3618 } 3619 /* mac enable */ 3620 hclge_cfg_mac_mode(hdev, true); 3621 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 3622 mod_timer(&hdev->service_timer, jiffies + HZ); 3623 3624 ret = hclge_mac_start_phy(hdev); 3625 if (ret) 3626 return ret; 3627 3628 /* reset tqp stats */ 3629 hclge_reset_tqp_stats(handle); 3630 3631 return 0; 3632 } 3633 3634 static void hclge_ae_stop(struct hnae3_handle *handle) 3635 { 3636 struct hclge_vport *vport = hclge_get_vport(handle); 3637 struct hclge_dev *hdev = vport->back; 3638 int i, queue_id; 3639 3640 for (i = 0; i < vport->alloc_tqps; i++) { 3641 /* Ring disable */ 3642 queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); 3643 if (queue_id < 0) { 3644 dev_warn(&hdev->pdev->dev, 3645 "Get invalid queue id, ignore it\n"); 3646 continue; 3647 } 3648 3649 hclge_tqp_enable(hdev, queue_id, 0, false); 3650 } 3651 /* Mac disable */ 3652 hclge_cfg_mac_mode(hdev, false); 3653 3654 hclge_mac_stop_phy(hdev); 3655 3656 /* reset tqp stats */ 3657 hclge_reset_tqp_stats(handle); 
3658 } 3659 3660 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 3661 u16 cmdq_resp, u8 resp_code, 3662 enum hclge_mac_vlan_tbl_opcode op) 3663 { 3664 struct hclge_dev *hdev = vport->back; 3665 int return_status = -EIO; 3666 3667 if (cmdq_resp) { 3668 dev_err(&hdev->pdev->dev, 3669 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", 3670 cmdq_resp); 3671 return -EIO; 3672 } 3673 3674 if (op == HCLGE_MAC_VLAN_ADD) { 3675 if ((!resp_code) || (resp_code == 1)) { 3676 return_status = 0; 3677 } else if (resp_code == 2) { 3678 return_status = -EIO; 3679 dev_err(&hdev->pdev->dev, 3680 "add mac addr failed for uc_overflow.\n"); 3681 } else if (resp_code == 3) { 3682 return_status = -EIO; 3683 dev_err(&hdev->pdev->dev, 3684 "add mac addr failed for mc_overflow.\n"); 3685 } else { 3686 dev_err(&hdev->pdev->dev, 3687 "add mac addr failed for undefined, code=%d.\n", 3688 resp_code); 3689 } 3690 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 3691 if (!resp_code) { 3692 return_status = 0; 3693 } else if (resp_code == 1) { 3694 return_status = -EIO; 3695 dev_dbg(&hdev->pdev->dev, 3696 "remove mac addr failed for miss.\n"); 3697 } else { 3698 dev_err(&hdev->pdev->dev, 3699 "remove mac addr failed for undefined, code=%d.\n", 3700 resp_code); 3701 } 3702 } else if (op == HCLGE_MAC_VLAN_LKUP) { 3703 if (!resp_code) { 3704 return_status = 0; 3705 } else if (resp_code == 1) { 3706 return_status = -EIO; 3707 dev_dbg(&hdev->pdev->dev, 3708 "lookup mac addr failed for miss.\n"); 3709 } else { 3710 dev_err(&hdev->pdev->dev, 3711 "lookup mac addr failed for undefined, code=%d.\n", 3712 resp_code); 3713 } 3714 } else { 3715 return_status = -EIO; 3716 dev_err(&hdev->pdev->dev, 3717 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", 3718 op); 3719 } 3720 3721 return return_status; 3722 } 3723 3724 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 3725 { 3726 int word_num; 3727 int bit_num; 3728 3729 if (vfid > 255 || vfid < 0) 3730 return -EIO; 3731 3732 if (vfid >= 0 && vfid <= 191) { 3733 word_num = vfid / 32; 3734 bit_num = vfid % 32; 3735 if (clr) 3736 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3737 else 3738 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 3739 } else { 3740 word_num = (vfid - 192) / 32; 3741 bit_num = vfid % 32; 3742 if (clr) 3743 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 3744 else 3745 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 3746 } 3747 3748 return 0; 3749 } 3750 3751 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 3752 { 3753 #define HCLGE_DESC_NUMBER 3 3754 #define HCLGE_FUNC_NUMBER_PER_DESC 6 3755 int i, j; 3756 3757 for (i = 0; i < HCLGE_DESC_NUMBER; i++) 3758 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 3759 if (desc[i].data[j]) 3760 return false; 3761 3762 return true; 3763 } 3764 3765 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 3766 const u8 *addr) 3767 { 3768 const unsigned char *mac_addr = addr; 3769 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 3770 (mac_addr[0]) | (mac_addr[1] << 8); 3771 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 3772 3773 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 3774 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 3775 } 3776 3777 static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, 3778 const u8 *addr) 3779 { 3780 u16 high_val = addr[1] | (addr[0] << 8); 3781 struct hclge_dev *hdev = vport->back; 3782 u32 rsh = 4 - hdev->mta_mac_sel_type; 3783 u16 
ret_val = (high_val >> rsh) & 0xfff; 3784 3785 return ret_val; 3786 } 3787 3788 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, 3789 enum hclge_mta_dmac_sel_type mta_mac_sel, 3790 bool enable) 3791 { 3792 struct hclge_mta_filter_mode_cmd *req; 3793 struct hclge_desc desc; 3794 int ret; 3795 3796 req = (struct hclge_mta_filter_mode_cmd *)desc.data; 3797 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); 3798 3799 hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, 3800 enable); 3801 hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, 3802 HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); 3803 3804 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3805 if (ret) { 3806 dev_err(&hdev->pdev->dev, 3807 "Config mat filter mode failed for cmd_send, ret =%d.\n", 3808 ret); 3809 return ret; 3810 } 3811 3812 return 0; 3813 } 3814 3815 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, 3816 u8 func_id, 3817 bool enable) 3818 { 3819 struct hclge_cfg_func_mta_filter_cmd *req; 3820 struct hclge_desc desc; 3821 int ret; 3822 3823 req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; 3824 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); 3825 3826 hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, 3827 enable); 3828 req->function_id = func_id; 3829 3830 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3831 if (ret) { 3832 dev_err(&hdev->pdev->dev, 3833 "Config func_id enable failed for cmd_send, ret =%d.\n", 3834 ret); 3835 return ret; 3836 } 3837 3838 return 0; 3839 } 3840 3841 static int hclge_set_mta_table_item(struct hclge_vport *vport, 3842 u16 idx, 3843 bool enable) 3844 { 3845 struct hclge_dev *hdev = vport->back; 3846 struct hclge_cfg_func_mta_item_cmd *req; 3847 struct hclge_desc desc; 3848 u16 item_idx = 0; 3849 int ret; 3850 3851 req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; 3852 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); 3853 hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); 3854 3855 hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, 3856 HCLGE_CFG_MTA_ITEM_IDX_S, idx); 3857 req->item_idx = cpu_to_le16(item_idx); 3858 3859 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3860 if (ret) { 3861 dev_err(&hdev->pdev->dev, 3862 "Config mta table item failed for cmd_send, ret =%d.\n", 3863 ret); 3864 return ret; 3865 } 3866 3867 return 0; 3868 } 3869 3870 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 3871 struct hclge_mac_vlan_tbl_entry_cmd *req) 3872 { 3873 struct hclge_dev *hdev = vport->back; 3874 struct hclge_desc desc; 3875 u8 resp_code; 3876 u16 retval; 3877 int ret; 3878 3879 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 3880 3881 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3882 3883 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3884 if (ret) { 3885 dev_err(&hdev->pdev->dev, 3886 "del mac addr failed for cmd_send, ret =%d.\n", 3887 ret); 3888 return ret; 3889 } 3890 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 3891 retval = le16_to_cpu(desc.retval); 3892 3893 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 3894 HCLGE_MAC_VLAN_REMOVE); 3895 } 3896 3897 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 3898 struct hclge_mac_vlan_tbl_entry_cmd *req, 3899 struct hclge_desc *desc, 3900 bool is_mc) 3901 { 3902 struct hclge_dev *hdev = vport->back; 3903 u8 resp_code; 3904 u16 retval; 3905 int ret; 3906 3907 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 3908 if (is_mc) { 3909 desc[0].flag |= 
cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3910 memcpy(desc[0].data, 3911 req, 3912 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3913 hclge_cmd_setup_basic_desc(&desc[1], 3914 HCLGE_OPC_MAC_VLAN_ADD, 3915 true); 3916 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3917 hclge_cmd_setup_basic_desc(&desc[2], 3918 HCLGE_OPC_MAC_VLAN_ADD, 3919 true); 3920 ret = hclge_cmd_send(&hdev->hw, desc, 3); 3921 } else { 3922 memcpy(desc[0].data, 3923 req, 3924 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3925 ret = hclge_cmd_send(&hdev->hw, desc, 1); 3926 } 3927 if (ret) { 3928 dev_err(&hdev->pdev->dev, 3929 "lookup mac addr failed for cmd_send, ret =%d.\n", 3930 ret); 3931 return ret; 3932 } 3933 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 3934 retval = le16_to_cpu(desc[0].retval); 3935 3936 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 3937 HCLGE_MAC_VLAN_LKUP); 3938 } 3939 3940 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 3941 struct hclge_mac_vlan_tbl_entry_cmd *req, 3942 struct hclge_desc *mc_desc) 3943 { 3944 struct hclge_dev *hdev = vport->back; 3945 int cfg_status; 3946 u8 resp_code; 3947 u16 retval; 3948 int ret; 3949 3950 if (!mc_desc) { 3951 struct hclge_desc desc; 3952 3953 hclge_cmd_setup_basic_desc(&desc, 3954 HCLGE_OPC_MAC_VLAN_ADD, 3955 false); 3956 memcpy(desc.data, req, 3957 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3958 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3959 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 3960 retval = le16_to_cpu(desc.retval); 3961 3962 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 3963 resp_code, 3964 HCLGE_MAC_VLAN_ADD); 3965 } else { 3966 hclge_cmd_reuse_desc(&mc_desc[0], false); 3967 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3968 hclge_cmd_reuse_desc(&mc_desc[1], false); 3969 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 3970 hclge_cmd_reuse_desc(&mc_desc[2], false); 3971 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); 3972 memcpy(mc_desc[0].data, req, 3973 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 3974 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 3975 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 3976 retval = le16_to_cpu(mc_desc[0].retval); 3977 3978 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 3979 resp_code, 3980 HCLGE_MAC_VLAN_ADD); 3981 } 3982 3983 if (ret) { 3984 dev_err(&hdev->pdev->dev, 3985 "add mac addr failed for cmd_send, ret =%d.\n", 3986 ret); 3987 return ret; 3988 } 3989 3990 return cfg_status; 3991 } 3992 3993 static int hclge_add_uc_addr(struct hnae3_handle *handle, 3994 const unsigned char *addr) 3995 { 3996 struct hclge_vport *vport = hclge_get_vport(handle); 3997 3998 return hclge_add_uc_addr_common(vport, addr); 3999 } 4000 4001 int hclge_add_uc_addr_common(struct hclge_vport *vport, 4002 const unsigned char *addr) 4003 { 4004 struct hclge_dev *hdev = vport->back; 4005 struct hclge_mac_vlan_tbl_entry_cmd req; 4006 enum hclge_cmd_status status; 4007 u16 egress_port = 0; 4008 4009 /* mac addr check */ 4010 if (is_zero_ether_addr(addr) || 4011 is_broadcast_ether_addr(addr) || 4012 is_multicast_ether_addr(addr)) { 4013 dev_err(&hdev->pdev->dev, 4014 "Set_uc mac err! invalid mac:%pM. 
is_zero:%d,is_br=%d,is_mul=%d\n", 4015 addr, 4016 is_zero_ether_addr(addr), 4017 is_broadcast_ether_addr(addr), 4018 is_multicast_ether_addr(addr)); 4019 return -EINVAL; 4020 } 4021 4022 memset(&req, 0, sizeof(req)); 4023 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4024 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4025 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); 4026 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4027 4028 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); 4029 hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); 4030 hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 4031 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 4032 hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, 4033 HCLGE_MAC_EPORT_PFID_S, 0); 4034 4035 req.egress_port = cpu_to_le16(egress_port); 4036 4037 hclge_prepare_mac_addr(&req, addr); 4038 4039 status = hclge_add_mac_vlan_tbl(vport, &req, NULL); 4040 4041 return status; 4042 } 4043 4044 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 4045 const unsigned char *addr) 4046 { 4047 struct hclge_vport *vport = hclge_get_vport(handle); 4048 4049 return hclge_rm_uc_addr_common(vport, addr); 4050 } 4051 4052 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 4053 const unsigned char *addr) 4054 { 4055 struct hclge_dev *hdev = vport->back; 4056 struct hclge_mac_vlan_tbl_entry_cmd req; 4057 enum hclge_cmd_status status; 4058 4059 /* mac addr check */ 4060 if (is_zero_ether_addr(addr) || 4061 is_broadcast_ether_addr(addr) || 4062 is_multicast_ether_addr(addr)) { 4063 dev_dbg(&hdev->pdev->dev, 4064 "Remove mac err! invalid mac:%pM.\n", 4065 addr); 4066 return -EINVAL; 4067 } 4068 4069 memset(&req, 0, sizeof(req)); 4070 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4071 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4072 hclge_prepare_mac_addr(&req, addr); 4073 status = hclge_remove_mac_vlan_tbl(vport, &req); 4074 4075 return status; 4076 } 4077 4078 static int hclge_add_mc_addr(struct hnae3_handle *handle, 4079 const unsigned char *addr) 4080 { 4081 struct hclge_vport *vport = hclge_get_vport(handle); 4082 4083 return hclge_add_mc_addr_common(vport, addr); 4084 } 4085 4086 int hclge_add_mc_addr_common(struct hclge_vport *vport, 4087 const unsigned char *addr) 4088 { 4089 struct hclge_dev *hdev = vport->back; 4090 struct hclge_mac_vlan_tbl_entry_cmd req; 4091 struct hclge_desc desc[3]; 4092 u16 tbl_idx; 4093 int status; 4094 4095 /* mac addr check */ 4096 if (!is_multicast_ether_addr(addr)) { 4097 dev_err(&hdev->pdev->dev, 4098 "Add mc mac err! 
invalid mac:%pM.\n", 4099 addr); 4100 return -EINVAL; 4101 } 4102 memset(&req, 0, sizeof(req)); 4103 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4104 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4105 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4106 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4107 hclge_prepare_mac_addr(&req, addr); 4108 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4109 if (!status) { 4110 /* This mac addr exists, update VFID for it */ 4111 hclge_update_desc_vfid(desc, vport->vport_id, false); 4112 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4113 } else { 4114 /* This mac addr does not exist, add a new entry for it */ 4115 memset(desc[0].data, 0, sizeof(desc[0].data)); 4116 memset(desc[1].data, 0, sizeof(desc[1].data)); 4117 memset(desc[2].data, 0, sizeof(desc[2].data)); 4118 hclge_update_desc_vfid(desc, vport->vport_id, false); 4119 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4120 } 4121 4122 /* Set MTA table for this MAC address */ 4123 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 4124 status = hclge_set_mta_table_item(vport, tbl_idx, true); 4125 4126 return status; 4127 } 4128 4129 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 4130 const unsigned char *addr) 4131 { 4132 struct hclge_vport *vport = hclge_get_vport(handle); 4133 4134 return hclge_rm_mc_addr_common(vport, addr); 4135 } 4136 4137 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 4138 const unsigned char *addr) 4139 { 4140 struct hclge_dev *hdev = vport->back; 4141 struct hclge_mac_vlan_tbl_entry_cmd req; 4142 enum hclge_cmd_status status; 4143 struct hclge_desc desc[3]; 4144 u16 tbl_idx; 4145 4146 /* mac addr check */ 4147 if (!is_multicast_ether_addr(addr)) { 4148 dev_dbg(&hdev->pdev->dev, 4149 "Remove mc mac err! invalid mac:%pM.\n", 4150 addr); 4151 return -EINVAL; 4152 } 4153 4154 memset(&req, 0, sizeof(req)); 4155 hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 4156 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4157 hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 4158 hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 4159 hclge_prepare_mac_addr(&req, addr); 4160 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 4161 if (!status) { 4162 /* This mac addr exists, remove this handle's VFID for it */ 4163 hclge_update_desc_vfid(desc, vport->vport_id, true); 4164 4165 if (hclge_is_all_function_id_zero(desc)) 4166 /* All the vfids are zero, so delete this entry */ 4167 status = hclge_remove_mac_vlan_tbl(vport, &req); 4168 else 4169 /* Not all the vfids are zero, just update the vfid */ 4170 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 4171 4172 } else { 4173 /* This mac addr does not exist, so it can't be deleted */ 4174 dev_err(&hdev->pdev->dev, 4175 "Rm multicast mac addr failed, ret = %d.\n", 4176 status); 4177 return -EIO; 4178 } 4179 4180 /* Clear the MTA table item for this MAC address */ 4181 tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); 4182 status = hclge_set_mta_table_item(vport, tbl_idx, false); 4183 4184 return status; 4185 } 4186 4187 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 4188 { 4189 struct hclge_vport *vport = hclge_get_vport(handle); 4190 struct hclge_dev *hdev = vport->back; 4191 4192 ether_addr_copy(p, hdev->hw.mac.mac_addr); 4193 } 4194 4195 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) 4196 { 4197 const unsigned char *new_addr = (const unsigned char *)p; 4198 struct hclge_vport *vport = hclge_get_vport(handle); 4199 struct hclge_dev *hdev = vport->back; 4200 4201 /* mac addr check */ 4202 if (is_zero_ether_addr(new_addr) || 4203 is_broadcast_ether_addr(new_addr) || 4204 is_multicast_ether_addr(new_addr)) { 4205 dev_err(&hdev->pdev->dev, 4206 "Change uc mac err! 
invalid mac:%p.\n", 4207 new_addr); 4208 return -EINVAL; 4209 } 4210 4211 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); 4212 4213 if (!hclge_add_uc_addr(handle, new_addr)) { 4214 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 4215 return 0; 4216 } 4217 4218 return -EIO; 4219 } 4220 4221 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 4222 bool filter_en) 4223 { 4224 struct hclge_vlan_filter_ctrl_cmd *req; 4225 struct hclge_desc desc; 4226 int ret; 4227 4228 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); 4229 4230 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 4231 req->vlan_type = vlan_type; 4232 req->vlan_fe = filter_en; 4233 4234 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4235 if (ret) { 4236 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", 4237 ret); 4238 return ret; 4239 } 4240 4241 return 0; 4242 } 4243 4244 int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, 4245 bool is_kill, u16 vlan, u8 qos, __be16 proto) 4246 { 4247 #define HCLGE_MAX_VF_BYTES 16 4248 struct hclge_vlan_filter_vf_cfg_cmd *req0; 4249 struct hclge_vlan_filter_vf_cfg_cmd *req1; 4250 struct hclge_desc desc[2]; 4251 u8 vf_byte_val; 4252 u8 vf_byte_off; 4253 int ret; 4254 4255 hclge_cmd_setup_basic_desc(&desc[0], 4256 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4257 hclge_cmd_setup_basic_desc(&desc[1], 4258 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 4259 4260 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 4261 4262 vf_byte_off = vfid / 8; 4263 vf_byte_val = 1 << (vfid % 8); 4264 4265 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 4266 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 4267 4268 req0->vlan_id = cpu_to_le16(vlan); 4269 req0->vlan_cfg = is_kill; 4270 4271 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 4272 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 4273 else 4274 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 4275 4276 ret = hclge_cmd_send(&hdev->hw, desc, 2); 4277 if (ret) { 4278 dev_err(&hdev->pdev->dev, 4279 "Send vf vlan command fail, ret =%d.\n", 4280 ret); 4281 return ret; 4282 } 4283 4284 if (!is_kill) { 4285 if (!req0->resp_code || req0->resp_code == 1) 4286 return 0; 4287 4288 dev_err(&hdev->pdev->dev, 4289 "Add vf vlan filter fail, ret =%d.\n", 4290 req0->resp_code); 4291 } else { 4292 if (!req0->resp_code) 4293 return 0; 4294 4295 dev_err(&hdev->pdev->dev, 4296 "Kill vf vlan filter fail, ret =%d.\n", 4297 req0->resp_code); 4298 } 4299 4300 return -EIO; 4301 } 4302 4303 static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, 4304 __be16 proto, u16 vlan_id, 4305 bool is_kill) 4306 { 4307 struct hclge_vport *vport = hclge_get_vport(handle); 4308 struct hclge_dev *hdev = vport->back; 4309 struct hclge_vlan_filter_pf_cfg_cmd *req; 4310 struct hclge_desc desc; 4311 u8 vlan_offset_byte_val; 4312 u8 vlan_offset_byte; 4313 u8 vlan_offset_160; 4314 int ret; 4315 4316 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 4317 4318 vlan_offset_160 = vlan_id / 160; 4319 vlan_offset_byte = (vlan_id % 160) / 8; 4320 vlan_offset_byte_val = 1 << (vlan_id % 8); 4321 4322 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 4323 req->vlan_offset = vlan_offset_160; 4324 req->vlan_cfg = is_kill; 4325 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 4326 4327 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4328 if (ret) { 4329 dev_err(&hdev->pdev->dev, 4330 "port vlan command, send fail, ret =%d.\n", 4331 ret); 4332 return ret; 4333 } 4334 4335 ret = 
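/*
 * Note: besides the port-level filter programmed above, the same VLAN is
 * mirrored into the VF VLAN filter table for function 0 (i.e. the PF's own
 * entry) by the hclge_set_vf_vlan_common() call below.
 */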
hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto); 4336 if (ret) { 4337 dev_err(&hdev->pdev->dev, 4338 "Set pf vlan filter config fail, ret =%d.\n", 4339 ret); 4340 return -EIO; 4341 } 4342 4343 return 0; 4344 } 4345 4346 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 4347 u16 vlan, u8 qos, __be16 proto) 4348 { 4349 struct hclge_vport *vport = hclge_get_vport(handle); 4350 struct hclge_dev *hdev = vport->back; 4351 4352 if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) 4353 return -EINVAL; 4354 if (proto != htons(ETH_P_8021Q)) 4355 return -EPROTONOSUPPORT; 4356 4357 return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); 4358 } 4359 4360 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 4361 { 4362 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 4363 struct hclge_vport_vtag_tx_cfg_cmd *req; 4364 struct hclge_dev *hdev = vport->back; 4365 struct hclge_desc desc; 4366 int status; 4367 4368 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 4369 4370 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; 4371 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 4372 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 4373 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, 4374 vcfg->accept_tag ? 1 : 0); 4375 hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, 4376 vcfg->accept_untag ? 1 : 0); 4377 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 4378 vcfg->insert_tag1_en ? 1 : 0); 4379 hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 4380 vcfg->insert_tag2_en ? 1 : 0); 4381 hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 4382 4383 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4384 req->vf_bitmap[req->vf_offset] = 4385 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4386 4387 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4388 if (status) 4389 dev_err(&hdev->pdev->dev, 4390 "Send port txvlan cfg command fail, ret =%d\n", 4391 status); 4392 4393 return status; 4394 } 4395 4396 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 4397 { 4398 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 4399 struct hclge_vport_vtag_rx_cfg_cmd *req; 4400 struct hclge_dev *hdev = vport->back; 4401 struct hclge_desc desc; 4402 int status; 4403 4404 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 4405 4406 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 4407 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 4408 vcfg->strip_tag1_en ? 1 : 0); 4409 hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 4410 vcfg->strip_tag2_en ? 1 : 0); 4411 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 4412 vcfg->vlan1_vlan_prionly ? 1 : 0); 4413 hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 4414 vcfg->vlan2_vlan_prionly ? 
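/*
 * Note: as in the TX offload path above, this command carries a VF bitmap;
 * the byte and bit derived from vport_id a few lines below restrict the RX
 * VLAN offload configuration to the current vport only.
 */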
1 : 0); 4415 4416 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 4417 req->vf_bitmap[req->vf_offset] = 4418 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 4419 4420 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4421 if (status) 4422 dev_err(&hdev->pdev->dev, 4423 "Send port rxvlan cfg command fail, ret =%d\n", 4424 status); 4425 4426 return status; 4427 } 4428 4429 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 4430 { 4431 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 4432 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 4433 struct hclge_desc desc; 4434 int status; 4435 4436 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 4437 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 4438 rx_req->ot_fst_vlan_type = 4439 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 4440 rx_req->ot_sec_vlan_type = 4441 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 4442 rx_req->in_fst_vlan_type = 4443 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 4444 rx_req->in_sec_vlan_type = 4445 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 4446 4447 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4448 if (status) { 4449 dev_err(&hdev->pdev->dev, 4450 "Send rxvlan protocol type command fail, ret =%d\n", 4451 status); 4452 return status; 4453 } 4454 4455 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 4456 4457 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; 4458 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 4459 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 4460 4461 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4462 if (status) 4463 dev_err(&hdev->pdev->dev, 4464 "Send txvlan protocol type command fail, ret =%d\n", 4465 status); 4466 4467 return status; 4468 } 4469 4470 static int hclge_init_vlan_config(struct hclge_dev *hdev) 4471 { 4472 #define HCLGE_FILTER_TYPE_VF 0 4473 #define HCLGE_FILTER_TYPE_PORT 1 4474 #define HCLGE_DEF_VLAN_TYPE 0x8100 4475 4476 struct hnae3_handle *handle; 4477 struct hclge_vport *vport; 4478 int ret; 4479 int i; 4480 4481 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); 4482 if (ret) 4483 return ret; 4484 4485 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); 4486 if (ret) 4487 return ret; 4488 4489 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4490 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4491 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; 4492 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; 4493 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; 4494 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; 4495 4496 ret = hclge_set_vlan_protocol_type(hdev); 4497 if (ret) 4498 return ret; 4499 4500 for (i = 0; i < hdev->num_alloc_vport; i++) { 4501 vport = &hdev->vport[i]; 4502 vport->txvlan_cfg.accept_tag = true; 4503 vport->txvlan_cfg.accept_untag = true; 4504 vport->txvlan_cfg.insert_tag1_en = false; 4505 vport->txvlan_cfg.insert_tag2_en = false; 4506 vport->txvlan_cfg.default_tag1 = 0; 4507 vport->txvlan_cfg.default_tag2 = 0; 4508 4509 ret = hclge_set_vlan_tx_offload_cfg(vport); 4510 if (ret) 4511 return ret; 4512 4513 vport->rxvlan_cfg.strip_tag1_en = false; 4514 vport->rxvlan_cfg.strip_tag2_en = true; 4515 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4516 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4517 4518 ret = hclge_set_vlan_rx_offload_cfg(vport); 4519 if (ret) 4520 return ret; 4521 } 4522 4523 handle = 
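/*
 * Note: the default port VLAN filter entry (VLAN 0, 802.1Q, is_kill = false)
 * is installed below through vport 0's nic handle once every vport's TX/RX
 * VLAN offload configuration has been written.
 */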
&hdev->vport[0].nic; 4524 return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 4525 } 4526 4527 static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 4528 { 4529 struct hclge_vport *vport = hclge_get_vport(handle); 4530 4531 vport->rxvlan_cfg.strip_tag1_en = false; 4532 vport->rxvlan_cfg.strip_tag2_en = enable; 4533 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 4534 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 4535 4536 return hclge_set_vlan_rx_offload_cfg(vport); 4537 } 4538 4539 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 4540 { 4541 struct hclge_vport *vport = hclge_get_vport(handle); 4542 struct hclge_config_max_frm_size_cmd *req; 4543 struct hclge_dev *hdev = vport->back; 4544 struct hclge_desc desc; 4545 int ret; 4546 4547 if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU)) 4548 return -EINVAL; 4549 4550 hdev->mps = new_mtu; 4551 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 4552 4553 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 4554 req->max_frm_size = cpu_to_le16(new_mtu); 4555 4556 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4557 if (ret) { 4558 dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); 4559 return ret; 4560 } 4561 4562 return 0; 4563 } 4564 4565 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, 4566 bool enable) 4567 { 4568 struct hclge_reset_tqp_queue_cmd *req; 4569 struct hclge_desc desc; 4570 int ret; 4571 4572 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 4573 4574 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 4575 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 4576 hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); 4577 4578 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4579 if (ret) { 4580 dev_err(&hdev->pdev->dev, 4581 "Send tqp reset cmd error, status =%d\n", ret); 4582 return ret; 4583 } 4584 4585 return 0; 4586 } 4587 4588 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 4589 { 4590 struct hclge_reset_tqp_queue_cmd *req; 4591 struct hclge_desc desc; 4592 int ret; 4593 4594 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 4595 4596 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 4597 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); 4598 4599 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4600 if (ret) { 4601 dev_err(&hdev->pdev->dev, 4602 "Get reset status error, status =%d\n", ret); 4603 return ret; 4604 } 4605 4606 return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 4607 } 4608 4609 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 4610 { 4611 struct hclge_vport *vport = hclge_get_vport(handle); 4612 struct hclge_dev *hdev = vport->back; 4613 int reset_try_times = 0; 4614 int reset_status; 4615 int ret; 4616 4617 ret = hclge_tqp_enable(hdev, queue_id, 0, false); 4618 if (ret) { 4619 dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); 4620 return; 4621 } 4622 4623 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true); 4624 if (ret) { 4625 dev_warn(&hdev->pdev->dev, 4626 "Send reset tqp cmd fail, ret = %d\n", ret); 4627 return; 4628 } 4629 4630 reset_try_times = 0; 4631 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 4632 /* Wait for tqp hw reset */ 4633 msleep(20); 4634 reset_status = hclge_get_reset_status(hdev, queue_id); 4635 if (reset_status) 4636 break; 4637 } 4638 4639 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 4640 dev_warn(&hdev->pdev->dev, "Reset TQP 
fail\n"); 4641 return; 4642 } 4643 4644 ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false); 4645 if (ret) { 4646 dev_warn(&hdev->pdev->dev, 4647 "Deassert the soft reset fail, ret = %d\n", ret); 4648 return; 4649 } 4650 } 4651 4652 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 4653 { 4654 struct hclge_vport *vport = hclge_get_vport(handle); 4655 struct hclge_dev *hdev = vport->back; 4656 4657 return hdev->fw_version; 4658 } 4659 4660 static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, 4661 u32 *flowctrl_adv) 4662 { 4663 struct hclge_vport *vport = hclge_get_vport(handle); 4664 struct hclge_dev *hdev = vport->back; 4665 struct phy_device *phydev = hdev->hw.mac.phydev; 4666 4667 if (!phydev) 4668 return; 4669 4670 *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | 4671 (phydev->advertising & ADVERTISED_Asym_Pause); 4672 } 4673 4674 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 4675 { 4676 struct phy_device *phydev = hdev->hw.mac.phydev; 4677 4678 if (!phydev) 4679 return; 4680 4681 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); 4682 4683 if (rx_en) 4684 phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; 4685 4686 if (tx_en) 4687 phydev->advertising ^= ADVERTISED_Asym_Pause; 4688 } 4689 4690 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 4691 { 4692 enum hclge_fc_mode fc_mode; 4693 int ret; 4694 4695 if (rx_en && tx_en) 4696 fc_mode = HCLGE_FC_FULL; 4697 else if (rx_en && !tx_en) 4698 fc_mode = HCLGE_FC_RX_PAUSE; 4699 else if (!rx_en && tx_en) 4700 fc_mode = HCLGE_FC_TX_PAUSE; 4701 else 4702 fc_mode = HCLGE_FC_NONE; 4703 4704 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 4705 hdev->fc_mode_last_time = fc_mode; 4706 return 0; 4707 } 4708 4709 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 4710 if (ret) { 4711 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", 4712 ret); 4713 return ret; 4714 } 4715 4716 hdev->tm_info.fc_mode = fc_mode; 4717 4718 return 0; 4719 } 4720 4721 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 4722 { 4723 struct phy_device *phydev = hdev->hw.mac.phydev; 4724 u16 remote_advertising = 0; 4725 u16 local_advertising = 0; 4726 u32 rx_pause, tx_pause; 4727 u8 flowctl; 4728 4729 if (!phydev->link || !phydev->autoneg) 4730 return 0; 4731 4732 if (phydev->advertising & ADVERTISED_Pause) 4733 local_advertising = ADVERTISE_PAUSE_CAP; 4734 4735 if (phydev->advertising & ADVERTISED_Asym_Pause) 4736 local_advertising |= ADVERTISE_PAUSE_ASYM; 4737 4738 if (phydev->pause) 4739 remote_advertising = LPA_PAUSE_CAP; 4740 4741 if (phydev->asym_pause) 4742 remote_advertising |= LPA_PAUSE_ASYM; 4743 4744 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 4745 remote_advertising); 4746 tx_pause = flowctl & FLOW_CTRL_TX; 4747 rx_pause = flowctl & FLOW_CTRL_RX; 4748 4749 if (phydev->duplex == HCLGE_MAC_HALF) { 4750 tx_pause = 0; 4751 rx_pause = 0; 4752 } 4753 4754 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 4755 } 4756 4757 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 4758 u32 *rx_en, u32 *tx_en) 4759 { 4760 struct hclge_vport *vport = hclge_get_vport(handle); 4761 struct hclge_dev *hdev = vport->back; 4762 4763 *auto_neg = hclge_get_autoneg(handle); 4764 4765 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 4766 *rx_en = 0; 4767 *tx_en = 0; 4768 return; 4769 } 4770 4771 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 4772 *rx_en = 1; 4773 *tx_en = 0; 4774 } else if (hdev->tm_info.fc_mode == 
HCLGE_FC_TX_PAUSE) { 4775 *tx_en = 1; 4776 *rx_en = 0; 4777 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 4778 *rx_en = 1; 4779 *tx_en = 1; 4780 } else { 4781 *rx_en = 0; 4782 *tx_en = 0; 4783 } 4784 } 4785 4786 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 4787 u32 rx_en, u32 tx_en) 4788 { 4789 struct hclge_vport *vport = hclge_get_vport(handle); 4790 struct hclge_dev *hdev = vport->back; 4791 struct phy_device *phydev = hdev->hw.mac.phydev; 4792 u32 fc_autoneg; 4793 4794 /* Only support flow control negotiation for netdev with 4795 * phy attached for now. 4796 */ 4797 if (!phydev) 4798 return -EOPNOTSUPP; 4799 4800 fc_autoneg = hclge_get_autoneg(handle); 4801 if (auto_neg != fc_autoneg) { 4802 dev_info(&hdev->pdev->dev, 4803 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 4804 return -EOPNOTSUPP; 4805 } 4806 4807 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 4808 dev_info(&hdev->pdev->dev, 4809 "Priority flow control enabled. Cannot set link flow control.\n"); 4810 return -EOPNOTSUPP; 4811 } 4812 4813 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 4814 4815 if (!fc_autoneg) 4816 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 4817 4818 return phy_start_aneg(phydev); 4819 } 4820 4821 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 4822 u8 *auto_neg, u32 *speed, u8 *duplex) 4823 { 4824 struct hclge_vport *vport = hclge_get_vport(handle); 4825 struct hclge_dev *hdev = vport->back; 4826 4827 if (speed) 4828 *speed = hdev->hw.mac.speed; 4829 if (duplex) 4830 *duplex = hdev->hw.mac.duplex; 4831 if (auto_neg) 4832 *auto_neg = hdev->hw.mac.autoneg; 4833 } 4834 4835 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) 4836 { 4837 struct hclge_vport *vport = hclge_get_vport(handle); 4838 struct hclge_dev *hdev = vport->back; 4839 4840 if (media_type) 4841 *media_type = hdev->hw.mac.media_type; 4842 } 4843 4844 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 4845 u8 *tp_mdix_ctrl, u8 *tp_mdix) 4846 { 4847 struct hclge_vport *vport = hclge_get_vport(handle); 4848 struct hclge_dev *hdev = vport->back; 4849 struct phy_device *phydev = hdev->hw.mac.phydev; 4850 int mdix_ctrl, mdix, retval, is_resolved; 4851 4852 if (!phydev) { 4853 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 4854 *tp_mdix = ETH_TP_MDI_INVALID; 4855 return; 4856 } 4857 4858 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 4859 4860 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 4861 mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 4862 HCLGE_PHY_MDIX_CTRL_S); 4863 4864 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 4865 mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 4866 is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 4867 4868 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 4869 4870 switch (mdix_ctrl) { 4871 case 0x0: 4872 *tp_mdix_ctrl = ETH_TP_MDI; 4873 break; 4874 case 0x1: 4875 *tp_mdix_ctrl = ETH_TP_MDI_X; 4876 break; 4877 case 0x3: 4878 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 4879 break; 4880 default: 4881 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 4882 break; 4883 } 4884 4885 if (!is_resolved) 4886 *tp_mdix = ETH_TP_MDI_INVALID; 4887 else if (mdix) 4888 *tp_mdix = ETH_TP_MDI_X; 4889 else 4890 *tp_mdix = ETH_TP_MDI; 4891 } 4892 4893 static int hclge_init_client_instance(struct hnae3_client *client, 4894 struct hnae3_ae_dev *ae_dev) 4895 { 4896 struct hclge_dev *hdev = ae_dev->priv; 4897 struct hclge_vport *vport; 4898 int i, ret; 4899 4900 for (i = 0; i < hdev->num_vmdq_vport + 
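/*
 * Note: the loop below walks vport 0 plus the hdev->num_vmdq_vport VMDq
 * vports and binds the registering client to each of them; for a KNIC
 * client the RoCE client instance is also initialized here once both
 * clients are registered and the device supports RoCE.
 */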
1; i++) { 4901 vport = &hdev->vport[i]; 4902 4903 switch (client->type) { 4904 case HNAE3_CLIENT_KNIC: 4905 4906 hdev->nic_client = client; 4907 vport->nic.client = client; 4908 ret = client->ops->init_instance(&vport->nic); 4909 if (ret) 4910 goto err; 4911 4912 if (hdev->roce_client && 4913 hnae3_dev_roce_supported(hdev)) { 4914 struct hnae3_client *rc = hdev->roce_client; 4915 4916 ret = hclge_init_roce_base_info(vport); 4917 if (ret) 4918 goto err; 4919 4920 ret = rc->ops->init_instance(&vport->roce); 4921 if (ret) 4922 goto err; 4923 } 4924 4925 break; 4926 case HNAE3_CLIENT_UNIC: 4927 hdev->nic_client = client; 4928 vport->nic.client = client; 4929 4930 ret = client->ops->init_instance(&vport->nic); 4931 if (ret) 4932 goto err; 4933 4934 break; 4935 case HNAE3_CLIENT_ROCE: 4936 if (hnae3_dev_roce_supported(hdev)) { 4937 hdev->roce_client = client; 4938 vport->roce.client = client; 4939 } 4940 4941 if (hdev->roce_client && hdev->nic_client) { 4942 ret = hclge_init_roce_base_info(vport); 4943 if (ret) 4944 goto err; 4945 4946 ret = client->ops->init_instance(&vport->roce); 4947 if (ret) 4948 goto err; 4949 } 4950 } 4951 } 4952 4953 return 0; 4954 err: 4955 return ret; 4956 } 4957 4958 static void hclge_uninit_client_instance(struct hnae3_client *client, 4959 struct hnae3_ae_dev *ae_dev) 4960 { 4961 struct hclge_dev *hdev = ae_dev->priv; 4962 struct hclge_vport *vport; 4963 int i; 4964 4965 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { 4966 vport = &hdev->vport[i]; 4967 if (hdev->roce_client) { 4968 hdev->roce_client->ops->uninit_instance(&vport->roce, 4969 0); 4970 hdev->roce_client = NULL; 4971 vport->roce.client = NULL; 4972 } 4973 if (client->type == HNAE3_CLIENT_ROCE) 4974 return; 4975 if (client->ops->uninit_instance) { 4976 client->ops->uninit_instance(&vport->nic, 0); 4977 hdev->nic_client = NULL; 4978 vport->nic.client = NULL; 4979 } 4980 } 4981 } 4982 4983 static int hclge_pci_init(struct hclge_dev *hdev) 4984 { 4985 struct pci_dev *pdev = hdev->pdev; 4986 struct hclge_hw *hw; 4987 int ret; 4988 4989 ret = pci_enable_device(pdev); 4990 if (ret) { 4991 dev_err(&pdev->dev, "failed to enable PCI device\n"); 4992 goto err_no_drvdata; 4993 } 4994 4995 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 4996 if (ret) { 4997 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 4998 if (ret) { 4999 dev_err(&pdev->dev, 5000 "can't set consistent PCI DMA"); 5001 goto err_disable_device; 5002 } 5003 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 5004 } 5005 5006 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 5007 if (ret) { 5008 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 5009 goto err_disable_device; 5010 } 5011 5012 pci_set_master(pdev); 5013 hw = &hdev->hw; 5014 hw->back = hdev; 5015 hw->io_base = pcim_iomap(pdev, 2, 0); 5016 if (!hw->io_base) { 5017 dev_err(&pdev->dev, "Can't map configuration register space\n"); 5018 ret = -ENOMEM; 5019 goto err_clr_master; 5020 } 5021 5022 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 5023 5024 return 0; 5025 err_clr_master: 5026 pci_clear_master(pdev); 5027 pci_release_regions(pdev); 5028 err_disable_device: 5029 pci_disable_device(pdev); 5030 err_no_drvdata: 5031 pci_set_drvdata(pdev, NULL); 5032 5033 return ret; 5034 } 5035 5036 static void hclge_pci_uninit(struct hclge_dev *hdev) 5037 { 5038 struct pci_dev *pdev = hdev->pdev; 5039 5040 pci_free_irq_vectors(pdev); 5041 pci_clear_master(pdev); 5042 pci_release_mem_regions(pdev); 5043 pci_disable_device(pdev); 5044 } 5045 5046 static int 
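/*
 * Note: hclge_init_ae_dev() below is the per-device init path: PCI setup,
 * firmware command queue and command init, capability query, device
 * configuration, MSI/misc IRQ init, TQP and vport allocation and mapping,
 * MDIO/MAC init, buffer allocation, TSO, VLAN, TM scheduling and RSS setup,
 * after which the service timer/work items are set up and the misc vector
 * is enabled.
 */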
hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 5047 { 5048 struct pci_dev *pdev = ae_dev->pdev; 5049 struct hclge_dev *hdev; 5050 int ret; 5051 5052 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 5053 if (!hdev) { 5054 ret = -ENOMEM; 5055 goto err_hclge_dev; 5056 } 5057 5058 hdev->pdev = pdev; 5059 hdev->ae_dev = ae_dev; 5060 hdev->reset_type = HNAE3_NONE_RESET; 5061 hdev->reset_request = 0; 5062 hdev->reset_pending = 0; 5063 ae_dev->priv = hdev; 5064 5065 ret = hclge_pci_init(hdev); 5066 if (ret) { 5067 dev_err(&pdev->dev, "PCI init failed\n"); 5068 goto err_pci_init; 5069 } 5070 5071 /* Firmware command queue initialize */ 5072 ret = hclge_cmd_queue_init(hdev); 5073 if (ret) { 5074 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); 5075 return ret; 5076 } 5077 5078 /* Firmware command initialize */ 5079 ret = hclge_cmd_init(hdev); 5080 if (ret) 5081 goto err_cmd_init; 5082 5083 ret = hclge_get_cap(hdev); 5084 if (ret) { 5085 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5086 ret); 5087 return ret; 5088 } 5089 5090 ret = hclge_configure(hdev); 5091 if (ret) { 5092 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5093 return ret; 5094 } 5095 5096 ret = hclge_init_msi(hdev); 5097 if (ret) { 5098 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 5099 return ret; 5100 } 5101 5102 ret = hclge_misc_irq_init(hdev); 5103 if (ret) { 5104 dev_err(&pdev->dev, 5105 "Misc IRQ(vector0) init error, ret = %d.\n", 5106 ret); 5107 return ret; 5108 } 5109 5110 ret = hclge_alloc_tqps(hdev); 5111 if (ret) { 5112 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 5113 return ret; 5114 } 5115 5116 ret = hclge_alloc_vport(hdev); 5117 if (ret) { 5118 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); 5119 return ret; 5120 } 5121 5122 ret = hclge_map_tqp(hdev); 5123 if (ret) { 5124 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5125 return ret; 5126 } 5127 5128 ret = hclge_mac_mdio_config(hdev); 5129 if (ret) { 5130 dev_warn(&hdev->pdev->dev, 5131 "mdio config fail ret=%d\n", ret); 5132 return ret; 5133 } 5134 5135 ret = hclge_mac_init(hdev); 5136 if (ret) { 5137 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5138 return ret; 5139 } 5140 ret = hclge_buffer_alloc(hdev); 5141 if (ret) { 5142 dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); 5143 return ret; 5144 } 5145 5146 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5147 if (ret) { 5148 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5149 return ret; 5150 } 5151 5152 ret = hclge_init_vlan_config(hdev); 5153 if (ret) { 5154 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5155 return ret; 5156 } 5157 5158 ret = hclge_tm_schd_init(hdev); 5159 if (ret) { 5160 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 5161 return ret; 5162 } 5163 5164 ret = hclge_rss_init_hw(hdev); 5165 if (ret) { 5166 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5167 return ret; 5168 } 5169 5170 hclge_dcb_ops_set(hdev); 5171 5172 timer_setup(&hdev->service_timer, hclge_service_timer, 0); 5173 INIT_WORK(&hdev->service_task, hclge_service_task); 5174 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); 5175 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); 5176 5177 /* Enable MISC vector(vector0) */ 5178 hclge_enable_vector(&hdev->misc_vector, true); 5179 5180 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 5181 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5182 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, 
&hdev->state); 5183 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 5184 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 5185 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 5186 5187 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); 5188 return 0; 5189 5190 err_cmd_init: 5191 pci_release_regions(pdev); 5192 err_pci_init: 5193 pci_set_drvdata(pdev, NULL); 5194 err_hclge_dev: 5195 return ret; 5196 } 5197 5198 static void hclge_stats_clear(struct hclge_dev *hdev) 5199 { 5200 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); 5201 } 5202 5203 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 5204 { 5205 struct hclge_dev *hdev = ae_dev->priv; 5206 struct pci_dev *pdev = ae_dev->pdev; 5207 int ret; 5208 5209 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5210 5211 hclge_stats_clear(hdev); 5212 5213 ret = hclge_cmd_init(hdev); 5214 if (ret) { 5215 dev_err(&pdev->dev, "Cmd queue init failed\n"); 5216 return ret; 5217 } 5218 5219 ret = hclge_get_cap(hdev); 5220 if (ret) { 5221 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", 5222 ret); 5223 return ret; 5224 } 5225 5226 ret = hclge_configure(hdev); 5227 if (ret) { 5228 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 5229 return ret; 5230 } 5231 5232 ret = hclge_map_tqp(hdev); 5233 if (ret) { 5234 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 5235 return ret; 5236 } 5237 5238 ret = hclge_mac_init(hdev); 5239 if (ret) { 5240 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 5241 return ret; 5242 } 5243 5244 ret = hclge_buffer_alloc(hdev); 5245 if (ret) { 5246 dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); 5247 return ret; 5248 } 5249 5250 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 5251 if (ret) { 5252 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 5253 return ret; 5254 } 5255 5256 ret = hclge_init_vlan_config(hdev); 5257 if (ret) { 5258 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 5259 return ret; 5260 } 5261 5262 ret = hclge_tm_schd_init(hdev); 5263 if (ret) { 5264 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 5265 return ret; 5266 } 5267 5268 ret = hclge_rss_init_hw(hdev); 5269 if (ret) { 5270 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 5271 return ret; 5272 } 5273 5274 /* Enable MISC vector(vector0) */ 5275 hclge_enable_vector(&hdev->misc_vector, true); 5276 5277 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 5278 HCLGE_DRIVER_NAME); 5279 5280 return 0; 5281 } 5282 5283 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 5284 { 5285 struct hclge_dev *hdev = ae_dev->priv; 5286 struct hclge_mac *mac = &hdev->hw.mac; 5287 5288 set_bit(HCLGE_STATE_DOWN, &hdev->state); 5289 5290 if (IS_ENABLED(CONFIG_PCI_IOV)) 5291 hclge_disable_sriov(hdev); 5292 5293 if (hdev->service_timer.function) 5294 del_timer_sync(&hdev->service_timer); 5295 if (hdev->service_task.func) 5296 cancel_work_sync(&hdev->service_task); 5297 if (hdev->rst_service_task.func) 5298 cancel_work_sync(&hdev->rst_service_task); 5299 if (hdev->mbx_service_task.func) 5300 cancel_work_sync(&hdev->mbx_service_task); 5301 5302 if (mac->phydev) 5303 mdiobus_unregister(mac->mdio_bus); 5304 5305 /* Disable MISC vector(vector0) */ 5306 hclge_enable_vector(&hdev->misc_vector, false); 5307 hclge_destroy_cmd_queue(&hdev->hw); 5308 hclge_misc_irq_uninit(hdev); 5309 hclge_pci_uninit(hdev); 5310 ae_dev->priv = NULL; 5311 } 5312 5313 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 5314 { 5315 struct 
hnae3_knic_private_info *kinfo = &handle->kinfo; 5316 struct hclge_vport *vport = hclge_get_vport(handle); 5317 struct hclge_dev *hdev = vport->back; 5318 5319 return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); 5320 } 5321 5322 static void hclge_get_channels(struct hnae3_handle *handle, 5323 struct ethtool_channels *ch) 5324 { 5325 struct hclge_vport *vport = hclge_get_vport(handle); 5326 5327 ch->max_combined = hclge_get_max_channels(handle); 5328 ch->other_count = 1; 5329 ch->max_other = 1; 5330 ch->combined_count = vport->alloc_tqps; 5331 } 5332 5333 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 5334 u16 *free_tqps, u16 *max_rss_size) 5335 { 5336 struct hclge_vport *vport = hclge_get_vport(handle); 5337 struct hclge_dev *hdev = vport->back; 5338 u16 temp_tqps = 0; 5339 int i; 5340 5341 for (i = 0; i < hdev->num_tqps; i++) { 5342 if (!hdev->htqp[i].alloced) 5343 temp_tqps++; 5344 } 5345 *free_tqps = temp_tqps; 5346 *max_rss_size = hdev->rss_size_max; 5347 } 5348 5349 static void hclge_release_tqp(struct hclge_vport *vport) 5350 { 5351 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5352 struct hclge_dev *hdev = vport->back; 5353 int i; 5354 5355 for (i = 0; i < kinfo->num_tqps; i++) { 5356 struct hclge_tqp *tqp = 5357 container_of(kinfo->tqp[i], struct hclge_tqp, q); 5358 5359 tqp->q.handle = NULL; 5360 tqp->q.tqp_index = 0; 5361 tqp->alloced = false; 5362 } 5363 5364 devm_kfree(&hdev->pdev->dev, kinfo->tqp); 5365 kinfo->tqp = NULL; 5366 } 5367 5368 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) 5369 { 5370 struct hclge_vport *vport = hclge_get_vport(handle); 5371 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5372 struct hclge_dev *hdev = vport->back; 5373 int cur_rss_size = kinfo->rss_size; 5374 int cur_tqps = kinfo->num_tqps; 5375 u16 tc_offset[HCLGE_MAX_TC_NUM]; 5376 u16 tc_valid[HCLGE_MAX_TC_NUM]; 5377 u16 tc_size[HCLGE_MAX_TC_NUM]; 5378 u16 roundup_size; 5379 u32 *rss_indir; 5380 int ret, i; 5381 5382 hclge_release_tqp(vport); 5383 5384 ret = hclge_knic_setup(vport, new_tqps_num); 5385 if (ret) { 5386 dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); 5387 return ret; 5388 } 5389 5390 ret = hclge_map_tqp_to_vport(hdev, vport); 5391 if (ret) { 5392 dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); 5393 return ret; 5394 } 5395 5396 ret = hclge_tm_schd_init(hdev); 5397 if (ret) { 5398 dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); 5399 return ret; 5400 } 5401 5402 roundup_size = roundup_pow_of_two(kinfo->rss_size); 5403 roundup_size = ilog2(roundup_size); 5404 /* Set the RSS TC mode according to the new RSS size */ 5405 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 5406 tc_valid[i] = 0; 5407 5408 if (!(hdev->hw_tc_map & BIT(i))) 5409 continue; 5410 5411 tc_valid[i] = 1; 5412 tc_size[i] = roundup_size; 5413 tc_offset[i] = kinfo->rss_size * i; 5414 } 5415 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); 5416 if (ret) 5417 return ret; 5418 5419 /* Reinitializes the rss indirect table according to the new RSS size */ 5420 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); 5421 if (!rss_indir) 5422 return -ENOMEM; 5423 5424 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) 5425 rss_indir[i] = i % kinfo->rss_size; 5426 5427 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 5428 if (ret) 5429 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 5430 ret); 5431 5432 kfree(rss_indir); 5433 5434 if (!ret) 5435 
dev_info(&hdev->pdev->dev, 5436 "Channels changed, rss_size from %d to %d, tqps from %d to %d", 5437 cur_rss_size, kinfo->rss_size, 5438 cur_tqps, kinfo->rss_size * kinfo->num_tc); 5439 5440 return ret; 5441 } 5442 5443 static const struct hnae3_ae_ops hclge_ops = { 5444 .init_ae_dev = hclge_init_ae_dev, 5445 .uninit_ae_dev = hclge_uninit_ae_dev, 5446 .init_client_instance = hclge_init_client_instance, 5447 .uninit_client_instance = hclge_uninit_client_instance, 5448 .map_ring_to_vector = hclge_map_ring_to_vector, 5449 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, 5450 .get_vector = hclge_get_vector, 5451 .set_promisc_mode = hclge_set_promisc_mode, 5452 .set_loopback = hclge_set_loopback, 5453 .start = hclge_ae_start, 5454 .stop = hclge_ae_stop, 5455 .get_status = hclge_get_status, 5456 .get_ksettings_an_result = hclge_get_ksettings_an_result, 5457 .update_speed_duplex_h = hclge_update_speed_duplex_h, 5458 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, 5459 .get_media_type = hclge_get_media_type, 5460 .get_rss_key_size = hclge_get_rss_key_size, 5461 .get_rss_indir_size = hclge_get_rss_indir_size, 5462 .get_rss = hclge_get_rss, 5463 .set_rss = hclge_set_rss, 5464 .set_rss_tuple = hclge_set_rss_tuple, 5465 .get_rss_tuple = hclge_get_rss_tuple, 5466 .get_tc_size = hclge_get_tc_size, 5467 .get_mac_addr = hclge_get_mac_addr, 5468 .set_mac_addr = hclge_set_mac_addr, 5469 .add_uc_addr = hclge_add_uc_addr, 5470 .rm_uc_addr = hclge_rm_uc_addr, 5471 .add_mc_addr = hclge_add_mc_addr, 5472 .rm_mc_addr = hclge_rm_mc_addr, 5473 .set_autoneg = hclge_set_autoneg, 5474 .get_autoneg = hclge_get_autoneg, 5475 .get_pauseparam = hclge_get_pauseparam, 5476 .set_pauseparam = hclge_set_pauseparam, 5477 .set_mtu = hclge_set_mtu, 5478 .reset_queue = hclge_reset_tqp, 5479 .get_stats = hclge_get_stats, 5480 .update_stats = hclge_update_stats, 5481 .get_strings = hclge_get_strings, 5482 .get_sset_count = hclge_get_sset_count, 5483 .get_fw_version = hclge_get_fw_version, 5484 .get_mdix_mode = hclge_get_mdix_mode, 5485 .set_vlan_filter = hclge_set_port_vlan_filter, 5486 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 5487 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 5488 .reset_event = hclge_reset_event, 5489 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, 5490 .set_channels = hclge_set_channels, 5491 .get_channels = hclge_get_channels, 5492 .get_flowctrl_adv = hclge_get_flowctrl_adv, 5493 }; 5494 5495 static struct hnae3_ae_algo ae_algo = { 5496 .ops = &hclge_ops, 5497 .name = HCLGE_NAME, 5498 .pdev_id_table = ae_algo_pci_tbl, 5499 }; 5500 5501 static int hclge_init(void) 5502 { 5503 pr_info("%s is initializing\n", HCLGE_NAME); 5504 5505 return hnae3_register_ae_algo(&ae_algo); 5506 } 5507 5508 static void hclge_exit(void) 5509 { 5510 hnae3_unregister_ae_algo(&ae_algo); 5511 } 5512 module_init(hclge_init); 5513 module_exit(hclge_exit); 5514 5515 MODULE_LICENSE("GPL"); 5516 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 5517 MODULE_DESCRIPTION("HCLGE Driver"); 5518 MODULE_VERSION(HCLGE_MOD_VERSION); 5519
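/*
 * Note: module init registers &ae_algo (ops = &hclge_ops, pdev_id_table =
 * ae_algo_pci_tbl) with the hnae3 framework via hnae3_register_ae_algo();
 * the framework is then expected to invoke hclge_init_ae_dev() /
 * hclge_uninit_ae_dev() through hclge_ops for each matching PCI device.
 */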