1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/acpi.h> 5 #include <linux/device.h> 6 #include <linux/etherdevice.h> 7 #include <linux/init.h> 8 #include <linux/interrupt.h> 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <linux/pci.h> 13 #include <linux/platform_device.h> 14 #include <linux/if_vlan.h> 15 #include <linux/crash_dump.h> 16 #include <net/ipv6.h> 17 #include <net/rtnetlink.h> 18 #include "hclge_cmd.h" 19 #include "hclge_dcb.h" 20 #include "hclge_main.h" 21 #include "hclge_mbx.h" 22 #include "hclge_mdio.h" 23 #include "hclge_regs.h" 24 #include "hclge_tm.h" 25 #include "hclge_err.h" 26 #include "hnae3.h" 27 #include "hclge_devlink.h" 28 #include "hclge_comm_cmd.h" 29 30 #include "hclge_trace.h" 31 32 #define HCLGE_NAME "hclge" 33 34 #define HCLGE_BUF_SIZE_UNIT 256U 35 #define HCLGE_BUF_MUL_BY 2 36 #define HCLGE_BUF_DIV_BY 2 37 #define NEED_RESERVE_TC_NUM 2 38 #define BUF_MAX_PERCENT 100 39 #define BUF_RESERVE_PERCENT 90 40 41 #define HCLGE_RESET_MAX_FAIL_CNT 5 42 #define HCLGE_RESET_SYNC_TIME 100 43 #define HCLGE_PF_RESET_SYNC_TIME 20 44 #define HCLGE_PF_RESET_SYNC_CNT 1500 45 46 #define HCLGE_LINK_STATUS_MS 10 47 48 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); 49 static int hclge_init_vlan_config(struct hclge_dev *hdev); 50 static void hclge_sync_vlan_filter(struct hclge_dev *hdev); 51 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); 52 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); 53 static void hclge_rfs_filter_expire(struct hclge_dev *hdev); 54 static int hclge_clear_arfs_rules(struct hclge_dev *hdev); 55 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, 56 unsigned long *addr); 57 static int hclge_set_default_loopback(struct hclge_dev *hdev); 58 59 static void hclge_sync_mac_table(struct hclge_dev *hdev); 60 static void hclge_restore_hw_table(struct hclge_dev *hdev); 61 static void hclge_sync_promisc_mode(struct hclge_dev *hdev); 62 static void hclge_sync_fd_table(struct hclge_dev *hdev); 63 static void hclge_update_fec_stats(struct hclge_dev *hdev); 64 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, 65 int wait_cnt); 66 static int hclge_update_port_info(struct hclge_dev *hdev); 67 68 static struct hnae3_ae_algo ae_algo; 69 70 static struct workqueue_struct *hclge_wq; 71 72 static const struct pci_device_id ae_algo_pci_tbl[] = { 73 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 74 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 75 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, 76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, 77 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0}, 81 /* required last entry */ 82 {0, } 83 }; 84 85 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); 86 87 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { 88 "External Loopback test", 89 "App Loopback test", 90 "Serdes serial Loopback test", 91 "Serdes parallel Loopback test", 92 "Phy Loopback test" 93 }; 94 95 static const struct hclge_comm_stats_str g_mac_stats_string[] = { 96 {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, 97 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, 98 {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, 99 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, 100 
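	/* The second field of each entry below is compared against
	 * hdev->ae_dev->dev_specs.mac_stats_num in hclge_comm_get_count()
	 * and hclge_comm_get_stats() further down: counters tagged with
	 * HCLGE_MAC_STATS_MAX_NUM_V2 are only exposed when the firmware
	 * reports the larger V2 register set, while V1 counters are also
	 * kept on older devices.
	 */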
{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 101 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)}, 102 {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 103 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)}, 104 {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 105 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, 106 {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 107 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, 108 {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 109 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, 110 {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 111 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 112 {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 113 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 114 {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 115 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 116 {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 117 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 118 {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 119 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 120 {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 121 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 122 {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 123 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 124 {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 125 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 126 {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 127 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)}, 128 {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 129 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)}, 130 {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 131 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)}, 132 {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 133 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)}, 134 {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 135 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)}, 136 {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 137 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)}, 138 {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 139 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)}, 140 {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 141 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)}, 142 {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 143 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, 144 {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 145 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 146 {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 147 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 148 {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 149 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 150 {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 151 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 152 {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 153 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 154 {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 155 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 156 {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 157 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 158 {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 159 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 160 {"mac_rx_pfc_pri0_xoff_time", 
HCLGE_MAC_STATS_MAX_NUM_V2, 161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)}, 162 {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 163 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)}, 164 {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)}, 166 {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 167 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)}, 168 {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 169 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)}, 170 {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 171 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)}, 172 {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 173 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)}, 174 {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 175 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)}, 176 {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 178 {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 180 {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 182 {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 184 {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 185 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 186 {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 187 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 188 {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 189 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 190 {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 191 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 192 {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 193 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 194 {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 195 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 196 {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 197 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, 198 {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 199 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 200 {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 201 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 202 {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 204 {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 206 {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 208 {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 210 {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, 212 {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 214 {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 216 {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 218 {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, 220 
{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, 222 {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, 224 {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, 226 {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 227 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 228 {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 229 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 230 {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 231 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 232 {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 233 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 234 {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 235 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 236 {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 237 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 238 {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 239 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 240 {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 241 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 242 {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 243 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 244 {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 245 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 246 {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 247 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, 248 {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 249 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 250 {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 252 {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 254 {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 256 {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 258 {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 260 {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 262 {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 264 {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 266 {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 268 {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 270 {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 272 {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 274 {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 276 277 {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 278 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 279 {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 280 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 281 {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 282 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 283 {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 284 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 285 {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 286 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 287 {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 288 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 289 {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 291 {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 293 {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 295 {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 297 {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 299 {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 300 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 301 }; 302 303 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 304 { 305 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 306 .ethter_type = cpu_to_le16(ETH_P_LLDP), 307 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e}, 308 .i_port_bitmap = 0x1, 309 }, 310 }; 311 312 static const struct key_info meta_data_key_info[] = { 313 { PACKET_TYPE_ID, 6 }, 314 { IP_FRAGEMENT, 1 }, 315 { ROCE_TYPE, 1 }, 316 { NEXT_KEY, 5 }, 317 { VLAN_NUMBER, 2 }, 318 { SRC_VPORT, 12 }, 319 { DST_VPORT, 12 }, 320 { TUNNEL_PACKET, 1 }, 321 }; 322 323 static const struct key_info tuple_key_info[] = { 324 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 }, 325 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 }, 326 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 }, 327 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, 328 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 }, 329 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 }, 330 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 }, 331 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 }, 332 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 }, 333 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 }, 334 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 }, 335 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 }, 336 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 }, 337 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 }, 338 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 }, 339 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 }, 340 { INNER_DST_MAC, 48, KEY_OPT_MAC, 341 offsetof(struct hclge_fd_rule, tuples.dst_mac), 342 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) }, 343 { INNER_SRC_MAC, 48, KEY_OPT_MAC, 344 offsetof(struct hclge_fd_rule, tuples.src_mac), 345 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) }, 346 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16, 347 offsetof(struct hclge_fd_rule, tuples.vlan_tag1), 348 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) }, 349 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, 350 { INNER_ETH_TYPE, 16, KEY_OPT_LE16, 351 offsetof(struct hclge_fd_rule, tuples.ether_proto), 352 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) }, 353 { INNER_L2_RSV, 16, KEY_OPT_LE16, 354 offsetof(struct hclge_fd_rule, tuples.l2_user_def), 355 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) }, 356 { INNER_IP_TOS, 8, KEY_OPT_U8, 357 offsetof(struct hclge_fd_rule, tuples.ip_tos), 358 offsetof(struct 
hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue: it posts the
 * prefilled descriptors to the queue, cleans the queue, etc.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				 int num, bool is_special)
{
	int i;

	trace_hclge_pf_cmd_send(hw, desc, 0, num);

	if (!is_special) {
		for (i = 1; i < num; i++)
			trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
	} else {
		for (i = 1; i < num; i++)
			trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
							i, num);
	}
}

static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				int num, bool is_special)
{
	int i;

	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		return;

	trace_hclge_pf_cmd_get(hw, desc, 0, num);

	if (!is_special) {
		for (i = 1; i < num; i++)
			trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
	} else {
		for (i = 1; i < num; i++)
			trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
						       i, num);
	}
}

static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
	.trace_cmd_send = hclge_trace_cmd_send,
	.trace_cmd_get = hclge_trace_cmd_get,
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	/* The first desc has a 64-bit header, so the data size needs to be
	 * reduced by one.
	 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC 4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so the header needs to be
	 * taken into account here.
	 */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs total register number of both valid registers and
	 * reserved registers, but the old firmware only returns number
	 * of valid registers in device V2. To be compatible with these
	 * devices, driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}

int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}

static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count++;

	return count;
}

static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}

static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i <
size; i++) { 607 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) 608 continue; 609 610 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); 611 buff = buff + ETH_GSTRING_LEN; 612 } 613 614 return (u8 *)buff; 615 } 616 617 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 618 { 619 struct hnae3_handle *handle; 620 int status; 621 622 handle = &hdev->vport[0].nic; 623 if (handle->client) { 624 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 625 if (status) { 626 dev_err(&hdev->pdev->dev, 627 "Update TQPS stats fail, status = %d.\n", 628 status); 629 } 630 } 631 632 hclge_update_fec_stats(hdev); 633 634 status = hclge_mac_update_stats(hdev); 635 if (status) 636 dev_err(&hdev->pdev->dev, 637 "Update MAC stats fail, status = %d.\n", status); 638 } 639 640 static void hclge_update_stats(struct hnae3_handle *handle) 641 { 642 struct hclge_vport *vport = hclge_get_vport(handle); 643 struct hclge_dev *hdev = vport->back; 644 int status; 645 646 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 647 return; 648 649 status = hclge_mac_update_stats(hdev); 650 if (status) 651 dev_err(&hdev->pdev->dev, 652 "Update MAC stats fail, status = %d.\n", 653 status); 654 655 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 656 if (status) 657 dev_err(&hdev->pdev->dev, 658 "Update TQPS stats fail, status = %d.\n", 659 status); 660 661 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); 662 } 663 664 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 665 { 666 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \ 667 HNAE3_SUPPORT_PHY_LOOPBACK | \ 668 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \ 669 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \ 670 HNAE3_SUPPORT_EXTERNAL_LOOPBACK) 671 672 struct hclge_vport *vport = hclge_get_vport(handle); 673 struct hclge_dev *hdev = vport->back; 674 int count = 0; 675 676 /* Loopback test support rules: 677 * mac: only GE mode support 678 * serdes: all mac mode will support include GE/XGE/LGE/CGE 679 * phy: only support when phy device exist on board 680 */ 681 if (stringset == ETH_SS_TEST) { 682 /* clear loopback bit flags at first */ 683 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 684 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || 685 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 686 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 687 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 688 count += 1; 689 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; 690 } 691 692 if (hdev->ae_dev->dev_specs.hilink_version != 693 HCLGE_HILINK_H60) { 694 count += 1; 695 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; 696 } 697 698 count += 1; 699 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; 700 count += 1; 701 handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK; 702 703 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && 704 hdev->hw.mac.phydev->drv->set_loopback) || 705 hnae3_dev_phy_imp_supported(hdev)) { 706 count += 1; 707 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; 708 } 709 } else if (stringset == ETH_SS_STATS) { 710 count = hclge_comm_get_count(hdev, g_mac_stats_string, 711 ARRAY_SIZE(g_mac_stats_string)) + 712 hclge_comm_tqps_get_sset_count(handle); 713 } 714 715 return count; 716 } 717 718 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, 719 u8 *data) 720 { 721 struct hclge_vport *vport = hclge_get_vport(handle); 722 struct hclge_dev *hdev = vport->back; 723 u8 *p = (char *)data; 724 int size; 725 726 if 
(stringset == ETH_SS_STATS) { 727 size = ARRAY_SIZE(g_mac_stats_string); 728 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, 729 size, p); 730 p = hclge_comm_tqps_get_strings(handle, p); 731 } else if (stringset == ETH_SS_TEST) { 732 if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { 733 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL], 734 ETH_GSTRING_LEN); 735 p += ETH_GSTRING_LEN; 736 } 737 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { 738 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], 739 ETH_GSTRING_LEN); 740 p += ETH_GSTRING_LEN; 741 } 742 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { 743 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], 744 ETH_GSTRING_LEN); 745 p += ETH_GSTRING_LEN; 746 } 747 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { 748 memcpy(p, 749 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], 750 ETH_GSTRING_LEN); 751 p += ETH_GSTRING_LEN; 752 } 753 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 754 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY], 755 ETH_GSTRING_LEN); 756 p += ETH_GSTRING_LEN; 757 } 758 } 759 } 760 761 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) 762 { 763 struct hclge_vport *vport = hclge_get_vport(handle); 764 struct hclge_dev *hdev = vport->back; 765 u64 *p; 766 767 p = hclge_comm_get_stats(hdev, g_mac_stats_string, 768 ARRAY_SIZE(g_mac_stats_string), data); 769 p = hclge_comm_tqps_get_stats(handle, p); 770 } 771 772 static void hclge_get_mac_stat(struct hnae3_handle *handle, 773 struct hns3_mac_stats *mac_stats) 774 { 775 struct hclge_vport *vport = hclge_get_vport(handle); 776 struct hclge_dev *hdev = vport->back; 777 778 hclge_update_stats(handle); 779 780 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; 781 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; 782 } 783 784 static int hclge_parse_func_status(struct hclge_dev *hdev, 785 struct hclge_func_status_cmd *status) 786 { 787 #define HCLGE_MAC_ID_MASK 0xF 788 789 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 790 return -EINVAL; 791 792 /* Set the pf to main pf */ 793 if (status->pf_state & HCLGE_PF_STATE_MAIN) 794 hdev->flag |= HCLGE_FLAG_MAIN; 795 else 796 hdev->flag &= ~HCLGE_FLAG_MAIN; 797 798 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; 799 return 0; 800 } 801 802 static int hclge_query_function_status(struct hclge_dev *hdev) 803 { 804 #define HCLGE_QUERY_MAX_CNT 5 805 806 struct hclge_func_status_cmd *req; 807 struct hclge_desc desc; 808 int timeout = 0; 809 int ret; 810 811 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 812 req = (struct hclge_func_status_cmd *)desc.data; 813 814 do { 815 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 816 if (ret) { 817 dev_err(&hdev->pdev->dev, 818 "query function status failed %d.\n", ret); 819 return ret; 820 } 821 822 /* Check pf reset is done */ 823 if (req->pf_state) 824 break; 825 usleep_range(1000, 2000); 826 } while (timeout++ < HCLGE_QUERY_MAX_CNT); 827 828 return hclge_parse_func_status(hdev, req); 829 } 830 831 static int hclge_query_pf_resource(struct hclge_dev *hdev) 832 { 833 struct hclge_pf_res_cmd *req; 834 struct hclge_desc desc; 835 int ret; 836 837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 838 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 839 if (ret) { 840 dev_err(&hdev->pdev->dev, 841 "query pf resource failed %d.\n", ret); 842 return ret; 843 } 844 845 req = (struct hclge_pf_res_cmd *)desc.data; 846 hdev->num_tqps = le16_to_cpu(req->tqp_num) + 847 
le16_to_cpu(req->ext_tqp_num); 848 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 849 850 if (req->tx_buf_size) 851 hdev->tx_buf_size = 852 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; 853 else 854 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; 855 856 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); 857 858 if (req->dv_buf_size) 859 hdev->dv_buf_size = 860 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; 861 else 862 hdev->dv_buf_size = HCLGE_DEFAULT_DV; 863 864 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); 865 866 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); 867 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { 868 dev_err(&hdev->pdev->dev, 869 "only %u msi resources available, not enough for pf(min:2).\n", 870 hdev->num_nic_msi); 871 return -EINVAL; 872 } 873 874 if (hnae3_dev_roce_supported(hdev)) { 875 hdev->num_roce_msi = 876 le16_to_cpu(req->pf_intr_vector_number_roce); 877 878 /* PF should have NIC vectors and Roce vectors, 879 * NIC vectors are queued before Roce vectors. 880 */ 881 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; 882 } else { 883 hdev->num_msi = hdev->num_nic_msi; 884 } 885 886 return 0; 887 } 888 889 static int hclge_parse_speed(u8 speed_cmd, u32 *speed) 890 { 891 switch (speed_cmd) { 892 case HCLGE_FW_MAC_SPEED_10M: 893 *speed = HCLGE_MAC_SPEED_10M; 894 break; 895 case HCLGE_FW_MAC_SPEED_100M: 896 *speed = HCLGE_MAC_SPEED_100M; 897 break; 898 case HCLGE_FW_MAC_SPEED_1G: 899 *speed = HCLGE_MAC_SPEED_1G; 900 break; 901 case HCLGE_FW_MAC_SPEED_10G: 902 *speed = HCLGE_MAC_SPEED_10G; 903 break; 904 case HCLGE_FW_MAC_SPEED_25G: 905 *speed = HCLGE_MAC_SPEED_25G; 906 break; 907 case HCLGE_FW_MAC_SPEED_40G: 908 *speed = HCLGE_MAC_SPEED_40G; 909 break; 910 case HCLGE_FW_MAC_SPEED_50G: 911 *speed = HCLGE_MAC_SPEED_50G; 912 break; 913 case HCLGE_FW_MAC_SPEED_100G: 914 *speed = HCLGE_MAC_SPEED_100G; 915 break; 916 case HCLGE_FW_MAC_SPEED_200G: 917 *speed = HCLGE_MAC_SPEED_200G; 918 break; 919 default: 920 return -EINVAL; 921 } 922 923 return 0; 924 } 925 926 static const struct hclge_speed_bit_map speed_bit_map[] = { 927 {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT}, 928 {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT}, 929 {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT}, 930 {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, 931 {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, 932 {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, 933 {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS}, 934 {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS}, 935 {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS}, 936 }; 937 938 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) 939 { 940 u16 i; 941 942 for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { 943 if (speed == speed_bit_map[i].speed) { 944 *speed_bit = speed_bit_map[i].speed_bit; 945 return 0; 946 } 947 } 948 949 return -EINVAL; 950 } 951 952 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) 953 { 954 struct hclge_vport *vport = hclge_get_vport(handle); 955 struct hclge_dev *hdev = vport->back; 956 u32 speed_ability = hdev->hw.mac.speed_ability; 957 u32 speed_bit = 0; 958 int ret; 959 960 ret = hclge_get_speed_bit(speed, &speed_bit); 961 if (ret) 962 return ret; 963 964 if (speed_bit & speed_ability) 965 return 0; 966 967 return -EINVAL; 968 } 969 970 static void hclge_update_fec_support(struct hclge_mac *mac) 971 { 972 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); 973 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 
mac->supported); 974 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported); 975 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 976 977 if (mac->fec_ability & BIT(HNAE3_FEC_BASER)) 978 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 979 mac->supported); 980 if (mac->fec_ability & BIT(HNAE3_FEC_RS)) 981 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 982 mac->supported); 983 if (mac->fec_ability & BIT(HNAE3_FEC_LLRS)) 984 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 985 mac->supported); 986 if (mac->fec_ability & BIT(HNAE3_FEC_NONE)) 987 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 988 mac->supported); 989 } 990 991 static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = { 992 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, 993 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, 994 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, 995 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, 996 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT}, 997 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, 998 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT}, 999 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1000 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, 1001 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, 1002 }; 1003 1004 static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = { 1005 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, 1006 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, 1007 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT}, 1008 {HCLGE_SUPPORT_100G_R4_BIT, 1009 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, 1010 {HCLGE_SUPPORT_100G_R2_BIT, 1011 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT}, 1012 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1013 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, 1014 {HCLGE_SUPPORT_200G_R4_BIT, 1015 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, 1016 }; 1017 1018 static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = { 1019 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, 1020 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, 1021 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, 1022 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, 1023 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT}, 1024 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, 1025 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT}, 1026 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1027 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, 1028 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, 1029 }; 1030 1031 static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = { 1032 {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, 1033 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, 1034 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, 1035 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, 1036 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, 1037 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT}, 1038 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, 1039 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT}, 1040 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1041 
ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, 1042 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, 1043 }; 1044 1045 static void hclge_convert_setting_sr(u16 speed_ability, 1046 unsigned long *link_mode) 1047 { 1048 int i; 1049 1050 for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) { 1051 if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit) 1052 linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode, 1053 link_mode); 1054 } 1055 } 1056 1057 static void hclge_convert_setting_lr(u16 speed_ability, 1058 unsigned long *link_mode) 1059 { 1060 int i; 1061 1062 for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) { 1063 if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit) 1064 linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode, 1065 link_mode); 1066 } 1067 } 1068 1069 static void hclge_convert_setting_cr(u16 speed_ability, 1070 unsigned long *link_mode) 1071 { 1072 int i; 1073 1074 for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) { 1075 if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit) 1076 linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode, 1077 link_mode); 1078 } 1079 } 1080 1081 static void hclge_convert_setting_kr(u16 speed_ability, 1082 unsigned long *link_mode) 1083 { 1084 int i; 1085 1086 for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) { 1087 if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit) 1088 linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode, 1089 link_mode); 1090 } 1091 } 1092 1093 static void hclge_convert_setting_fec(struct hclge_mac *mac) 1094 { 1095 /* If firmware has reported fec_ability, don't need to convert by speed */ 1096 if (mac->fec_ability) 1097 goto out; 1098 1099 switch (mac->speed) { 1100 case HCLGE_MAC_SPEED_10G: 1101 case HCLGE_MAC_SPEED_40G: 1102 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) | 1103 BIT(HNAE3_FEC_NONE); 1104 break; 1105 case HCLGE_MAC_SPEED_25G: 1106 case HCLGE_MAC_SPEED_50G: 1107 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | 1108 BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE); 1109 break; 1110 case HCLGE_MAC_SPEED_100G: 1111 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | 1112 BIT(HNAE3_FEC_NONE); 1113 break; 1114 case HCLGE_MAC_SPEED_200G: 1115 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | 1116 BIT(HNAE3_FEC_LLRS); 1117 break; 1118 default: 1119 mac->fec_ability = 0; 1120 break; 1121 } 1122 1123 out: 1124 hclge_update_fec_support(mac); 1125 } 1126 1127 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, 1128 u16 speed_ability) 1129 { 1130 struct hclge_mac *mac = &hdev->hw.mac; 1131 1132 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1133 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 1134 mac->supported); 1135 1136 hclge_convert_setting_sr(speed_ability, mac->supported); 1137 hclge_convert_setting_lr(speed_ability, mac->supported); 1138 hclge_convert_setting_cr(speed_ability, mac->supported); 1139 if (hnae3_dev_fec_supported(hdev)) 1140 hclge_convert_setting_fec(mac); 1141 1142 if (hnae3_dev_pause_supported(hdev)) 1143 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1144 1145 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); 1146 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1147 } 1148 1149 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, 1150 u16 speed_ability) 1151 { 1152 struct hclge_mac *mac = &hdev->hw.mac; 1153 1154 hclge_convert_setting_kr(speed_ability, mac->supported); 1155 if (hnae3_dev_fec_supported(hdev)) 
1156 hclge_convert_setting_fec(mac); 1157 1158 if (hnae3_dev_pause_supported(hdev)) 1159 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1160 1161 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); 1162 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1163 } 1164 1165 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, 1166 u16 speed_ability) 1167 { 1168 unsigned long *supported = hdev->hw.mac.supported; 1169 1170 /* default to support all speed for GE port */ 1171 if (!speed_ability) 1172 speed_ability = HCLGE_SUPPORT_GE; 1173 1174 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1175 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1176 supported); 1177 1178 if (speed_ability & HCLGE_SUPPORT_100M_BIT) { 1179 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 1180 supported); 1181 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 1182 supported); 1183 } 1184 1185 if (speed_ability & HCLGE_SUPPORT_10M_BIT) { 1186 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); 1187 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); 1188 } 1189 1190 if (hnae3_dev_pause_supported(hdev)) { 1191 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); 1192 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); 1193 } 1194 1195 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); 1196 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); 1197 } 1198 1199 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) 1200 { 1201 u8 media_type = hdev->hw.mac.media_type; 1202 1203 if (media_type == HNAE3_MEDIA_TYPE_FIBER) 1204 hclge_parse_fiber_link_mode(hdev, speed_ability); 1205 else if (media_type == HNAE3_MEDIA_TYPE_COPPER) 1206 hclge_parse_copper_link_mode(hdev, speed_ability); 1207 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) 1208 hclge_parse_backplane_link_mode(hdev, speed_ability); 1209 } 1210 1211 static u32 hclge_get_max_speed(u16 speed_ability) 1212 { 1213 if (speed_ability & HCLGE_SUPPORT_200G_BITS) 1214 return HCLGE_MAC_SPEED_200G; 1215 1216 if (speed_ability & HCLGE_SUPPORT_100G_BITS) 1217 return HCLGE_MAC_SPEED_100G; 1218 1219 if (speed_ability & HCLGE_SUPPORT_50G_BITS) 1220 return HCLGE_MAC_SPEED_50G; 1221 1222 if (speed_ability & HCLGE_SUPPORT_40G_BIT) 1223 return HCLGE_MAC_SPEED_40G; 1224 1225 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1226 return HCLGE_MAC_SPEED_25G; 1227 1228 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1229 return HCLGE_MAC_SPEED_10G; 1230 1231 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1232 return HCLGE_MAC_SPEED_1G; 1233 1234 if (speed_ability & HCLGE_SUPPORT_100M_BIT) 1235 return HCLGE_MAC_SPEED_100M; 1236 1237 if (speed_ability & HCLGE_SUPPORT_10M_BIT) 1238 return HCLGE_MAC_SPEED_10M; 1239 1240 return HCLGE_MAC_SPEED_1G; 1241 } 1242 1243 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) 1244 { 1245 #define HCLGE_TX_SPARE_SIZE_UNIT 4096 1246 #define SPEED_ABILITY_EXT_SHIFT 8 1247 1248 struct hclge_cfg_param_cmd *req; 1249 u64 mac_addr_tmp_high; 1250 u16 speed_ability_ext; 1251 u64 mac_addr_tmp; 1252 unsigned int i; 1253 1254 req = (struct hclge_cfg_param_cmd *)desc[0].data; 1255 1256 /* get the configuration */ 1257 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1258 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); 1259 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1260 HCLGE_CFG_TQP_DESC_N_M, 1261 HCLGE_CFG_TQP_DESC_N_S); 1262 1263 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), 
1264 HCLGE_CFG_PHY_ADDR_M, 1265 HCLGE_CFG_PHY_ADDR_S); 1266 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), 1267 HCLGE_CFG_MEDIA_TP_M, 1268 HCLGE_CFG_MEDIA_TP_S); 1269 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), 1270 HCLGE_CFG_RX_BUF_LEN_M, 1271 HCLGE_CFG_RX_BUF_LEN_S); 1272 /* get mac_address */ 1273 mac_addr_tmp = __le32_to_cpu(req->param[2]); 1274 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), 1275 HCLGE_CFG_MAC_ADDR_H_M, 1276 HCLGE_CFG_MAC_ADDR_H_S); 1277 1278 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 1279 1280 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), 1281 HCLGE_CFG_DEFAULT_SPEED_M, 1282 HCLGE_CFG_DEFAULT_SPEED_S); 1283 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), 1284 HCLGE_CFG_RSS_SIZE_M, 1285 HCLGE_CFG_RSS_SIZE_S); 1286 1287 for (i = 0; i < ETH_ALEN; i++) 1288 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 1289 1290 req = (struct hclge_cfg_param_cmd *)desc[1].data; 1291 cfg->numa_node_map = __le32_to_cpu(req->param[0]); 1292 1293 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), 1294 HCLGE_CFG_SPEED_ABILITY_M, 1295 HCLGE_CFG_SPEED_ABILITY_S); 1296 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]), 1297 HCLGE_CFG_SPEED_ABILITY_EXT_M, 1298 HCLGE_CFG_SPEED_ABILITY_EXT_S); 1299 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT; 1300 1301 cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]), 1302 HCLGE_CFG_VLAN_FLTR_CAP_M, 1303 HCLGE_CFG_VLAN_FLTR_CAP_S); 1304 1305 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), 1306 HCLGE_CFG_UMV_TBL_SPACE_M, 1307 HCLGE_CFG_UMV_TBL_SPACE_S); 1308 1309 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), 1310 HCLGE_CFG_PF_RSS_SIZE_M, 1311 HCLGE_CFG_PF_RSS_SIZE_S); 1312 1313 /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a 1314 * power of 2, instead of reading out directly. This would 1315 * be more flexible for future changes and expansions. 1316 * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S, 1317 * it does not make sense if PF's field is 0. In this case, PF and VF 1318 * has the same max rss size filed: HCLGE_CFG_RSS_SIZE_S. 1319 */ 1320 cfg->pf_rss_size_max = cfg->pf_rss_size_max ? 1321 1U << cfg->pf_rss_size_max : 1322 cfg->vf_rss_size_max; 1323 1324 /* The unit of the tx spare buffer size queried from configuration 1325 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is 1326 * needed here. 
 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The read length is given in units of 4 bytes when it is
		 * sent to hardware.
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM 8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
	ae_dev->dev_specs.tnl_num = req1->tnl_num;
	ae_dev->dev_specs.hilink_version = req1->hilink_version;
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num =
HCLGE_MAX_NON_TSO_BD_NUM; 1418 if (!dev_specs->rss_ind_tbl_size) 1419 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; 1420 if (!dev_specs->rss_key_size) 1421 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 1422 if (!dev_specs->max_tm_rate) 1423 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; 1424 if (!dev_specs->max_qset_num) 1425 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM; 1426 if (!dev_specs->max_int_gl) 1427 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; 1428 if (!dev_specs->max_frm_size) 1429 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; 1430 if (!dev_specs->umv_size) 1431 dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; 1432 } 1433 1434 static int hclge_query_mac_stats_num(struct hclge_dev *hdev) 1435 { 1436 u32 reg_num = 0; 1437 int ret; 1438 1439 ret = hclge_mac_query_reg_num(hdev, ®_num); 1440 if (ret && ret != -EOPNOTSUPP) 1441 return ret; 1442 1443 hdev->ae_dev->dev_specs.mac_stats_num = reg_num; 1444 return 0; 1445 } 1446 1447 static int hclge_query_dev_specs(struct hclge_dev *hdev) 1448 { 1449 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM]; 1450 int ret; 1451 int i; 1452 1453 ret = hclge_query_mac_stats_num(hdev); 1454 if (ret) 1455 return ret; 1456 1457 /* set default specifications as devices lower than version V3 do not 1458 * support querying specifications from firmware. 1459 */ 1460 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 1461 hclge_set_default_dev_specs(hdev); 1462 return 0; 1463 } 1464 1465 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 1466 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, 1467 true); 1468 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 1469 } 1470 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); 1471 1472 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); 1473 if (ret) 1474 return ret; 1475 1476 hclge_parse_dev_specs(hdev, desc); 1477 hclge_check_dev_specs(hdev); 1478 1479 return 0; 1480 } 1481 1482 static int hclge_get_cap(struct hclge_dev *hdev) 1483 { 1484 int ret; 1485 1486 ret = hclge_query_function_status(hdev); 1487 if (ret) { 1488 dev_err(&hdev->pdev->dev, 1489 "query function status error %d.\n", ret); 1490 return ret; 1491 } 1492 1493 /* get pf resource */ 1494 return hclge_query_pf_resource(hdev); 1495 } 1496 1497 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) 1498 { 1499 #define HCLGE_MIN_TX_DESC 64 1500 #define HCLGE_MIN_RX_DESC 64 1501 1502 if (!is_kdump_kernel()) 1503 return; 1504 1505 dev_info(&hdev->pdev->dev, 1506 "Running kdump kernel. 
Using minimal resources\n");

	/* The minimal number of queue pairs equals the number of vports. */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TC configuration is currently not supported. */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ?
1 : 0; 1623 1624 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1625 if (ret) 1626 dev_err(&hdev->pdev->dev, 1627 "GRO hardware config cmd failed, ret = %d\n", ret); 1628 1629 return ret; 1630 } 1631 1632 static int hclge_alloc_tqps(struct hclge_dev *hdev) 1633 { 1634 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1635 struct hclge_comm_tqp *tqp; 1636 int i; 1637 1638 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 1639 sizeof(struct hclge_comm_tqp), GFP_KERNEL); 1640 if (!hdev->htqp) 1641 return -ENOMEM; 1642 1643 tqp = hdev->htqp; 1644 1645 for (i = 0; i < hdev->num_tqps; i++) { 1646 tqp->dev = &hdev->pdev->dev; 1647 tqp->index = i; 1648 1649 tqp->q.ae_algo = &ae_algo; 1650 tqp->q.buf_size = hdev->rx_buf_len; 1651 tqp->q.tx_desc_num = hdev->num_tx_desc; 1652 tqp->q.rx_desc_num = hdev->num_rx_desc; 1653 1654 /* need an extended offset to configure queues >= 1655 * HCLGE_TQP_MAX_SIZE_DEV_V2 1656 */ 1657 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) 1658 tqp->q.io_base = hdev->hw.hw.io_base + 1659 HCLGE_TQP_REG_OFFSET + 1660 i * HCLGE_TQP_REG_SIZE; 1661 else 1662 tqp->q.io_base = hdev->hw.hw.io_base + 1663 HCLGE_TQP_REG_OFFSET + 1664 HCLGE_TQP_EXT_REG_OFFSET + 1665 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * 1666 HCLGE_TQP_REG_SIZE; 1667 1668 /* when device supports tx push and has device memory, 1669 * the queue can execute push mode or doorbell mode on 1670 * device memory. 1671 */ 1672 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) 1673 tqp->q.mem_base = hdev->hw.hw.mem_base + 1674 HCLGE_TQP_MEM_OFFSET(hdev, i); 1675 1676 tqp++; 1677 } 1678 1679 return 0; 1680 } 1681 1682 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, 1683 u16 tqp_pid, u16 tqp_vid, bool is_pf) 1684 { 1685 struct hclge_tqp_map_cmd *req; 1686 struct hclge_desc desc; 1687 int ret; 1688 1689 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); 1690 1691 req = (struct hclge_tqp_map_cmd *)desc.data; 1692 req->tqp_id = cpu_to_le16(tqp_pid); 1693 req->tqp_vf = func_id; 1694 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; 1695 if (!is_pf) 1696 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; 1697 req->tqp_vid = cpu_to_le16(tqp_vid); 1698 1699 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1700 if (ret) 1701 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); 1702 1703 return ret; 1704 } 1705 1706 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) 1707 { 1708 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 1709 struct hclge_dev *hdev = vport->back; 1710 int i, alloced; 1711 1712 for (i = 0, alloced = 0; i < hdev->num_tqps && 1713 alloced < num_tqps; i++) { 1714 if (!hdev->htqp[i].alloced) { 1715 hdev->htqp[i].q.handle = &vport->nic; 1716 hdev->htqp[i].q.tqp_index = alloced; 1717 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; 1718 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; 1719 kinfo->tqp[alloced] = &hdev->htqp[i].q; 1720 hdev->htqp[i].alloced = true; 1721 alloced++; 1722 } 1723 } 1724 vport->alloc_tqps = alloced; 1725 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, 1726 vport->alloc_tqps / hdev->tm_info.num_tc); 1727 1728 /* ensure one to one mapping between irq and queue at default */ 1729 kinfo->rss_size = min_t(u16, kinfo->rss_size, 1730 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); 1731 1732 return 0; 1733 } 1734 1735 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, 1736 u16 num_tx_desc, u16 num_rx_desc) 1737 1738 { 1739 struct hnae3_handle *nic = &vport->nic; 1740 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1741 
struct hclge_dev *hdev = vport->back; 1742 int ret; 1743 1744 kinfo->num_tx_desc = num_tx_desc; 1745 kinfo->num_rx_desc = num_rx_desc; 1746 1747 kinfo->rx_buf_len = hdev->rx_buf_len; 1748 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; 1749 1750 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, 1751 sizeof(struct hnae3_queue *), GFP_KERNEL); 1752 if (!kinfo->tqp) 1753 return -ENOMEM; 1754 1755 ret = hclge_assign_tqp(vport, num_tqps); 1756 if (ret) 1757 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1758 1759 return ret; 1760 } 1761 1762 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, 1763 struct hclge_vport *vport) 1764 { 1765 struct hnae3_handle *nic = &vport->nic; 1766 struct hnae3_knic_private_info *kinfo; 1767 u16 i; 1768 1769 kinfo = &nic->kinfo; 1770 for (i = 0; i < vport->alloc_tqps; i++) { 1771 struct hclge_comm_tqp *q = 1772 container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); 1773 bool is_pf; 1774 int ret; 1775 1776 is_pf = !(vport->vport_id); 1777 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, 1778 i, is_pf); 1779 if (ret) 1780 return ret; 1781 } 1782 1783 return 0; 1784 } 1785 1786 static int hclge_map_tqp(struct hclge_dev *hdev) 1787 { 1788 struct hclge_vport *vport = hdev->vport; 1789 u16 i, num_vport; 1790 1791 num_vport = hdev->num_req_vfs + 1; 1792 for (i = 0; i < num_vport; i++) { 1793 int ret; 1794 1795 ret = hclge_map_tqp_to_vport(hdev, vport); 1796 if (ret) 1797 return ret; 1798 1799 vport++; 1800 } 1801 1802 return 0; 1803 } 1804 1805 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1806 { 1807 struct hnae3_handle *nic = &vport->nic; 1808 struct hclge_dev *hdev = vport->back; 1809 int ret; 1810 1811 nic->pdev = hdev->pdev; 1812 nic->ae_algo = &ae_algo; 1813 nic->numa_node_mask = hdev->numa_node_mask; 1814 nic->kinfo.io_base = hdev->hw.hw.io_base; 1815 1816 ret = hclge_knic_setup(vport, num_tqps, 1817 hdev->num_tx_desc, hdev->num_rx_desc); 1818 if (ret) 1819 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); 1820 1821 return ret; 1822 } 1823 1824 static int hclge_alloc_vport(struct hclge_dev *hdev) 1825 { 1826 struct pci_dev *pdev = hdev->pdev; 1827 struct hclge_vport *vport; 1828 u32 tqp_main_vport; 1829 u32 tqp_per_vport; 1830 int num_vport, i; 1831 int ret; 1832 1833 /* We need to alloc a vport for main NIC of PF */ 1834 num_vport = hdev->num_req_vfs + 1; 1835 1836 if (hdev->num_tqps < num_vport) { 1837 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", 1838 hdev->num_tqps, num_vport); 1839 return -EINVAL; 1840 } 1841 1842 /* Alloc the same number of TQPs for every vport */ 1843 tqp_per_vport = hdev->num_tqps / num_vport; 1844 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1845 1846 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1847 GFP_KERNEL); 1848 if (!vport) 1849 return -ENOMEM; 1850 1851 hdev->vport = vport; 1852 hdev->num_alloc_vport = num_vport; 1853 1854 if (IS_ENABLED(CONFIG_PCI_IOV)) 1855 hdev->num_alloc_vfs = hdev->num_req_vfs; 1856 1857 for (i = 0; i < num_vport; i++) { 1858 vport->back = hdev; 1859 vport->vport_id = i; 1860 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; 1861 vport->mps = HCLGE_MAC_DEFAULT_FRAME; 1862 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; 1863 vport->port_base_vlan_cfg.tbl_sta = true; 1864 vport->rxvlan_cfg.rx_vlan_offload_en = true; 1865 vport->req_vlan_fltr_en = true; 1866 INIT_LIST_HEAD(&vport->vlan_list); 1867 INIT_LIST_HEAD(&vport->uc_mac_list); 1868 
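		/* uc_mac_list above and mc_mac_list below hold the unicast and
		 * multicast MAC entries requested for this vport; they are
		 * walked under mac_list_lock when the driver syncs them to
		 * hardware (see hclge_sync_mac_table()).
		 */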
INIT_LIST_HEAD(&vport->mc_mac_list); 1869 spin_lock_init(&vport->mac_list_lock); 1870 1871 if (i == 0) 1872 ret = hclge_vport_setup(vport, tqp_main_vport); 1873 else 1874 ret = hclge_vport_setup(vport, tqp_per_vport); 1875 if (ret) { 1876 dev_err(&pdev->dev, 1877 "vport setup failed for vport %d, %d\n", 1878 i, ret); 1879 return ret; 1880 } 1881 1882 vport++; 1883 } 1884 1885 return 0; 1886 } 1887 1888 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1889 struct hclge_pkt_buf_alloc *buf_alloc) 1890 { 1891 /* TX buffer size is unit by 128 byte */ 1892 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1893 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1894 struct hclge_tx_buff_alloc_cmd *req; 1895 struct hclge_desc desc; 1896 int ret; 1897 u8 i; 1898 1899 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1900 1901 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1902 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1903 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1904 1905 req->tx_pkt_buff[i] = 1906 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1907 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1908 } 1909 1910 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1911 if (ret) 1912 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1913 ret); 1914 1915 return ret; 1916 } 1917 1918 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1919 struct hclge_pkt_buf_alloc *buf_alloc) 1920 { 1921 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1922 1923 if (ret) 1924 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); 1925 1926 return ret; 1927 } 1928 1929 static u32 hclge_get_tc_num(struct hclge_dev *hdev) 1930 { 1931 unsigned int i; 1932 u32 cnt = 0; 1933 1934 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1935 if (hdev->hw_tc_map & BIT(i)) 1936 cnt++; 1937 return cnt; 1938 } 1939 1940 /* Get the number of pfc enabled TCs, which have private buffer */ 1941 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1942 struct hclge_pkt_buf_alloc *buf_alloc) 1943 { 1944 struct hclge_priv_buf *priv; 1945 unsigned int i; 1946 int cnt = 0; 1947 1948 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1949 priv = &buf_alloc->priv_buf[i]; 1950 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1951 priv->enable) 1952 cnt++; 1953 } 1954 1955 return cnt; 1956 } 1957 1958 /* Get the number of pfc disabled TCs, which have private buffer */ 1959 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1960 struct hclge_pkt_buf_alloc *buf_alloc) 1961 { 1962 struct hclge_priv_buf *priv; 1963 unsigned int i; 1964 int cnt = 0; 1965 1966 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1967 priv = &buf_alloc->priv_buf[i]; 1968 if (hdev->hw_tc_map & BIT(i) && 1969 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1970 priv->enable) 1971 cnt++; 1972 } 1973 1974 return cnt; 1975 } 1976 1977 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1978 { 1979 struct hclge_priv_buf *priv; 1980 u32 rx_priv = 0; 1981 int i; 1982 1983 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1984 priv = &buf_alloc->priv_buf[i]; 1985 if (priv->enable) 1986 rx_priv += priv->buf_size; 1987 } 1988 return rx_priv; 1989 } 1990 1991 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1992 { 1993 u32 i, total_tx_size = 0; 1994 1995 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1996 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 1997 1998 return total_tx_size; 1999 } 2000 2001 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, 2002 struct hclge_pkt_buf_alloc *buf_alloc, 2003 u32 rx_all) 2004 { 2005 u32 
shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 2006 u32 tc_num = hclge_get_tc_num(hdev); 2007 u32 shared_buf, aligned_mps; 2008 u32 rx_priv; 2009 int i; 2010 2011 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); 2012 2013 if (hnae3_dev_dcb_supported(hdev)) 2014 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + 2015 hdev->dv_buf_size; 2016 else 2017 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF 2018 + hdev->dv_buf_size; 2019 2020 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 2021 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), 2022 HCLGE_BUF_SIZE_UNIT); 2023 2024 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); 2025 if (rx_all < rx_priv + shared_std) 2026 return false; 2027 2028 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); 2029 buf_alloc->s_buf.buf_size = shared_buf; 2030 if (hnae3_dev_dcb_supported(hdev)) { 2031 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; 2032 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 2033 - roundup(aligned_mps / HCLGE_BUF_DIV_BY, 2034 HCLGE_BUF_SIZE_UNIT); 2035 } else { 2036 buf_alloc->s_buf.self.high = aligned_mps + 2037 HCLGE_NON_DCB_ADDITIONAL_BUF; 2038 buf_alloc->s_buf.self.low = aligned_mps; 2039 } 2040 2041 if (hnae3_dev_dcb_supported(hdev)) { 2042 hi_thrd = shared_buf - hdev->dv_buf_size; 2043 2044 if (tc_num <= NEED_RESERVE_TC_NUM) 2045 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT 2046 / BUF_MAX_PERCENT; 2047 2048 if (tc_num) 2049 hi_thrd = hi_thrd / tc_num; 2050 2051 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps); 2052 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); 2053 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; 2054 } else { 2055 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; 2056 lo_thrd = aligned_mps; 2057 } 2058 2059 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2060 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 2061 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 2062 } 2063 2064 return true; 2065 } 2066 2067 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, 2068 struct hclge_pkt_buf_alloc *buf_alloc) 2069 { 2070 u32 i, total_size; 2071 2072 total_size = hdev->pkt_buf_size; 2073 2074 /* alloc tx buffer for all enabled tc */ 2075 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2076 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2077 2078 if (hdev->hw_tc_map & BIT(i)) { 2079 if (total_size < hdev->tx_buf_size) 2080 return -ENOMEM; 2081 2082 priv->tx_buf_size = hdev->tx_buf_size; 2083 } else { 2084 priv->tx_buf_size = 0; 2085 } 2086 2087 total_size -= priv->tx_buf_size; 2088 } 2089 2090 return 0; 2091 } 2092 2093 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, 2094 struct hclge_pkt_buf_alloc *buf_alloc) 2095 { 2096 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2097 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); 2098 unsigned int i; 2099 2100 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2101 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2102 2103 priv->enable = 0; 2104 priv->wl.low = 0; 2105 priv->wl.high = 0; 2106 priv->buf_size = 0; 2107 2108 if (!(hdev->hw_tc_map & BIT(i))) 2109 continue; 2110 2111 priv->enable = 1; 2112 2113 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 2114 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; 2115 priv->wl.high = roundup(priv->wl.low + aligned_mps, 2116 HCLGE_BUF_SIZE_UNIT); 2117 } else { 2118 priv->wl.low = 0; 2119 priv->wl.high = max ? 
(aligned_mps * HCLGE_BUF_MUL_BY) : 2120 aligned_mps; 2121 } 2122 2123 priv->buf_size = priv->wl.high + hdev->dv_buf_size; 2124 } 2125 2126 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2127 } 2128 2129 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, 2130 struct hclge_pkt_buf_alloc *buf_alloc) 2131 { 2132 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2133 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 2134 int i; 2135 2136 /* let the last to be cleared first */ 2137 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 2138 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2139 unsigned int mask = BIT((unsigned int)i); 2140 2141 if (hdev->hw_tc_map & mask && 2142 !(hdev->tm_info.hw_pfc_map & mask)) { 2143 /* Clear the no pfc TC private buffer */ 2144 priv->wl.low = 0; 2145 priv->wl.high = 0; 2146 priv->buf_size = 0; 2147 priv->enable = 0; 2148 no_pfc_priv_num--; 2149 } 2150 2151 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 2152 no_pfc_priv_num == 0) 2153 break; 2154 } 2155 2156 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2157 } 2158 2159 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, 2160 struct hclge_pkt_buf_alloc *buf_alloc) 2161 { 2162 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2163 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 2164 int i; 2165 2166 /* let the last to be cleared first */ 2167 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 2168 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2169 unsigned int mask = BIT((unsigned int)i); 2170 2171 if (hdev->hw_tc_map & mask && 2172 hdev->tm_info.hw_pfc_map & mask) { 2173 /* Reduce the number of pfc TC with private buffer */ 2174 priv->wl.low = 0; 2175 priv->enable = 0; 2176 priv->wl.high = 0; 2177 priv->buf_size = 0; 2178 pfc_priv_num--; 2179 } 2180 2181 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 2182 pfc_priv_num == 0) 2183 break; 2184 } 2185 2186 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2187 } 2188 2189 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, 2190 struct hclge_pkt_buf_alloc *buf_alloc) 2191 { 2192 #define COMPENSATE_BUFFER 0x3C00 2193 #define COMPENSATE_HALF_MPS_NUM 5 2194 #define PRIV_WL_GAP 0x1800 2195 2196 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2197 u32 tc_num = hclge_get_tc_num(hdev); 2198 u32 half_mps = hdev->mps >> 1; 2199 u32 min_rx_priv; 2200 unsigned int i; 2201 2202 if (tc_num) 2203 rx_priv = rx_priv / tc_num; 2204 2205 if (tc_num <= NEED_RESERVE_TC_NUM) 2206 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 2207 2208 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + 2209 COMPENSATE_HALF_MPS_NUM * half_mps; 2210 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); 2211 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); 2212 if (rx_priv < min_rx_priv) 2213 return false; 2214 2215 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2216 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2217 2218 priv->enable = 0; 2219 priv->wl.low = 0; 2220 priv->wl.high = 0; 2221 priv->buf_size = 0; 2222 2223 if (!(hdev->hw_tc_map & BIT(i))) 2224 continue; 2225 2226 priv->enable = 1; 2227 priv->buf_size = rx_priv; 2228 priv->wl.high = rx_priv - hdev->dv_buf_size; 2229 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 2230 } 2231 2232 buf_alloc->s_buf.buf_size = 0; 2233 2234 return true; 2235 } 2236 2237 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs 2238 * @hdev: pointer to struct hclge_dev 
2239 * @buf_alloc: pointer to buffer calculation data 2240 * @return: 0: calculate successful, negative: fail 2241 */ 2242 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, 2243 struct hclge_pkt_buf_alloc *buf_alloc) 2244 { 2245 /* When DCB is not supported, rx private buffer is not allocated. */ 2246 if (!hnae3_dev_dcb_supported(hdev)) { 2247 u32 rx_all = hdev->pkt_buf_size; 2248 2249 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); 2250 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 2251 return -ENOMEM; 2252 2253 return 0; 2254 } 2255 2256 if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) 2257 return 0; 2258 2259 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) 2260 return 0; 2261 2262 /* try to decrease the buffer size */ 2263 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) 2264 return 0; 2265 2266 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) 2267 return 0; 2268 2269 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) 2270 return 0; 2271 2272 return -ENOMEM; 2273 } 2274 2275 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 2276 struct hclge_pkt_buf_alloc *buf_alloc) 2277 { 2278 struct hclge_rx_priv_buff_cmd *req; 2279 struct hclge_desc desc; 2280 int ret; 2281 int i; 2282 2283 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 2284 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 2285 2286 /* Alloc private buffer TCs */ 2287 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2288 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2289 2290 req->buf_num[i] = 2291 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 2292 req->buf_num[i] |= 2293 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 2294 } 2295 2296 req->shared_buf = 2297 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 2298 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 2299 2300 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2301 if (ret) 2302 dev_err(&hdev->pdev->dev, 2303 "rx private buffer alloc cmd failed %d\n", ret); 2304 2305 return ret; 2306 } 2307 2308 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 2309 struct hclge_pkt_buf_alloc *buf_alloc) 2310 { 2311 struct hclge_rx_priv_wl_buf *req; 2312 struct hclge_priv_buf *priv; 2313 struct hclge_desc desc[2]; 2314 int i, j; 2315 int ret; 2316 2317 for (i = 0; i < 2; i++) { 2318 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 2319 false); 2320 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 2321 2322 /* The first descriptor set the NEXT bit to 1 */ 2323 if (i == 0) 2324 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2325 else 2326 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2327 2328 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2329 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 2330 2331 priv = &buf_alloc->priv_buf[idx]; 2332 req->tc_wl[j].high = 2333 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 2334 req->tc_wl[j].high |= 2335 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2336 req->tc_wl[j].low = 2337 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 2338 req->tc_wl[j].low |= 2339 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2340 } 2341 } 2342 2343 /* Send 2 descriptor at one time */ 2344 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2345 if (ret) 2346 dev_err(&hdev->pdev->dev, 2347 "rx private waterline config cmd failed %d\n", 2348 ret); 2349 return ret; 2350 } 2351 2352 static int hclge_common_thrd_config(struct hclge_dev *hdev, 2353 struct hclge_pkt_buf_alloc *buf_alloc) 2354 { 2355 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 2356 struct hclge_rx_com_thrd *req; 2357 struct hclge_desc desc[2]; 2358 struct 
hclge_tc_thrd *tc; 2359 int i, j; 2360 int ret; 2361 2362 for (i = 0; i < 2; i++) { 2363 hclge_cmd_setup_basic_desc(&desc[i], 2364 HCLGE_OPC_RX_COM_THRD_ALLOC, false); 2365 req = (struct hclge_rx_com_thrd *)&desc[i].data; 2366 2367 /* The first descriptor set the NEXT bit to 1 */ 2368 if (i == 0) 2369 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2370 else 2371 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2372 2373 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2374 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 2375 2376 req->com_thrd[j].high = 2377 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 2378 req->com_thrd[j].high |= 2379 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2380 req->com_thrd[j].low = 2381 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 2382 req->com_thrd[j].low |= 2383 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2384 } 2385 } 2386 2387 /* Send 2 descriptors at one time */ 2388 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2389 if (ret) 2390 dev_err(&hdev->pdev->dev, 2391 "common threshold config cmd failed %d\n", ret); 2392 return ret; 2393 } 2394 2395 static int hclge_common_wl_config(struct hclge_dev *hdev, 2396 struct hclge_pkt_buf_alloc *buf_alloc) 2397 { 2398 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 2399 struct hclge_rx_com_wl *req; 2400 struct hclge_desc desc; 2401 int ret; 2402 2403 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 2404 2405 req = (struct hclge_rx_com_wl *)desc.data; 2406 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 2407 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2408 2409 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 2410 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2411 2412 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2413 if (ret) 2414 dev_err(&hdev->pdev->dev, 2415 "common waterline config cmd failed %d\n", ret); 2416 2417 return ret; 2418 } 2419 2420 int hclge_buffer_alloc(struct hclge_dev *hdev) 2421 { 2422 struct hclge_pkt_buf_alloc *pkt_buf; 2423 int ret; 2424 2425 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 2426 if (!pkt_buf) 2427 return -ENOMEM; 2428 2429 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 2430 if (ret) { 2431 dev_err(&hdev->pdev->dev, 2432 "could not calc tx buffer size for all TCs %d\n", ret); 2433 goto out; 2434 } 2435 2436 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 2437 if (ret) { 2438 dev_err(&hdev->pdev->dev, 2439 "could not alloc tx buffers %d\n", ret); 2440 goto out; 2441 } 2442 2443 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 2444 if (ret) { 2445 dev_err(&hdev->pdev->dev, 2446 "could not calc rx priv buffer size for all TCs %d\n", 2447 ret); 2448 goto out; 2449 } 2450 2451 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 2452 if (ret) { 2453 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 2454 ret); 2455 goto out; 2456 } 2457 2458 if (hnae3_dev_dcb_supported(hdev)) { 2459 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2460 if (ret) { 2461 dev_err(&hdev->pdev->dev, 2462 "could not configure rx private waterline %d\n", 2463 ret); 2464 goto out; 2465 } 2466 2467 ret = hclge_common_thrd_config(hdev, pkt_buf); 2468 if (ret) { 2469 dev_err(&hdev->pdev->dev, 2470 "could not configure common threshold %d\n", 2471 ret); 2472 goto out; 2473 } 2474 } 2475 2476 ret = hclge_common_wl_config(hdev, pkt_buf); 2477 if (ret) 2478 dev_err(&hdev->pdev->dev, 2479 "could not configure common waterline %d\n", ret); 2480 2481 out: 2482 kfree(pkt_buf); 2483 return ret; 2484 } 2485 2486 static int hclge_init_roce_base_info(struct 
hclge_vport *vport) 2487 { 2488 struct hnae3_handle *roce = &vport->roce; 2489 struct hnae3_handle *nic = &vport->nic; 2490 struct hclge_dev *hdev = vport->back; 2491 2492 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2493 2494 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) 2495 return -EINVAL; 2496 2497 roce->rinfo.base_vector = hdev->num_nic_msi; 2498 2499 roce->rinfo.netdev = nic->kinfo.netdev; 2500 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2501 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2502 2503 roce->pdev = nic->pdev; 2504 roce->ae_algo = nic->ae_algo; 2505 roce->numa_node_mask = nic->numa_node_mask; 2506 2507 return 0; 2508 } 2509 2510 static int hclge_init_msi(struct hclge_dev *hdev) 2511 { 2512 struct pci_dev *pdev = hdev->pdev; 2513 int vectors; 2514 int i; 2515 2516 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2517 hdev->num_msi, 2518 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2519 if (vectors < 0) { 2520 dev_err(&pdev->dev, 2521 "failed(%d) to allocate MSI/MSI-X vectors\n", 2522 vectors); 2523 return vectors; 2524 } 2525 if (vectors < hdev->num_msi) 2526 dev_warn(&hdev->pdev->dev, 2527 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2528 hdev->num_msi, vectors); 2529 2530 hdev->num_msi = vectors; 2531 hdev->num_msi_left = vectors; 2532 2533 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2534 sizeof(u16), GFP_KERNEL); 2535 if (!hdev->vector_status) { 2536 pci_free_irq_vectors(pdev); 2537 return -ENOMEM; 2538 } 2539 2540 for (i = 0; i < hdev->num_msi; i++) 2541 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2542 2543 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2544 sizeof(int), GFP_KERNEL); 2545 if (!hdev->vector_irq) { 2546 pci_free_irq_vectors(pdev); 2547 return -ENOMEM; 2548 } 2549 2550 return 0; 2551 } 2552 2553 static u8 hclge_check_speed_dup(u8 duplex, int speed) 2554 { 2555 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) 2556 duplex = HCLGE_MAC_FULL; 2557 2558 return duplex; 2559 } 2560 2561 static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = { 2562 {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M}, 2563 {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M}, 2564 {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G}, 2565 {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G}, 2566 {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G}, 2567 {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G}, 2568 {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G}, 2569 {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G}, 2570 {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G}, 2571 }; 2572 2573 static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw) 2574 { 2575 u16 i; 2576 2577 for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) { 2578 if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) { 2579 *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw; 2580 return 0; 2581 } 2582 } 2583 2584 return -EINVAL; 2585 } 2586 2587 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, 2588 u8 duplex, u8 lane_num) 2589 { 2590 struct hclge_config_mac_speed_dup_cmd *req; 2591 struct hclge_desc desc; 2592 u32 speed_fw; 2593 int ret; 2594 2595 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2596 2597 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2598 2599 if (duplex) 2600 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); 2601 2602 ret = hclge_convert_to_fw_speed(speed, &speed_fw); 2603 if (ret) { 2604 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2605 return ret; 2606 } 
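	/* pack the firmware speed code, enable the MAC speed change and
	 * carry the requested lane number in the same command descriptor
	 */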
2607 2608 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, 2609 speed_fw); 2610 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2611 1); 2612 req->lane_num = lane_num; 2613 2614 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2615 if (ret) { 2616 dev_err(&hdev->pdev->dev, 2617 "mac speed/duplex config cmd failed %d.\n", ret); 2618 return ret; 2619 } 2620 2621 return 0; 2622 } 2623 2624 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) 2625 { 2626 struct hclge_mac *mac = &hdev->hw.mac; 2627 int ret; 2628 2629 duplex = hclge_check_speed_dup(duplex, speed); 2630 if (!mac->support_autoneg && mac->speed == speed && 2631 mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0)) 2632 return 0; 2633 2634 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); 2635 if (ret) 2636 return ret; 2637 2638 hdev->hw.mac.speed = speed; 2639 hdev->hw.mac.duplex = duplex; 2640 if (!lane_num) 2641 hdev->hw.mac.lane_num = lane_num; 2642 2643 return 0; 2644 } 2645 2646 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2647 u8 duplex, u8 lane_num) 2648 { 2649 struct hclge_vport *vport = hclge_get_vport(handle); 2650 struct hclge_dev *hdev = vport->back; 2651 2652 return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); 2653 } 2654 2655 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2656 { 2657 struct hclge_config_auto_neg_cmd *req; 2658 struct hclge_desc desc; 2659 u32 flag = 0; 2660 int ret; 2661 2662 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2663 2664 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2665 if (enable) 2666 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U); 2667 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2668 2669 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2670 if (ret) 2671 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2672 ret); 2673 2674 return ret; 2675 } 2676 2677 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2678 { 2679 struct hclge_vport *vport = hclge_get_vport(handle); 2680 struct hclge_dev *hdev = vport->back; 2681 2682 if (!hdev->hw.mac.support_autoneg) { 2683 if (enable) { 2684 dev_err(&hdev->pdev->dev, 2685 "autoneg is not supported by current port\n"); 2686 return -EOPNOTSUPP; 2687 } else { 2688 return 0; 2689 } 2690 } 2691 2692 return hclge_set_autoneg_en(hdev, enable); 2693 } 2694 2695 static int hclge_get_autoneg(struct hnae3_handle *handle) 2696 { 2697 struct hclge_vport *vport = hclge_get_vport(handle); 2698 struct hclge_dev *hdev = vport->back; 2699 struct phy_device *phydev = hdev->hw.mac.phydev; 2700 2701 if (phydev) 2702 return phydev->autoneg; 2703 2704 return hdev->hw.mac.autoneg; 2705 } 2706 2707 static int hclge_restart_autoneg(struct hnae3_handle *handle) 2708 { 2709 struct hclge_vport *vport = hclge_get_vport(handle); 2710 struct hclge_dev *hdev = vport->back; 2711 int ret; 2712 2713 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); 2714 2715 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2716 if (ret) 2717 return ret; 2718 return hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2719 } 2720 2721 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt) 2722 { 2723 struct hclge_vport *vport = hclge_get_vport(handle); 2724 struct hclge_dev *hdev = vport->back; 2725 2726 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) 2727 return hclge_set_autoneg_en(hdev, !halt); 2728 2729 return 0; 2730 } 2731 2732 static void 
hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, 2733 struct hclge_desc *desc, u32 desc_len) 2734 { 2735 u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2; 2736 u32 desc_index = 0; 2737 u32 data_index = 0; 2738 u32 i; 2739 2740 for (i = 0; i < lane_size; i++) { 2741 if (data_index >= HCLGE_DESC_DATA_LEN) { 2742 desc_index++; 2743 data_index = 0; 2744 } 2745 2746 if (desc_index >= desc_len) 2747 return; 2748 2749 hdev->fec_stats.per_lanes[i] += 2750 le32_to_cpu(desc[desc_index].data[data_index]); 2751 data_index++; 2752 } 2753 } 2754 2755 static void hclge_parse_fec_stats(struct hclge_dev *hdev, 2756 struct hclge_desc *desc, u32 desc_len) 2757 { 2758 struct hclge_query_fec_stats_cmd *req; 2759 2760 req = (struct hclge_query_fec_stats_cmd *)desc[0].data; 2761 2762 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; 2763 hdev->fec_stats.rs_corr_blocks += 2764 le32_to_cpu(req->rs_fec_corr_blocks); 2765 hdev->fec_stats.rs_uncorr_blocks += 2766 le32_to_cpu(req->rs_fec_uncorr_blocks); 2767 hdev->fec_stats.rs_error_blocks += 2768 le32_to_cpu(req->rs_fec_error_blocks); 2769 hdev->fec_stats.base_r_corr_blocks += 2770 le32_to_cpu(req->base_r_fec_corr_blocks); 2771 hdev->fec_stats.base_r_uncorr_blocks += 2772 le32_to_cpu(req->base_r_fec_uncorr_blocks); 2773 2774 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); 2775 } 2776 2777 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) 2778 { 2779 struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM]; 2780 int ret; 2781 u32 i; 2782 2783 for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) { 2784 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS, 2785 true); 2786 if (i != (HCLGE_FEC_STATS_CMD_NUM - 1)) 2787 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2788 } 2789 2790 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); 2791 if (ret) 2792 return ret; 2793 2794 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); 2795 2796 return 0; 2797 } 2798 2799 static void hclge_update_fec_stats(struct hclge_dev *hdev) 2800 { 2801 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2802 int ret; 2803 2804 if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || 2805 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) 2806 return; 2807 2808 ret = hclge_update_fec_stats_hw(hdev); 2809 if (ret) 2810 dev_err(&hdev->pdev->dev, 2811 "failed to update fec stats, ret = %d\n", ret); 2812 2813 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); 2814 } 2815 2816 static void hclge_get_fec_stats_total(struct hclge_dev *hdev, 2817 struct ethtool_fec_stats *fec_stats) 2818 { 2819 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; 2820 fec_stats->uncorrectable_blocks.total = 2821 hdev->fec_stats.rs_uncorr_blocks; 2822 } 2823 2824 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, 2825 struct ethtool_fec_stats *fec_stats) 2826 { 2827 u32 i; 2828 2829 if (hdev->fec_stats.base_r_lane_num == 0 || 2830 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { 2831 dev_err(&hdev->pdev->dev, 2832 "fec stats lane number(%llu) is invalid\n", 2833 hdev->fec_stats.base_r_lane_num); 2834 return; 2835 } 2836 2837 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { 2838 fec_stats->corrected_blocks.lanes[i] = 2839 hdev->fec_stats.base_r_corr_per_lanes[i]; 2840 fec_stats->uncorrectable_blocks.lanes[i] = 2841 hdev->fec_stats.base_r_uncorr_per_lanes[i]; 2842 } 2843 } 2844 2845 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, 2846 struct ethtool_fec_stats *fec_stats) 2847 { 2848 u32 fec_mode = 
hdev->hw.mac.fec_mode; 2849 2850 switch (fec_mode) { 2851 case BIT(HNAE3_FEC_RS): 2852 case BIT(HNAE3_FEC_LLRS): 2853 hclge_get_fec_stats_total(hdev, fec_stats); 2854 break; 2855 case BIT(HNAE3_FEC_BASER): 2856 hclge_get_fec_stats_lanes(hdev, fec_stats); 2857 break; 2858 default: 2859 dev_err(&hdev->pdev->dev, 2860 "fec stats is not supported by current fec mode(0x%x)\n", 2861 fec_mode); 2862 break; 2863 } 2864 } 2865 2866 static void hclge_get_fec_stats(struct hnae3_handle *handle, 2867 struct ethtool_fec_stats *fec_stats) 2868 { 2869 struct hclge_vport *vport = hclge_get_vport(handle); 2870 struct hclge_dev *hdev = vport->back; 2871 u32 fec_mode = hdev->hw.mac.fec_mode; 2872 2873 if (fec_mode == BIT(HNAE3_FEC_NONE) || 2874 fec_mode == BIT(HNAE3_FEC_AUTO) || 2875 fec_mode == BIT(HNAE3_FEC_USER_DEF)) 2876 return; 2877 2878 hclge_update_fec_stats(hdev); 2879 2880 hclge_comm_get_fec_stats(hdev, fec_stats); 2881 } 2882 2883 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) 2884 { 2885 struct hclge_config_fec_cmd *req; 2886 struct hclge_desc desc; 2887 int ret; 2888 2889 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); 2890 2891 req = (struct hclge_config_fec_cmd *)desc.data; 2892 if (fec_mode & BIT(HNAE3_FEC_AUTO)) 2893 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); 2894 if (fec_mode & BIT(HNAE3_FEC_RS)) 2895 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2896 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); 2897 if (fec_mode & BIT(HNAE3_FEC_LLRS)) 2898 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2899 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS); 2900 if (fec_mode & BIT(HNAE3_FEC_BASER)) 2901 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2902 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); 2903 2904 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2905 if (ret) 2906 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); 2907 2908 return ret; 2909 } 2910 2911 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) 2912 { 2913 struct hclge_vport *vport = hclge_get_vport(handle); 2914 struct hclge_dev *hdev = vport->back; 2915 struct hclge_mac *mac = &hdev->hw.mac; 2916 int ret; 2917 2918 if (fec_mode && !(mac->fec_ability & fec_mode)) { 2919 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); 2920 return -EINVAL; 2921 } 2922 2923 ret = hclge_set_fec_hw(hdev, fec_mode); 2924 if (ret) 2925 return ret; 2926 2927 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); 2928 return 0; 2929 } 2930 2931 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, 2932 u8 *fec_mode) 2933 { 2934 struct hclge_vport *vport = hclge_get_vport(handle); 2935 struct hclge_dev *hdev = vport->back; 2936 struct hclge_mac *mac = &hdev->hw.mac; 2937 2938 if (fec_ability) 2939 *fec_ability = mac->fec_ability; 2940 if (fec_mode) 2941 *fec_mode = mac->fec_mode; 2942 } 2943 2944 static int hclge_mac_init(struct hclge_dev *hdev) 2945 { 2946 struct hclge_mac *mac = &hdev->hw.mac; 2947 int ret; 2948 2949 hdev->support_sfp_query = true; 2950 2951 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2952 hdev->hw.mac.duplex = HCLGE_MAC_FULL; 2953 2954 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, 2955 hdev->hw.mac.duplex, hdev->hw.mac.lane_num); 2956 if (ret) 2957 return ret; 2958 2959 if (hdev->hw.mac.support_autoneg) { 2960 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); 2961 if (ret) 2962 return ret; 2963 } 2964 2965 mac->link = 0; 2966 2967 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { 
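		/* reapply the FEC mode previously selected by the user */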
2968 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); 2969 if (ret) 2970 return ret; 2971 } 2972 2973 ret = hclge_set_mac_mtu(hdev, hdev->mps); 2974 if (ret) { 2975 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); 2976 return ret; 2977 } 2978 2979 ret = hclge_set_default_loopback(hdev); 2980 if (ret) 2981 return ret; 2982 2983 ret = hclge_buffer_alloc(hdev); 2984 if (ret) 2985 dev_err(&hdev->pdev->dev, 2986 "allocate buffer fail, ret=%d\n", ret); 2987 2988 return ret; 2989 } 2990 2991 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2992 { 2993 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2994 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { 2995 hdev->last_mbx_scheduled = jiffies; 2996 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 2997 } 2998 } 2999 3000 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 3001 { 3002 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 3003 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && 3004 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { 3005 hdev->last_rst_scheduled = jiffies; 3006 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 3007 } 3008 } 3009 3010 static void hclge_errhand_task_schedule(struct hclge_dev *hdev) 3011 { 3012 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 3013 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) 3014 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 3015 } 3016 3017 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) 3018 { 3019 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 3020 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) 3021 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); 3022 } 3023 3024 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) 3025 { 3026 struct hclge_link_status_cmd *req; 3027 struct hclge_desc desc; 3028 int ret; 3029 3030 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 3031 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3032 if (ret) { 3033 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 3034 ret); 3035 return ret; 3036 } 3037 3038 req = (struct hclge_link_status_cmd *)desc.data; 3039 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? 
3040 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 3041 3042 return 0; 3043 } 3044 3045 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) 3046 { 3047 struct phy_device *phydev = hdev->hw.mac.phydev; 3048 3049 *link_status = HCLGE_LINK_STATUS_DOWN; 3050 3051 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) 3052 return 0; 3053 3054 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) 3055 return 0; 3056 3057 return hclge_get_mac_link_status(hdev, link_status); 3058 } 3059 3060 static void hclge_push_link_status(struct hclge_dev *hdev) 3061 { 3062 struct hclge_vport *vport; 3063 int ret; 3064 u16 i; 3065 3066 for (i = 0; i < pci_num_vf(hdev->pdev); i++) { 3067 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; 3068 3069 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || 3070 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) 3071 continue; 3072 3073 ret = hclge_push_vf_link_status(vport); 3074 if (ret) { 3075 dev_err(&hdev->pdev->dev, 3076 "failed to push link status to vf%u, ret = %d\n", 3077 i, ret); 3078 } 3079 } 3080 } 3081 3082 static void hclge_update_link_status(struct hclge_dev *hdev) 3083 { 3084 struct hnae3_handle *rhandle = &hdev->vport[0].roce; 3085 struct hnae3_handle *handle = &hdev->vport[0].nic; 3086 struct hnae3_client *rclient = hdev->roce_client; 3087 struct hnae3_client *client = hdev->nic_client; 3088 int state; 3089 int ret; 3090 3091 if (!client) 3092 return; 3093 3094 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) 3095 return; 3096 3097 ret = hclge_get_mac_phy_link(hdev, &state); 3098 if (ret) { 3099 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 3100 return; 3101 } 3102 3103 if (state != hdev->hw.mac.link) { 3104 hdev->hw.mac.link = state; 3105 if (state == HCLGE_LINK_STATUS_UP) 3106 hclge_update_port_info(hdev); 3107 3108 client->ops->link_status_change(handle, state); 3109 hclge_config_mac_tnl_int(hdev, state); 3110 if (rclient && rclient->ops->link_status_change) 3111 rclient->ops->link_status_change(rhandle, state); 3112 3113 hclge_push_link_status(hdev); 3114 } 3115 3116 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 3117 } 3118 3119 static void hclge_update_speed_advertising(struct hclge_mac *mac) 3120 { 3121 u32 speed_ability; 3122 3123 if (hclge_get_speed_bit(mac->speed, &speed_ability)) 3124 return; 3125 3126 switch (mac->module_type) { 3127 case HNAE3_MODULE_TYPE_FIBRE_LR: 3128 hclge_convert_setting_lr(speed_ability, mac->advertising); 3129 break; 3130 case HNAE3_MODULE_TYPE_FIBRE_SR: 3131 case HNAE3_MODULE_TYPE_AOC: 3132 hclge_convert_setting_sr(speed_ability, mac->advertising); 3133 break; 3134 case HNAE3_MODULE_TYPE_CR: 3135 hclge_convert_setting_cr(speed_ability, mac->advertising); 3136 break; 3137 case HNAE3_MODULE_TYPE_KR: 3138 hclge_convert_setting_kr(speed_ability, mac->advertising); 3139 break; 3140 default: 3141 break; 3142 } 3143 } 3144 3145 static void hclge_update_fec_advertising(struct hclge_mac *mac) 3146 { 3147 if (mac->fec_mode & BIT(HNAE3_FEC_RS)) 3148 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 3149 mac->advertising); 3150 else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS)) 3151 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 3152 mac->advertising); 3153 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) 3154 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 3155 mac->advertising); 3156 else 3157 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 3158 mac->advertising); 3159 } 3160 3161 static void hclge_update_pause_advertising(struct hclge_dev *hdev) 3162 { 3163 struct 
hclge_mac *mac = &hdev->hw.mac; 3164 bool rx_en, tx_en; 3165 3166 switch (hdev->fc_mode_last_time) { 3167 case HCLGE_FC_RX_PAUSE: 3168 rx_en = true; 3169 tx_en = false; 3170 break; 3171 case HCLGE_FC_TX_PAUSE: 3172 rx_en = false; 3173 tx_en = true; 3174 break; 3175 case HCLGE_FC_FULL: 3176 rx_en = true; 3177 tx_en = true; 3178 break; 3179 default: 3180 rx_en = false; 3181 tx_en = false; 3182 break; 3183 } 3184 3185 linkmode_set_pause(mac->advertising, tx_en, rx_en); 3186 } 3187 3188 static void hclge_update_advertising(struct hclge_dev *hdev) 3189 { 3190 struct hclge_mac *mac = &hdev->hw.mac; 3191 3192 linkmode_zero(mac->advertising); 3193 hclge_update_speed_advertising(mac); 3194 hclge_update_fec_advertising(mac); 3195 hclge_update_pause_advertising(hdev); 3196 } 3197 3198 static void hclge_update_port_capability(struct hclge_dev *hdev, 3199 struct hclge_mac *mac) 3200 { 3201 if (hnae3_dev_fec_supported(hdev)) 3202 hclge_convert_setting_fec(mac); 3203 3204 /* firmware can not identify back plane type, the media type 3205 * read from configuration can help deal it 3206 */ 3207 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && 3208 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) 3209 mac->module_type = HNAE3_MODULE_TYPE_KR; 3210 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 3211 mac->module_type = HNAE3_MODULE_TYPE_TP; 3212 3213 if (mac->support_autoneg) { 3214 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); 3215 linkmode_copy(mac->advertising, mac->supported); 3216 } else { 3217 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 3218 mac->supported); 3219 hclge_update_advertising(hdev); 3220 } 3221 } 3222 3223 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) 3224 { 3225 struct hclge_sfp_info_cmd *resp; 3226 struct hclge_desc desc; 3227 int ret; 3228 3229 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 3230 resp = (struct hclge_sfp_info_cmd *)desc.data; 3231 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3232 if (ret == -EOPNOTSUPP) { 3233 dev_warn(&hdev->pdev->dev, 3234 "IMP do not support get SFP speed %d\n", ret); 3235 return ret; 3236 } else if (ret) { 3237 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); 3238 return ret; 3239 } 3240 3241 *speed = le32_to_cpu(resp->speed); 3242 3243 return 0; 3244 } 3245 3246 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) 3247 { 3248 struct hclge_sfp_info_cmd *resp; 3249 struct hclge_desc desc; 3250 int ret; 3251 3252 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 3253 resp = (struct hclge_sfp_info_cmd *)desc.data; 3254 3255 resp->query_type = QUERY_ACTIVE_SPEED; 3256 3257 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3258 if (ret == -EOPNOTSUPP) { 3259 dev_warn(&hdev->pdev->dev, 3260 "IMP does not support get SFP info %d\n", ret); 3261 return ret; 3262 } else if (ret) { 3263 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); 3264 return ret; 3265 } 3266 3267 /* In some case, mac speed get from IMP may be 0, it shouldn't be 3268 * set to mac->speed. 
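	 * When that happens the previously configured speed is kept unchanged.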
3269 */ 3270 if (!le32_to_cpu(resp->speed)) 3271 return 0; 3272 3273 mac->speed = le32_to_cpu(resp->speed); 3274 /* if resp->speed_ability is 0, it means it's an old version 3275 * firmware, do not update these params 3276 */ 3277 if (resp->speed_ability) { 3278 mac->module_type = le32_to_cpu(resp->module_type); 3279 mac->speed_ability = le32_to_cpu(resp->speed_ability); 3280 mac->autoneg = resp->autoneg; 3281 mac->support_autoneg = resp->autoneg_ability; 3282 mac->speed_type = QUERY_ACTIVE_SPEED; 3283 mac->lane_num = resp->lane_num; 3284 if (!resp->active_fec) 3285 mac->fec_mode = 0; 3286 else 3287 mac->fec_mode = BIT(resp->active_fec); 3288 mac->fec_ability = resp->fec_ability; 3289 } else { 3290 mac->speed_type = QUERY_SFP_SPEED; 3291 } 3292 3293 return 0; 3294 } 3295 3296 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, 3297 struct ethtool_link_ksettings *cmd) 3298 { 3299 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; 3300 struct hclge_vport *vport = hclge_get_vport(handle); 3301 struct hclge_phy_link_ksetting_0_cmd *req0; 3302 struct hclge_phy_link_ksetting_1_cmd *req1; 3303 u32 supported, advertising, lp_advertising; 3304 struct hclge_dev *hdev = vport->back; 3305 int ret; 3306 3307 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, 3308 true); 3309 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3310 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, 3311 true); 3312 3313 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); 3314 if (ret) { 3315 dev_err(&hdev->pdev->dev, 3316 "failed to get phy link ksetting, ret = %d.\n", ret); 3317 return ret; 3318 } 3319 3320 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; 3321 cmd->base.autoneg = req0->autoneg; 3322 cmd->base.speed = le32_to_cpu(req0->speed); 3323 cmd->base.duplex = req0->duplex; 3324 cmd->base.port = req0->port; 3325 cmd->base.transceiver = req0->transceiver; 3326 cmd->base.phy_address = req0->phy_address; 3327 cmd->base.eth_tp_mdix = req0->eth_tp_mdix; 3328 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; 3329 supported = le32_to_cpu(req0->supported); 3330 advertising = le32_to_cpu(req0->advertising); 3331 lp_advertising = le32_to_cpu(req0->lp_advertising); 3332 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 3333 supported); 3334 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 3335 advertising); 3336 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, 3337 lp_advertising); 3338 3339 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; 3340 cmd->base.master_slave_cfg = req1->master_slave_cfg; 3341 cmd->base.master_slave_state = req1->master_slave_state; 3342 3343 return 0; 3344 } 3345 3346 static int 3347 hclge_set_phy_link_ksettings(struct hnae3_handle *handle, 3348 const struct ethtool_link_ksettings *cmd) 3349 { 3350 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; 3351 struct hclge_vport *vport = hclge_get_vport(handle); 3352 struct hclge_phy_link_ksetting_0_cmd *req0; 3353 struct hclge_phy_link_ksetting_1_cmd *req1; 3354 struct hclge_dev *hdev = vport->back; 3355 u32 advertising; 3356 int ret; 3357 3358 if (cmd->base.autoneg == AUTONEG_DISABLE && 3359 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || 3360 (cmd->base.duplex != DUPLEX_HALF && 3361 cmd->base.duplex != DUPLEX_FULL))) 3362 return -EINVAL; 3363 3364 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, 3365 false); 3366 desc[0].flag |= 
cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3367 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, 3368 false); 3369 3370 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; 3371 req0->autoneg = cmd->base.autoneg; 3372 req0->speed = cpu_to_le32(cmd->base.speed); 3373 req0->duplex = cmd->base.duplex; 3374 ethtool_convert_link_mode_to_legacy_u32(&advertising, 3375 cmd->link_modes.advertising); 3376 req0->advertising = cpu_to_le32(advertising); 3377 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; 3378 3379 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; 3380 req1->master_slave_cfg = cmd->base.master_slave_cfg; 3381 3382 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); 3383 if (ret) { 3384 dev_err(&hdev->pdev->dev, 3385 "failed to set phy link ksettings, ret = %d.\n", ret); 3386 return ret; 3387 } 3388 3389 hdev->hw.mac.autoneg = cmd->base.autoneg; 3390 hdev->hw.mac.speed = cmd->base.speed; 3391 hdev->hw.mac.duplex = cmd->base.duplex; 3392 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); 3393 3394 return 0; 3395 } 3396 3397 static int hclge_update_tp_port_info(struct hclge_dev *hdev) 3398 { 3399 struct ethtool_link_ksettings cmd; 3400 int ret; 3401 3402 if (!hnae3_dev_phy_imp_supported(hdev)) 3403 return 0; 3404 3405 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); 3406 if (ret) 3407 return ret; 3408 3409 hdev->hw.mac.autoneg = cmd.base.autoneg; 3410 hdev->hw.mac.speed = cmd.base.speed; 3411 hdev->hw.mac.duplex = cmd.base.duplex; 3412 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); 3413 3414 return 0; 3415 } 3416 3417 static int hclge_tp_port_init(struct hclge_dev *hdev) 3418 { 3419 struct ethtool_link_ksettings cmd; 3420 3421 if (!hnae3_dev_phy_imp_supported(hdev)) 3422 return 0; 3423 3424 cmd.base.autoneg = hdev->hw.mac.autoneg; 3425 cmd.base.speed = hdev->hw.mac.speed; 3426 cmd.base.duplex = hdev->hw.mac.duplex; 3427 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); 3428 3429 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); 3430 } 3431 3432 static int hclge_update_port_info(struct hclge_dev *hdev) 3433 { 3434 struct hclge_mac *mac = &hdev->hw.mac; 3435 int speed; 3436 int ret; 3437 3438 /* get the port info from SFP cmd if not copper port */ 3439 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 3440 return hclge_update_tp_port_info(hdev); 3441 3442 /* if IMP does not support get SFP/qSFP info, return directly */ 3443 if (!hdev->support_sfp_query) 3444 return 0; 3445 3446 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3447 speed = mac->speed; 3448 ret = hclge_get_sfp_info(hdev, mac); 3449 } else { 3450 speed = HCLGE_MAC_SPEED_UNKNOWN; 3451 ret = hclge_get_sfp_speed(hdev, &speed); 3452 } 3453 3454 if (ret == -EOPNOTSUPP) { 3455 hdev->support_sfp_query = false; 3456 return ret; 3457 } else if (ret) { 3458 return ret; 3459 } 3460 3461 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3462 if (mac->speed_type == QUERY_ACTIVE_SPEED) { 3463 hclge_update_port_capability(hdev, mac); 3464 if (mac->speed != speed) 3465 (void)hclge_tm_port_shaper_cfg(hdev); 3466 return 0; 3467 } 3468 return hclge_cfg_mac_speed_dup(hdev, mac->speed, 3469 HCLGE_MAC_FULL, mac->lane_num); 3470 } else { 3471 if (speed == HCLGE_MAC_SPEED_UNKNOWN) 3472 return 0; /* do nothing if no SFP */ 3473 3474 /* must config full duplex for SFP */ 3475 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0); 3476 } 3477 } 3478 3479 static int hclge_get_status(struct 
hnae3_handle *handle) 3480 { 3481 struct hclge_vport *vport = hclge_get_vport(handle); 3482 struct hclge_dev *hdev = vport->back; 3483 3484 hclge_update_link_status(hdev); 3485 3486 return hdev->hw.mac.link; 3487 } 3488 3489 struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) 3490 { 3491 if (!pci_num_vf(hdev->pdev)) { 3492 dev_err(&hdev->pdev->dev, 3493 "SRIOV is disabled, can not get vport(%d) info.\n", vf); 3494 return NULL; 3495 } 3496 3497 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { 3498 dev_err(&hdev->pdev->dev, 3499 "vf id(%d) is out of range(0 <= vfid < %d)\n", 3500 vf, pci_num_vf(hdev->pdev)); 3501 return NULL; 3502 } 3503 3504 /* VF start from 1 in vport */ 3505 vf += HCLGE_VF_VPORT_START_NUM; 3506 return &hdev->vport[vf]; 3507 } 3508 3509 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf, 3510 struct ifla_vf_info *ivf) 3511 { 3512 struct hclge_vport *vport = hclge_get_vport(handle); 3513 struct hclge_dev *hdev = vport->back; 3514 3515 vport = hclge_get_vf_vport(hdev, vf); 3516 if (!vport) 3517 return -EINVAL; 3518 3519 ivf->vf = vf; 3520 ivf->linkstate = vport->vf_info.link_state; 3521 ivf->spoofchk = vport->vf_info.spoofchk; 3522 ivf->trusted = vport->vf_info.trusted; 3523 ivf->min_tx_rate = 0; 3524 ivf->max_tx_rate = vport->vf_info.max_tx_rate; 3525 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; 3526 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); 3527 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; 3528 ether_addr_copy(ivf->mac, vport->vf_info.mac); 3529 3530 return 0; 3531 } 3532 3533 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, 3534 int link_state) 3535 { 3536 struct hclge_vport *vport = hclge_get_vport(handle); 3537 struct hclge_dev *hdev = vport->back; 3538 int link_state_old; 3539 int ret; 3540 3541 vport = hclge_get_vf_vport(hdev, vf); 3542 if (!vport) 3543 return -EINVAL; 3544 3545 link_state_old = vport->vf_info.link_state; 3546 vport->vf_info.link_state = link_state; 3547 3548 /* return success directly if the VF is unalive, VF will 3549 * query link state itself when it starts work. 3550 */ 3551 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 3552 return 0; 3553 3554 ret = hclge_push_vf_link_status(vport); 3555 if (ret) { 3556 vport->vf_info.link_state = link_state_old; 3557 dev_err(&hdev->pdev->dev, 3558 "failed to push vf%d link status, ret = %d\n", vf, ret); 3559 } 3560 3561 return ret; 3562 } 3563 3564 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 3565 { 3566 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg; 3567 3568 /* fetch the events from their corresponding regs */ 3569 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 3570 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); 3571 hw_err_src_reg = hclge_read_dev(&hdev->hw, 3572 HCLGE_RAS_PF_OTHER_INT_STS_REG); 3573 3574 /* Assumption: If by any chance reset and mailbox events are reported 3575 * together then we will only process reset event in this go and will 3576 * defer the processing of the mailbox events. Since, we would have not 3577 * cleared RX CMDQ event this time we would receive again another 3578 * interrupt from H/W just for the mailbox. 
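	 * So the mailbox event is not lost, it is only deferred until that
	 * follow-up interrupt arrives.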
3579 * 3580 * check for vector0 reset event sources 3581 */ 3582 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { 3583 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); 3584 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 3585 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3586 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 3587 hdev->rst_stats.imp_rst_cnt++; 3588 return HCLGE_VECTOR0_EVENT_RST; 3589 } 3590 3591 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { 3592 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); 3593 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3594 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 3595 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 3596 hdev->rst_stats.global_rst_cnt++; 3597 return HCLGE_VECTOR0_EVENT_RST; 3598 } 3599 3600 /* check for vector0 msix event and hardware error event source */ 3601 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK || 3602 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK) 3603 return HCLGE_VECTOR0_EVENT_ERR; 3604 3605 /* check for vector0 ptp event source */ 3606 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) { 3607 *clearval = msix_src_reg; 3608 return HCLGE_VECTOR0_EVENT_PTP; 3609 } 3610 3611 /* check for vector0 mailbox(=CMDQ RX) event source */ 3612 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 3613 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 3614 *clearval = cmdq_src_reg; 3615 return HCLGE_VECTOR0_EVENT_MBX; 3616 } 3617 3618 /* print other vector0 event source */ 3619 dev_info(&hdev->pdev->dev, 3620 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n", 3621 cmdq_src_reg, hw_err_src_reg, msix_src_reg); 3622 3623 return HCLGE_VECTOR0_EVENT_OTHER; 3624 } 3625 3626 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 3627 u32 regclr) 3628 { 3629 #define HCLGE_IMP_RESET_DELAY 5 3630 3631 switch (event_type) { 3632 case HCLGE_VECTOR0_EVENT_PTP: 3633 case HCLGE_VECTOR0_EVENT_RST: 3634 if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) 3635 mdelay(HCLGE_IMP_RESET_DELAY); 3636 3637 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 3638 break; 3639 case HCLGE_VECTOR0_EVENT_MBX: 3640 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 3641 break; 3642 default: 3643 break; 3644 } 3645 } 3646 3647 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) 3648 { 3649 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, 3650 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | 3651 BIT(HCLGE_VECTOR0_CORERESET_INT_B) | 3652 BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); 3653 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); 3654 } 3655 3656 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 3657 { 3658 writel(enable ? 1 : 0, vector->addr); 3659 } 3660 3661 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 3662 { 3663 struct hclge_dev *hdev = data; 3664 unsigned long flags; 3665 u32 clearval = 0; 3666 u32 event_cause; 3667 3668 hclge_enable_vector(&hdev->misc_vector, false); 3669 event_cause = hclge_check_event_cause(hdev, &clearval); 3670 3671 /* vector 0 interrupt is shared with reset and mailbox source events. 
*/ 3672 switch (event_cause) { 3673 case HCLGE_VECTOR0_EVENT_ERR: 3674 hclge_errhand_task_schedule(hdev); 3675 break; 3676 case HCLGE_VECTOR0_EVENT_RST: 3677 hclge_reset_task_schedule(hdev); 3678 break; 3679 case HCLGE_VECTOR0_EVENT_PTP: 3680 spin_lock_irqsave(&hdev->ptp->lock, flags); 3681 hclge_ptp_clean_tx_hwts(hdev); 3682 spin_unlock_irqrestore(&hdev->ptp->lock, flags); 3683 break; 3684 case HCLGE_VECTOR0_EVENT_MBX: 3685 /* If we are here then, 3686 * 1. Either we are not handling any mbx task and we are not 3687 * scheduled as well 3688 * OR 3689 * 2. We could be handling a mbx task but nothing more is 3690 * scheduled. 3691 * In both cases, we should schedule mbx task as there are more 3692 * mbx messages reported by this interrupt. 3693 */ 3694 hclge_mbx_task_schedule(hdev); 3695 break; 3696 default: 3697 dev_warn(&hdev->pdev->dev, 3698 "received unknown or unhandled event of vector0\n"); 3699 break; 3700 } 3701 3702 hclge_clear_event_cause(hdev, event_cause, clearval); 3703 3704 /* Enable interrupt if it is not caused by reset event or error event */ 3705 if (event_cause == HCLGE_VECTOR0_EVENT_PTP || 3706 event_cause == HCLGE_VECTOR0_EVENT_MBX || 3707 event_cause == HCLGE_VECTOR0_EVENT_OTHER) 3708 hclge_enable_vector(&hdev->misc_vector, true); 3709 3710 return IRQ_HANDLED; 3711 } 3712 3713 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 3714 { 3715 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { 3716 dev_warn(&hdev->pdev->dev, 3717 "vector(vector_id %d) has been freed.\n", vector_id); 3718 return; 3719 } 3720 3721 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 3722 hdev->num_msi_left += 1; 3723 hdev->num_msi_used -= 1; 3724 } 3725 3726 static void hclge_get_misc_vector(struct hclge_dev *hdev) 3727 { 3728 struct hclge_misc_vector *vector = &hdev->misc_vector; 3729 3730 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 3731 3732 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 3733 hdev->vector_status[0] = 0; 3734 3735 hdev->num_msi_left -= 1; 3736 hdev->num_msi_used += 1; 3737 } 3738 3739 static int hclge_misc_irq_init(struct hclge_dev *hdev) 3740 { 3741 int ret; 3742 3743 hclge_get_misc_vector(hdev); 3744 3745 /* this would be explicitly freed in the end */ 3746 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 3747 HCLGE_NAME, pci_name(hdev->pdev)); 3748 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 3749 0, hdev->misc_vector.name, hdev); 3750 if (ret) { 3751 hclge_free_vector(hdev, 0); 3752 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 3753 hdev->misc_vector.vector_irq); 3754 } 3755 3756 return ret; 3757 } 3758 3759 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 3760 { 3761 free_irq(hdev->misc_vector.vector_irq, hdev); 3762 hclge_free_vector(hdev, 0); 3763 } 3764 3765 int hclge_notify_client(struct hclge_dev *hdev, 3766 enum hnae3_reset_notify_type type) 3767 { 3768 struct hnae3_handle *handle = &hdev->vport[0].nic; 3769 struct hnae3_client *client = hdev->nic_client; 3770 int ret; 3771 3772 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) 3773 return 0; 3774 3775 if (!client->ops->reset_notify) 3776 return -EOPNOTSUPP; 3777 3778 ret = client->ops->reset_notify(handle, type); 3779 if (ret) 3780 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 3781 type, ret); 3782 3783 return ret; 3784 } 3785 3786 static int hclge_notify_roce_client(struct hclge_dev *hdev, 3787 enum hnae3_reset_notify_type type) 3788 { 3789 struct hnae3_handle 
*handle = &hdev->vport[0].roce; 3790 struct hnae3_client *client = hdev->roce_client; 3791 int ret; 3792 3793 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) 3794 return 0; 3795 3796 if (!client->ops->reset_notify) 3797 return -EOPNOTSUPP; 3798 3799 ret = client->ops->reset_notify(handle, type); 3800 if (ret) 3801 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 3802 type, ret); 3803 3804 return ret; 3805 } 3806 3807 static int hclge_reset_wait(struct hclge_dev *hdev) 3808 { 3809 #define HCLGE_RESET_WATI_MS 100 3810 #define HCLGE_RESET_WAIT_CNT 350 3811 3812 u32 val, reg, reg_bit; 3813 u32 cnt = 0; 3814 3815 switch (hdev->reset_type) { 3816 case HNAE3_IMP_RESET: 3817 reg = HCLGE_GLOBAL_RESET_REG; 3818 reg_bit = HCLGE_IMP_RESET_BIT; 3819 break; 3820 case HNAE3_GLOBAL_RESET: 3821 reg = HCLGE_GLOBAL_RESET_REG; 3822 reg_bit = HCLGE_GLOBAL_RESET_BIT; 3823 break; 3824 case HNAE3_FUNC_RESET: 3825 reg = HCLGE_FUN_RST_ING; 3826 reg_bit = HCLGE_FUN_RST_ING_B; 3827 break; 3828 default: 3829 dev_err(&hdev->pdev->dev, 3830 "Wait for unsupported reset type: %d\n", 3831 hdev->reset_type); 3832 return -EINVAL; 3833 } 3834 3835 val = hclge_read_dev(&hdev->hw, reg); 3836 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 3837 msleep(HCLGE_RESET_WATI_MS); 3838 val = hclge_read_dev(&hdev->hw, reg); 3839 cnt++; 3840 } 3841 3842 if (cnt >= HCLGE_RESET_WAIT_CNT) { 3843 dev_warn(&hdev->pdev->dev, 3844 "Wait for reset timeout: %d\n", hdev->reset_type); 3845 return -EBUSY; 3846 } 3847 3848 return 0; 3849 } 3850 3851 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) 3852 { 3853 struct hclge_vf_rst_cmd *req; 3854 struct hclge_desc desc; 3855 3856 req = (struct hclge_vf_rst_cmd *)desc.data; 3857 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); 3858 req->dest_vfid = func_id; 3859 3860 if (reset) 3861 req->vf_rst = 0x1; 3862 3863 return hclge_cmd_send(&hdev->hw, &desc, 1); 3864 } 3865 3866 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) 3867 { 3868 int i; 3869 3870 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { 3871 struct hclge_vport *vport = &hdev->vport[i]; 3872 int ret; 3873 3874 /* Send cmd to set/clear VF's FUNC_RST_ING */ 3875 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); 3876 if (ret) { 3877 dev_err(&hdev->pdev->dev, 3878 "set vf(%u) rst failed %d!\n", 3879 vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3880 ret); 3881 return ret; 3882 } 3883 3884 if (!reset || 3885 !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) 3886 continue; 3887 3888 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && 3889 hdev->reset_type == HNAE3_FUNC_RESET) { 3890 set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, 3891 &vport->need_notify); 3892 continue; 3893 } 3894 3895 /* Inform VF to process the reset. 3896 * hclge_inform_reset_assert_to_vf may fail if VF 3897 * driver is not loaded. 
3898 */ 3899 ret = hclge_inform_reset_assert_to_vf(vport); 3900 if (ret) 3901 dev_warn(&hdev->pdev->dev, 3902 "inform reset to vf(%u) failed %d!\n", 3903 vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3904 ret); 3905 } 3906 3907 return 0; 3908 } 3909 3910 static void hclge_mailbox_service_task(struct hclge_dev *hdev) 3911 { 3912 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || 3913 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || 3914 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 3915 return; 3916 3917 if (time_is_before_jiffies(hdev->last_mbx_scheduled + 3918 HCLGE_MBX_SCHED_TIMEOUT)) 3919 dev_warn(&hdev->pdev->dev, 3920 "mbx service task is scheduled after %ums on cpu%u!\n", 3921 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), 3922 smp_processor_id()); 3923 3924 hclge_mbx_handler(hdev); 3925 3926 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 3927 } 3928 3929 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) 3930 { 3931 struct hclge_pf_rst_sync_cmd *req; 3932 struct hclge_desc desc; 3933 int cnt = 0; 3934 int ret; 3935 3936 req = (struct hclge_pf_rst_sync_cmd *)desc.data; 3937 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); 3938 3939 do { 3940 /* vf need to down netdev by mbx during PF or FLR reset */ 3941 hclge_mailbox_service_task(hdev); 3942 3943 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3944 /* for compatible with old firmware, wait 3945 * 100 ms for VF to stop IO 3946 */ 3947 if (ret == -EOPNOTSUPP) { 3948 msleep(HCLGE_RESET_SYNC_TIME); 3949 return; 3950 } else if (ret) { 3951 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", 3952 ret); 3953 return; 3954 } else if (req->all_vf_ready) { 3955 return; 3956 } 3957 msleep(HCLGE_PF_RESET_SYNC_TIME); 3958 hclge_comm_cmd_reuse_desc(&desc, true); 3959 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); 3960 3961 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); 3962 } 3963 3964 void hclge_report_hw_error(struct hclge_dev *hdev, 3965 enum hnae3_hw_error_type type) 3966 { 3967 struct hnae3_client *client = hdev->nic_client; 3968 3969 if (!client || !client->ops->process_hw_error || 3970 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) 3971 return; 3972 3973 client->ops->process_hw_error(&hdev->vport[0].nic, type); 3974 } 3975 3976 static void hclge_handle_imp_error(struct hclge_dev *hdev) 3977 { 3978 u32 reg_val; 3979 3980 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 3981 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { 3982 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); 3983 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); 3984 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 3985 } 3986 3987 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { 3988 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); 3989 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); 3990 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 3991 } 3992 } 3993 3994 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 3995 { 3996 struct hclge_desc desc; 3997 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 3998 int ret; 3999 4000 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 4001 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 4002 req->fun_reset_vfid = func_id; 4003 4004 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4005 if (ret) 4006 dev_err(&hdev->pdev->dev, 4007 "send function reset cmd fail, status =%d\n", ret); 4008 4009 return ret; 4010 } 4011 4012 static void 
hclge_do_reset(struct hclge_dev *hdev) 4013 { 4014 struct hnae3_handle *handle = &hdev->vport[0].nic; 4015 struct pci_dev *pdev = hdev->pdev; 4016 u32 val; 4017 4018 if (hclge_get_hw_reset_stat(handle)) { 4019 dev_info(&pdev->dev, "hardware reset not finish\n"); 4020 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", 4021 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), 4022 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); 4023 return; 4024 } 4025 4026 switch (hdev->reset_type) { 4027 case HNAE3_IMP_RESET: 4028 dev_info(&pdev->dev, "IMP reset requested\n"); 4029 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 4030 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); 4031 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); 4032 break; 4033 case HNAE3_GLOBAL_RESET: 4034 dev_info(&pdev->dev, "global reset requested\n"); 4035 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 4036 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 4037 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 4038 break; 4039 case HNAE3_FUNC_RESET: 4040 dev_info(&pdev->dev, "PF reset requested\n"); 4041 /* schedule again to check later */ 4042 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 4043 hclge_reset_task_schedule(hdev); 4044 break; 4045 default: 4046 dev_warn(&pdev->dev, 4047 "unsupported reset type: %d\n", hdev->reset_type); 4048 break; 4049 } 4050 } 4051 4052 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, 4053 unsigned long *addr) 4054 { 4055 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 4056 struct hclge_dev *hdev = ae_dev->priv; 4057 4058 /* return the highest priority reset level amongst all */ 4059 if (test_bit(HNAE3_IMP_RESET, addr)) { 4060 rst_level = HNAE3_IMP_RESET; 4061 clear_bit(HNAE3_IMP_RESET, addr); 4062 clear_bit(HNAE3_GLOBAL_RESET, addr); 4063 clear_bit(HNAE3_FUNC_RESET, addr); 4064 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { 4065 rst_level = HNAE3_GLOBAL_RESET; 4066 clear_bit(HNAE3_GLOBAL_RESET, addr); 4067 clear_bit(HNAE3_FUNC_RESET, addr); 4068 } else if (test_bit(HNAE3_FUNC_RESET, addr)) { 4069 rst_level = HNAE3_FUNC_RESET; 4070 clear_bit(HNAE3_FUNC_RESET, addr); 4071 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 4072 rst_level = HNAE3_FLR_RESET; 4073 clear_bit(HNAE3_FLR_RESET, addr); 4074 } 4075 4076 if (hdev->reset_type != HNAE3_NONE_RESET && 4077 rst_level < hdev->reset_type) 4078 return HNAE3_NONE_RESET; 4079 4080 return rst_level; 4081 } 4082 4083 static void hclge_clear_reset_cause(struct hclge_dev *hdev) 4084 { 4085 u32 clearval = 0; 4086 4087 switch (hdev->reset_type) { 4088 case HNAE3_IMP_RESET: 4089 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 4090 break; 4091 case HNAE3_GLOBAL_RESET: 4092 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 4093 break; 4094 default: 4095 break; 4096 } 4097 4098 if (!clearval) 4099 return; 4100 4101 /* For revision 0x20, the reset interrupt source 4102 * can only be cleared after hardware reset done 4103 */ 4104 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 4105 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, 4106 clearval); 4107 4108 hclge_enable_vector(&hdev->misc_vector, true); 4109 } 4110 4111 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) 4112 { 4113 u32 reg_val; 4114 4115 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); 4116 if (enable) 4117 reg_val |= HCLGE_COMM_NIC_SW_RST_RDY; 4118 else 4119 reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY; 4120 4121 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); 
4122 } 4123 4124 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) 4125 { 4126 int ret; 4127 4128 ret = hclge_set_all_vf_rst(hdev, true); 4129 if (ret) 4130 return ret; 4131 4132 hclge_func_reset_sync_vf(hdev); 4133 4134 return 0; 4135 } 4136 4137 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) 4138 { 4139 u32 reg_val; 4140 int ret = 0; 4141 4142 switch (hdev->reset_type) { 4143 case HNAE3_FUNC_RESET: 4144 ret = hclge_func_reset_notify_vf(hdev); 4145 if (ret) 4146 return ret; 4147 4148 ret = hclge_func_reset_cmd(hdev, 0); 4149 if (ret) { 4150 dev_err(&hdev->pdev->dev, 4151 "asserting function reset fail %d!\n", ret); 4152 return ret; 4153 } 4154 4155 /* After performaning pf reset, it is not necessary to do the 4156 * mailbox handling or send any command to firmware, because 4157 * any mailbox handling or command to firmware is only valid 4158 * after hclge_comm_cmd_init is called. 4159 */ 4160 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 4161 hdev->rst_stats.pf_rst_cnt++; 4162 break; 4163 case HNAE3_FLR_RESET: 4164 ret = hclge_func_reset_notify_vf(hdev); 4165 if (ret) 4166 return ret; 4167 break; 4168 case HNAE3_IMP_RESET: 4169 hclge_handle_imp_error(hdev); 4170 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 4171 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, 4172 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); 4173 break; 4174 default: 4175 break; 4176 } 4177 4178 /* inform hardware that preparatory work is done */ 4179 msleep(HCLGE_RESET_SYNC_TIME); 4180 hclge_reset_handshake(hdev, true); 4181 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); 4182 4183 return ret; 4184 } 4185 4186 static void hclge_show_rst_info(struct hclge_dev *hdev) 4187 { 4188 char *buf; 4189 4190 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL); 4191 if (!buf) 4192 return; 4193 4194 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); 4195 4196 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); 4197 4198 kfree(buf); 4199 } 4200 4201 static bool hclge_reset_err_handle(struct hclge_dev *hdev) 4202 { 4203 #define MAX_RESET_FAIL_CNT 5 4204 4205 if (hdev->reset_pending) { 4206 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", 4207 hdev->reset_pending); 4208 return true; 4209 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & 4210 HCLGE_RESET_INT_M) { 4211 dev_info(&hdev->pdev->dev, 4212 "reset failed because new reset interrupt\n"); 4213 hclge_clear_reset_cause(hdev); 4214 return false; 4215 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { 4216 hdev->rst_stats.reset_fail_cnt++; 4217 set_bit(hdev->reset_type, &hdev->reset_pending); 4218 dev_info(&hdev->pdev->dev, 4219 "re-schedule reset task(%u)\n", 4220 hdev->rst_stats.reset_fail_cnt); 4221 return true; 4222 } 4223 4224 hclge_clear_reset_cause(hdev); 4225 4226 /* recover the handshake status when reset fail */ 4227 hclge_reset_handshake(hdev, true); 4228 4229 dev_err(&hdev->pdev->dev, "Reset fail!\n"); 4230 4231 hclge_show_rst_info(hdev); 4232 4233 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 4234 4235 return false; 4236 } 4237 4238 static void hclge_update_reset_level(struct hclge_dev *hdev) 4239 { 4240 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4241 enum hnae3_reset_type reset_level; 4242 4243 /* reset request will not be set during reset, so clear 4244 * pending reset request to avoid unnecessary reset 4245 * caused by the same reason. 
4246 */ 4247 hclge_get_reset_level(ae_dev, &hdev->reset_request); 4248 4249 /* if default_reset_request has a higher level reset request, 4250 * it should be handled as soon as possible. since some errors 4251 * need this kind of reset to fix. 4252 */ 4253 reset_level = hclge_get_reset_level(ae_dev, 4254 &hdev->default_reset_request); 4255 if (reset_level != HNAE3_NONE_RESET) 4256 set_bit(reset_level, &hdev->reset_request); 4257 } 4258 4259 static int hclge_set_rst_done(struct hclge_dev *hdev) 4260 { 4261 struct hclge_pf_rst_done_cmd *req; 4262 struct hclge_desc desc; 4263 int ret; 4264 4265 req = (struct hclge_pf_rst_done_cmd *)desc.data; 4266 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); 4267 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; 4268 4269 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4270 /* To be compatible with the old firmware, which does not support 4271 * command HCLGE_OPC_PF_RST_DONE, just print a warning and 4272 * return success 4273 */ 4274 if (ret == -EOPNOTSUPP) { 4275 dev_warn(&hdev->pdev->dev, 4276 "current firmware does not support command(0x%x)!\n", 4277 HCLGE_OPC_PF_RST_DONE); 4278 return 0; 4279 } else if (ret) { 4280 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", 4281 ret); 4282 } 4283 4284 return ret; 4285 } 4286 4287 static int hclge_reset_prepare_up(struct hclge_dev *hdev) 4288 { 4289 int ret = 0; 4290 4291 switch (hdev->reset_type) { 4292 case HNAE3_FUNC_RESET: 4293 case HNAE3_FLR_RESET: 4294 ret = hclge_set_all_vf_rst(hdev, false); 4295 break; 4296 case HNAE3_GLOBAL_RESET: 4297 case HNAE3_IMP_RESET: 4298 ret = hclge_set_rst_done(hdev); 4299 break; 4300 default: 4301 break; 4302 } 4303 4304 /* clear up the handshake status after re-initialize done */ 4305 hclge_reset_handshake(hdev, false); 4306 4307 return ret; 4308 } 4309 4310 static int hclge_reset_stack(struct hclge_dev *hdev) 4311 { 4312 int ret; 4313 4314 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 4315 if (ret) 4316 return ret; 4317 4318 ret = hclge_reset_ae_dev(hdev->ae_dev); 4319 if (ret) 4320 return ret; 4321 4322 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 4323 } 4324 4325 static int hclge_reset_prepare(struct hclge_dev *hdev) 4326 { 4327 int ret; 4328 4329 hdev->rst_stats.reset_cnt++; 4330 /* perform reset of the stack & ae device for a client */ 4331 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 4332 if (ret) 4333 return ret; 4334 4335 rtnl_lock(); 4336 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 4337 rtnl_unlock(); 4338 if (ret) 4339 return ret; 4340 4341 return hclge_reset_prepare_wait(hdev); 4342 } 4343 4344 static int hclge_reset_rebuild(struct hclge_dev *hdev) 4345 { 4346 int ret; 4347 4348 hdev->rst_stats.hw_reset_done_cnt++; 4349 4350 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 4351 if (ret) 4352 return ret; 4353 4354 rtnl_lock(); 4355 ret = hclge_reset_stack(hdev); 4356 rtnl_unlock(); 4357 if (ret) 4358 return ret; 4359 4360 hclge_clear_reset_cause(hdev); 4361 4362 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 4363 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 4364 * times 4365 */ 4366 if (ret && 4367 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) 4368 return ret; 4369 4370 ret = hclge_reset_prepare_up(hdev); 4371 if (ret) 4372 return ret; 4373 4374 rtnl_lock(); 4375 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); 4376 rtnl_unlock(); 4377 if (ret) 4378 return ret; 4379 4380 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); 4381 if (ret) 4382 return 
ret; 4383 4384 hdev->last_reset_time = jiffies; 4385 hdev->rst_stats.reset_fail_cnt = 0; 4386 hdev->rst_stats.reset_done_cnt++; 4387 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 4388 4389 hclge_update_reset_level(hdev); 4390 4391 return 0; 4392 } 4393 4394 static void hclge_reset(struct hclge_dev *hdev) 4395 { 4396 if (hclge_reset_prepare(hdev)) 4397 goto err_reset; 4398 4399 if (hclge_reset_wait(hdev)) 4400 goto err_reset; 4401 4402 if (hclge_reset_rebuild(hdev)) 4403 goto err_reset; 4404 4405 return; 4406 4407 err_reset: 4408 if (hclge_reset_err_handle(hdev)) 4409 hclge_reset_task_schedule(hdev); 4410 } 4411 4412 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) 4413 { 4414 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 4415 struct hclge_dev *hdev = ae_dev->priv; 4416 4417 /* We might end up getting called broadly because of 2 below cases: 4418 * 1. Recoverable error was conveyed through APEI and only way to bring 4419 * normalcy is to reset. 4420 * 2. A new reset request from the stack due to timeout 4421 * 4422 * check if this is a new reset request and we are not here just because 4423 * last reset attempt did not succeed and watchdog hit us again. We will 4424 * know this if last reset request did not occur very recently (watchdog 4425 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) 4426 * In case of new request we reset the "reset level" to PF reset. 4427 * And if it is a repeat reset request of the most recent one then we 4428 * want to make sure we throttle the reset request. Therefore, we will 4429 * not allow it again before 3*HZ times. 4430 */ 4431 4432 if (time_before(jiffies, (hdev->last_reset_time + 4433 HCLGE_RESET_INTERVAL))) { 4434 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); 4435 return; 4436 } 4437 4438 if (hdev->default_reset_request) { 4439 hdev->reset_level = 4440 hclge_get_reset_level(ae_dev, 4441 &hdev->default_reset_request); 4442 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { 4443 hdev->reset_level = HNAE3_FUNC_RESET; 4444 } 4445 4446 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", 4447 hdev->reset_level); 4448 4449 /* request reset & schedule reset task */ 4450 set_bit(hdev->reset_level, &hdev->reset_request); 4451 hclge_reset_task_schedule(hdev); 4452 4453 if (hdev->reset_level < HNAE3_GLOBAL_RESET) 4454 hdev->reset_level++; 4455 } 4456 4457 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 4458 enum hnae3_reset_type rst_type) 4459 { 4460 struct hclge_dev *hdev = ae_dev->priv; 4461 4462 set_bit(rst_type, &hdev->default_reset_request); 4463 } 4464 4465 static void hclge_reset_timer(struct timer_list *t) 4466 { 4467 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); 4468 4469 /* if default_reset_request has no value, it means that this reset 4470 * request has already be handled, so just return here 4471 */ 4472 if (!hdev->default_reset_request) 4473 return; 4474 4475 dev_info(&hdev->pdev->dev, 4476 "triggering reset in reset timer\n"); 4477 hclge_reset_event(hdev->pdev, NULL); 4478 } 4479 4480 static void hclge_reset_subtask(struct hclge_dev *hdev) 4481 { 4482 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4483 4484 /* check if there is any ongoing reset in the hardware. This status can 4485 * be checked from reset_pending. If there is then, we need to wait for 4486 * hardware to complete reset. 4487 * a. 
If we are able to figure out in reasonable time that hardware 4488 * has fully resetted then, we can proceed with driver, client 4489 * reset. 4490 * b. else, we can come back later to check this status so re-sched 4491 * now. 4492 */ 4493 hdev->last_reset_time = jiffies; 4494 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); 4495 if (hdev->reset_type != HNAE3_NONE_RESET) 4496 hclge_reset(hdev); 4497 4498 /* check if we got any *new* reset requests to be honored */ 4499 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); 4500 if (hdev->reset_type != HNAE3_NONE_RESET) 4501 hclge_do_reset(hdev); 4502 4503 hdev->reset_type = HNAE3_NONE_RESET; 4504 } 4505 4506 static void hclge_handle_err_reset_request(struct hclge_dev *hdev) 4507 { 4508 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4509 enum hnae3_reset_type reset_type; 4510 4511 if (ae_dev->hw_err_reset_req) { 4512 reset_type = hclge_get_reset_level(ae_dev, 4513 &ae_dev->hw_err_reset_req); 4514 hclge_set_def_reset_request(ae_dev, reset_type); 4515 } 4516 4517 if (hdev->default_reset_request && ae_dev->ops->reset_event) 4518 ae_dev->ops->reset_event(hdev->pdev, NULL); 4519 4520 /* enable interrupt after error handling complete */ 4521 hclge_enable_vector(&hdev->misc_vector, true); 4522 } 4523 4524 static void hclge_handle_err_recovery(struct hclge_dev *hdev) 4525 { 4526 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4527 4528 ae_dev->hw_err_reset_req = 0; 4529 4530 if (hclge_find_error_source(hdev)) { 4531 hclge_handle_error_info_log(ae_dev); 4532 hclge_handle_mac_tnl(hdev); 4533 hclge_handle_vf_queue_err_ras(hdev); 4534 } 4535 4536 hclge_handle_err_reset_request(hdev); 4537 } 4538 4539 static void hclge_misc_err_recovery(struct hclge_dev *hdev) 4540 { 4541 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4542 struct device *dev = &hdev->pdev->dev; 4543 u32 msix_sts_reg; 4544 4545 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); 4546 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { 4547 if (hclge_handle_hw_msix_error 4548 (hdev, &hdev->default_reset_request)) 4549 dev_info(dev, "received msix interrupt 0x%x\n", 4550 msix_sts_reg); 4551 } 4552 4553 hclge_handle_hw_ras_error(ae_dev); 4554 4555 hclge_handle_err_reset_request(hdev); 4556 } 4557 4558 static void hclge_errhand_service_task(struct hclge_dev *hdev) 4559 { 4560 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) 4561 return; 4562 4563 if (hnae3_dev_ras_imp_supported(hdev)) 4564 hclge_handle_err_recovery(hdev); 4565 else 4566 hclge_misc_err_recovery(hdev); 4567 } 4568 4569 static void hclge_reset_service_task(struct hclge_dev *hdev) 4570 { 4571 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 4572 return; 4573 4574 if (time_is_before_jiffies(hdev->last_rst_scheduled + 4575 HCLGE_RESET_SCHED_TIMEOUT)) 4576 dev_warn(&hdev->pdev->dev, 4577 "reset service task is scheduled after %ums on cpu%u!\n", 4578 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), 4579 smp_processor_id()); 4580 4581 down(&hdev->reset_sem); 4582 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 4583 4584 hclge_reset_subtask(hdev); 4585 4586 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 4587 up(&hdev->reset_sem); 4588 } 4589 4590 static void hclge_update_vport_alive(struct hclge_dev *hdev) 4591 { 4592 #define HCLGE_ALIVE_SECONDS_NORMAL 8 4593 4594 unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ; 4595 int i; 4596 4597 /* start from vport 1 for PF is always alive */ 
4598 for (i = 1; i < hdev->num_alloc_vport; i++) { 4599 struct hclge_vport *vport = &hdev->vport[i]; 4600 4601 if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || 4602 !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 4603 continue; 4604 if (time_after(jiffies, vport->last_active_jiffies + 4605 alive_time)) { 4606 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 4607 dev_warn(&hdev->pdev->dev, 4608 "VF %u heartbeat timeout\n", 4609 i - HCLGE_VF_VPORT_START_NUM); 4610 } 4611 } 4612 } 4613 4614 static void hclge_periodic_service_task(struct hclge_dev *hdev) 4615 { 4616 unsigned long delta = round_jiffies_relative(HZ); 4617 4618 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) 4619 return; 4620 4621 /* Always handle the link updating to make sure link state is 4622 * updated when it is triggered by mbx. 4623 */ 4624 hclge_update_link_status(hdev); 4625 hclge_sync_mac_table(hdev); 4626 hclge_sync_promisc_mode(hdev); 4627 hclge_sync_fd_table(hdev); 4628 4629 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 4630 delta = jiffies - hdev->last_serv_processed; 4631 4632 if (delta < round_jiffies_relative(HZ)) { 4633 delta = round_jiffies_relative(HZ) - delta; 4634 goto out; 4635 } 4636 } 4637 4638 hdev->serv_processed_cnt++; 4639 hclge_update_vport_alive(hdev); 4640 4641 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { 4642 hdev->last_serv_processed = jiffies; 4643 goto out; 4644 } 4645 4646 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) 4647 hclge_update_stats_for_all(hdev); 4648 4649 hclge_update_port_info(hdev); 4650 hclge_sync_vlan_filter(hdev); 4651 4652 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) 4653 hclge_rfs_filter_expire(hdev); 4654 4655 hdev->last_serv_processed = jiffies; 4656 4657 out: 4658 hclge_task_schedule(hdev, delta); 4659 } 4660 4661 static void hclge_ptp_service_task(struct hclge_dev *hdev) 4662 { 4663 unsigned long flags; 4664 4665 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || 4666 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || 4667 !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) 4668 return; 4669 4670 /* to prevent concurrence with the irq handler */ 4671 spin_lock_irqsave(&hdev->ptp->lock, flags); 4672 4673 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq 4674 * handler may handle it just before spin_lock_irqsave(). 4675 */ 4676 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) 4677 hclge_ptp_clean_tx_hwts(hdev); 4678 4679 spin_unlock_irqrestore(&hdev->ptp->lock, flags); 4680 } 4681 4682 static void hclge_service_task(struct work_struct *work) 4683 { 4684 struct hclge_dev *hdev = 4685 container_of(work, struct hclge_dev, service_task.work); 4686 4687 hclge_errhand_service_task(hdev); 4688 hclge_reset_service_task(hdev); 4689 hclge_ptp_service_task(hdev); 4690 hclge_mailbox_service_task(hdev); 4691 hclge_periodic_service_task(hdev); 4692 4693 /* Handle error recovery, reset and mbx again in case periodical task 4694 * delays the handling by calling hclge_task_schedule() in 4695 * hclge_periodic_service_task(). 
4696 */ 4697 hclge_errhand_service_task(hdev); 4698 hclge_reset_service_task(hdev); 4699 hclge_mailbox_service_task(hdev); 4700 } 4701 4702 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 4703 { 4704 /* VF handle has no client */ 4705 if (!handle->client) 4706 return container_of(handle, struct hclge_vport, nic); 4707 else if (handle->client->type == HNAE3_CLIENT_ROCE) 4708 return container_of(handle, struct hclge_vport, roce); 4709 else 4710 return container_of(handle, struct hclge_vport, nic); 4711 } 4712 4713 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, 4714 struct hnae3_vector_info *vector_info) 4715 { 4716 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64 4717 4718 vector_info->vector = pci_irq_vector(hdev->pdev, idx); 4719 4720 /* need an extend offset to config vector >= 64 */ 4721 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) 4722 vector_info->io_addr = hdev->hw.hw.io_base + 4723 HCLGE_VECTOR_REG_BASE + 4724 (idx - 1) * HCLGE_VECTOR_REG_OFFSET; 4725 else 4726 vector_info->io_addr = hdev->hw.hw.io_base + 4727 HCLGE_VECTOR_EXT_REG_BASE + 4728 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * 4729 HCLGE_VECTOR_REG_OFFSET_H + 4730 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * 4731 HCLGE_VECTOR_REG_OFFSET; 4732 4733 hdev->vector_status[idx] = hdev->vport[0].vport_id; 4734 hdev->vector_irq[idx] = vector_info->vector; 4735 } 4736 4737 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 4738 struct hnae3_vector_info *vector_info) 4739 { 4740 struct hclge_vport *vport = hclge_get_vport(handle); 4741 struct hnae3_vector_info *vector = vector_info; 4742 struct hclge_dev *hdev = vport->back; 4743 int alloc = 0; 4744 u16 i = 0; 4745 u16 j; 4746 4747 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); 4748 vector_num = min(hdev->num_msi_left, vector_num); 4749 4750 for (j = 0; j < vector_num; j++) { 4751 while (++i < hdev->num_nic_msi) { 4752 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 4753 hclge_get_vector_info(hdev, i, vector); 4754 vector++; 4755 alloc++; 4756 4757 break; 4758 } 4759 } 4760 } 4761 hdev->num_msi_left -= alloc; 4762 hdev->num_msi_used += alloc; 4763 4764 return alloc; 4765 } 4766 4767 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 4768 { 4769 int i; 4770 4771 for (i = 0; i < hdev->num_msi; i++) 4772 if (vector == hdev->vector_irq[i]) 4773 return i; 4774 4775 return -EINVAL; 4776 } 4777 4778 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 4779 { 4780 struct hclge_vport *vport = hclge_get_vport(handle); 4781 struct hclge_dev *hdev = vport->back; 4782 int vector_id; 4783 4784 vector_id = hclge_get_vector_index(hdev, vector); 4785 if (vector_id < 0) { 4786 dev_err(&hdev->pdev->dev, 4787 "Get vector index fail. 
vector = %d\n", vector); 4788 return vector_id; 4789 } 4790 4791 hclge_free_vector(hdev, vector_id); 4792 4793 return 0; 4794 } 4795 4796 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 4797 u8 *key, u8 *hfunc) 4798 { 4799 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 4800 struct hclge_vport *vport = hclge_get_vport(handle); 4801 struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg; 4802 4803 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); 4804 4805 hclge_comm_get_rss_indir_tbl(rss_cfg, indir, 4806 ae_dev->dev_specs.rss_ind_tbl_size); 4807 4808 return 0; 4809 } 4810 4811 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 4812 const u8 *key, const u8 hfunc) 4813 { 4814 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 4815 struct hclge_vport *vport = hclge_get_vport(handle); 4816 struct hclge_dev *hdev = vport->back; 4817 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 4818 int ret, i; 4819 4820 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); 4821 if (ret) { 4822 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); 4823 return ret; 4824 } 4825 4826 /* Update the shadow RSS table with user specified qids */ 4827 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) 4828 rss_cfg->rss_indirection_tbl[i] = indir[i]; 4829 4830 /* Update the hardware */ 4831 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, 4832 rss_cfg->rss_indirection_tbl); 4833 } 4834 4835 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 4836 struct ethtool_rxnfc *nfc) 4837 { 4838 struct hclge_vport *vport = hclge_get_vport(handle); 4839 struct hclge_dev *hdev = vport->back; 4840 int ret; 4841 4842 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, 4843 &hdev->rss_cfg, nfc); 4844 if (ret) { 4845 dev_err(&hdev->pdev->dev, 4846 "failed to set rss tuple, ret = %d.\n", ret); 4847 return ret; 4848 } 4849 4850 return 0; 4851 } 4852 4853 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 4854 struct ethtool_rxnfc *nfc) 4855 { 4856 struct hclge_vport *vport = hclge_get_vport(handle); 4857 u8 tuple_sets; 4858 int ret; 4859 4860 nfc->data = 0; 4861 4862 ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type, 4863 &tuple_sets); 4864 if (ret || !tuple_sets) 4865 return ret; 4866 4867 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); 4868 4869 return 0; 4870 } 4871 4872 static int hclge_get_tc_size(struct hnae3_handle *handle) 4873 { 4874 struct hclge_vport *vport = hclge_get_vport(handle); 4875 struct hclge_dev *hdev = vport->back; 4876 4877 return hdev->pf_rss_size_max; 4878 } 4879 4880 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) 4881 { 4882 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 4883 struct hclge_vport *vport = hdev->vport; 4884 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 4885 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; 4886 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 4887 struct hnae3_tc_info *tc_info; 4888 u16 roundup_size; 4889 u16 rss_size; 4890 int i; 4891 4892 tc_info = &vport->nic.kinfo.tc_info; 4893 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 4894 rss_size = tc_info->tqp_count[i]; 4895 tc_valid[i] = 0; 4896 4897 if (!(hdev->hw_tc_map & BIT(i))) 4898 continue; 4899 4900 /* tc_size set to hardware is the log2 of roundup power of two 4901 * of rss_size, the acutal queue size is limited by indirection 4902 * table. 
4903 */ 4904 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || 4905 rss_size == 0) { 4906 dev_err(&hdev->pdev->dev, 4907 "Configure rss tc size failed, invalid TC_SIZE = %u\n", 4908 rss_size); 4909 return -EINVAL; 4910 } 4911 4912 roundup_size = roundup_pow_of_two(rss_size); 4913 roundup_size = ilog2(roundup_size); 4914 4915 tc_valid[i] = 1; 4916 tc_size[i] = roundup_size; 4917 tc_offset[i] = tc_info->tqp_offset[i]; 4918 } 4919 4920 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, 4921 tc_size); 4922 } 4923 4924 int hclge_rss_init_hw(struct hclge_dev *hdev) 4925 { 4926 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; 4927 u8 *key = hdev->rss_cfg.rss_hash_key; 4928 u8 hfunc = hdev->rss_cfg.rss_algo; 4929 int ret; 4930 4931 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 4932 rss_indir); 4933 if (ret) 4934 return ret; 4935 4936 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); 4937 if (ret) 4938 return ret; 4939 4940 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg); 4941 if (ret) 4942 return ret; 4943 4944 return hclge_init_rss_tc_mode(hdev); 4945 } 4946 4947 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 4948 int vector_id, bool en, 4949 struct hnae3_ring_chain_node *ring_chain) 4950 { 4951 struct hclge_dev *hdev = vport->back; 4952 struct hnae3_ring_chain_node *node; 4953 struct hclge_desc desc; 4954 struct hclge_ctrl_vector_chain_cmd *req = 4955 (struct hclge_ctrl_vector_chain_cmd *)desc.data; 4956 enum hclge_comm_cmd_status status; 4957 enum hclge_opcode_type op; 4958 u16 tqp_type_and_id; 4959 int i; 4960 4961 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 4962 hclge_cmd_setup_basic_desc(&desc, op, false); 4963 req->int_vector_id_l = hnae3_get_field(vector_id, 4964 HCLGE_VECTOR_ID_L_M, 4965 HCLGE_VECTOR_ID_L_S); 4966 req->int_vector_id_h = hnae3_get_field(vector_id, 4967 HCLGE_VECTOR_ID_H_M, 4968 HCLGE_VECTOR_ID_H_S); 4969 4970 i = 0; 4971 for (node = ring_chain; node; node = node->next) { 4972 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 4973 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 4974 HCLGE_INT_TYPE_S, 4975 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); 4976 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 4977 HCLGE_TQP_ID_S, node->tqp_index); 4978 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 4979 HCLGE_INT_GL_IDX_S, 4980 hnae3_get_field(node->int_gl_idx, 4981 HNAE3_RING_GL_IDX_M, 4982 HNAE3_RING_GL_IDX_S)); 4983 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 4984 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 4985 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 4986 req->vfid = vport->vport_id; 4987 4988 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4989 if (status) { 4990 dev_err(&hdev->pdev->dev, 4991 "Map TQP fail, status is %d.\n", 4992 status); 4993 return -EIO; 4994 } 4995 i = 0; 4996 4997 hclge_cmd_setup_basic_desc(&desc, 4998 op, 4999 false); 5000 req->int_vector_id_l = 5001 hnae3_get_field(vector_id, 5002 HCLGE_VECTOR_ID_L_M, 5003 HCLGE_VECTOR_ID_L_S); 5004 req->int_vector_id_h = 5005 hnae3_get_field(vector_id, 5006 HCLGE_VECTOR_ID_H_M, 5007 HCLGE_VECTOR_ID_H_S); 5008 } 5009 } 5010 5011 if (i > 0) { 5012 req->int_cause_num = i; 5013 req->vfid = vport->vport_id; 5014 status = hclge_cmd_send(&hdev->hw, &desc, 1); 5015 if (status) { 5016 dev_err(&hdev->pdev->dev, 5017 "Map TQP fail, status is %d.\n", status); 5018 return -EIO; 5019 } 5020 } 5021 5022 return 0; 5023 } 5024 5025 static int hclge_map_ring_to_vector(struct hnae3_handle 
*handle, int vector, 5026 struct hnae3_ring_chain_node *ring_chain) 5027 { 5028 struct hclge_vport *vport = hclge_get_vport(handle); 5029 struct hclge_dev *hdev = vport->back; 5030 int vector_id; 5031 5032 vector_id = hclge_get_vector_index(hdev, vector); 5033 if (vector_id < 0) { 5034 dev_err(&hdev->pdev->dev, 5035 "failed to get vector index. vector=%d\n", vector); 5036 return vector_id; 5037 } 5038 5039 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 5040 } 5041 5042 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, 5043 struct hnae3_ring_chain_node *ring_chain) 5044 { 5045 struct hclge_vport *vport = hclge_get_vport(handle); 5046 struct hclge_dev *hdev = vport->back; 5047 int vector_id, ret; 5048 5049 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 5050 return 0; 5051 5052 vector_id = hclge_get_vector_index(hdev, vector); 5053 if (vector_id < 0) { 5054 dev_err(&handle->pdev->dev, 5055 "Get vector index fail. ret =%d\n", vector_id); 5056 return vector_id; 5057 } 5058 5059 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 5060 if (ret) 5061 dev_err(&handle->pdev->dev, 5062 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 5063 vector_id, ret); 5064 5065 return ret; 5066 } 5067 5068 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, 5069 bool en_uc, bool en_mc, bool en_bc) 5070 { 5071 struct hclge_vport *vport = &hdev->vport[vf_id]; 5072 struct hnae3_handle *handle = &vport->nic; 5073 struct hclge_promisc_cfg_cmd *req; 5074 struct hclge_desc desc; 5075 bool uc_tx_en = en_uc; 5076 u8 promisc_cfg = 0; 5077 int ret; 5078 5079 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 5080 5081 req = (struct hclge_promisc_cfg_cmd *)desc.data; 5082 req->vf_id = vf_id; 5083 5084 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) 5085 uc_tx_en = false; 5086 5087 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0); 5088 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0); 5089 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0); 5090 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0); 5091 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0); 5092 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0); 5093 req->extend_promisc = promisc_cfg; 5094 5095 /* to be compatible with DEVICE_VERSION_V1/2 */ 5096 promisc_cfg = 0; 5097 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0); 5098 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0); 5099 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 
1 : 0); 5100 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1); 5101 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1); 5102 req->promisc = promisc_cfg; 5103 5104 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5105 if (ret) 5106 dev_err(&hdev->pdev->dev, 5107 "failed to set vport %u promisc mode, ret = %d.\n", 5108 vf_id, ret); 5109 5110 return ret; 5111 } 5112 5113 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, 5114 bool en_mc_pmc, bool en_bc_pmc) 5115 { 5116 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id, 5117 en_uc_pmc, en_mc_pmc, en_bc_pmc); 5118 } 5119 5120 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 5121 bool en_mc_pmc) 5122 { 5123 struct hclge_vport *vport = hclge_get_vport(handle); 5124 struct hclge_dev *hdev = vport->back; 5125 bool en_bc_pmc = true; 5126 5127 /* For device whose version below V2, if broadcast promisc enabled, 5128 * vlan filter is always bypassed. So broadcast promisc should be 5129 * disabled until user enable promisc mode 5130 */ 5131 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 5132 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false; 5133 5134 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, 5135 en_bc_pmc); 5136 } 5137 5138 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) 5139 { 5140 struct hclge_vport *vport = hclge_get_vport(handle); 5141 5142 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 5143 } 5144 5145 static void hclge_sync_fd_state(struct hclge_dev *hdev) 5146 { 5147 if (hlist_empty(&hdev->fd_rule_list)) 5148 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 5149 } 5150 5151 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) 5152 { 5153 if (!test_bit(location, hdev->fd_bmap)) { 5154 set_bit(location, hdev->fd_bmap); 5155 hdev->hclge_fd_rule_num++; 5156 } 5157 } 5158 5159 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) 5160 { 5161 if (test_bit(location, hdev->fd_bmap)) { 5162 clear_bit(location, hdev->fd_bmap); 5163 hdev->hclge_fd_rule_num--; 5164 } 5165 } 5166 5167 static void hclge_fd_free_node(struct hclge_dev *hdev, 5168 struct hclge_fd_rule *rule) 5169 { 5170 hlist_del(&rule->rule_node); 5171 kfree(rule); 5172 hclge_sync_fd_state(hdev); 5173 } 5174 5175 static void hclge_update_fd_rule_node(struct hclge_dev *hdev, 5176 struct hclge_fd_rule *old_rule, 5177 struct hclge_fd_rule *new_rule, 5178 enum HCLGE_FD_NODE_STATE state) 5179 { 5180 switch (state) { 5181 case HCLGE_FD_TO_ADD: 5182 case HCLGE_FD_ACTIVE: 5183 /* 1) if the new state is TO_ADD, just replace the old rule 5184 * with the same location, no matter its state, because the 5185 * new rule will be configured to the hardware. 5186 * 2) if the new state is ACTIVE, it means the new rule 5187 * has been configured to the hardware, so just replace 5188 * the old rule node with the same location. 5189 * 3) for it doesn't add a new node to the list, so it's 5190 * unnecessary to update the rule number and fd_bmap. 
5191 */ 5192 new_rule->rule_node.next = old_rule->rule_node.next; 5193 new_rule->rule_node.pprev = old_rule->rule_node.pprev; 5194 memcpy(old_rule, new_rule, sizeof(*old_rule)); 5195 kfree(new_rule); 5196 break; 5197 case HCLGE_FD_DELETED: 5198 hclge_fd_dec_rule_cnt(hdev, old_rule->location); 5199 hclge_fd_free_node(hdev, old_rule); 5200 break; 5201 case HCLGE_FD_TO_DEL: 5202 /* if new request is TO_DEL, and old rule is existent 5203 * 1) the state of old rule is TO_DEL, we need do nothing, 5204 * because we delete rule by location, other rule content 5205 * is unncessary. 5206 * 2) the state of old rule is ACTIVE, we need to change its 5207 * state to TO_DEL, so the rule will be deleted when periodic 5208 * task being scheduled. 5209 * 3) the state of old rule is TO_ADD, it means the rule hasn't 5210 * been added to hardware, so we just delete the rule node from 5211 * fd_rule_list directly. 5212 */ 5213 if (old_rule->state == HCLGE_FD_TO_ADD) { 5214 hclge_fd_dec_rule_cnt(hdev, old_rule->location); 5215 hclge_fd_free_node(hdev, old_rule); 5216 return; 5217 } 5218 old_rule->state = HCLGE_FD_TO_DEL; 5219 break; 5220 } 5221 } 5222 5223 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist, 5224 u16 location, 5225 struct hclge_fd_rule **parent) 5226 { 5227 struct hclge_fd_rule *rule; 5228 struct hlist_node *node; 5229 5230 hlist_for_each_entry_safe(rule, node, hlist, rule_node) { 5231 if (rule->location == location) 5232 return rule; 5233 else if (rule->location > location) 5234 return NULL; 5235 /* record the parent node, use to keep the nodes in fd_rule_list 5236 * in ascend order. 5237 */ 5238 *parent = rule; 5239 } 5240 5241 return NULL; 5242 } 5243 5244 /* insert fd rule node in ascend order according to rule->location */ 5245 static void hclge_fd_insert_rule_node(struct hlist_head *hlist, 5246 struct hclge_fd_rule *rule, 5247 struct hclge_fd_rule *parent) 5248 { 5249 INIT_HLIST_NODE(&rule->rule_node); 5250 5251 if (parent) 5252 hlist_add_behind(&rule->rule_node, &parent->rule_node); 5253 else 5254 hlist_add_head(&rule->rule_node, hlist); 5255 } 5256 5257 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, 5258 struct hclge_fd_user_def_cfg *cfg) 5259 { 5260 struct hclge_fd_user_def_cfg_cmd *req; 5261 struct hclge_desc desc; 5262 u16 data = 0; 5263 int ret; 5264 5265 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false); 5266 5267 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data; 5268 5269 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0); 5270 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, 5271 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset); 5272 req->ol2_cfg = cpu_to_le16(data); 5273 5274 data = 0; 5275 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0); 5276 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, 5277 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset); 5278 req->ol3_cfg = cpu_to_le16(data); 5279 5280 data = 0; 5281 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0); 5282 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, 5283 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset); 5284 req->ol4_cfg = cpu_to_le16(data); 5285 5286 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5287 if (ret) 5288 dev_err(&hdev->pdev->dev, 5289 "failed to set fd user def data, ret= %d\n", ret); 5290 return ret; 5291 } 5292 5293 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) 5294 { 5295 int ret; 5296 5297 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) 5298 return; 5299 5300 if (!locked) 5301 
spin_lock_bh(&hdev->fd_rule_lock); 5302 5303 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); 5304 if (ret) 5305 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5306 5307 if (!locked) 5308 spin_unlock_bh(&hdev->fd_rule_lock); 5309 } 5310 5311 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, 5312 struct hclge_fd_rule *rule) 5313 { 5314 struct hlist_head *hlist = &hdev->fd_rule_list; 5315 struct hclge_fd_rule *fd_rule, *parent = NULL; 5316 struct hclge_fd_user_def_info *info, *old_info; 5317 struct hclge_fd_user_def_cfg *cfg; 5318 5319 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5320 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5321 return 0; 5322 5323 /* for valid layer is start from 1, so need minus 1 to get the cfg */ 5324 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5325 info = &rule->ep.user_def; 5326 5327 if (!cfg->ref_cnt || cfg->offset == info->offset) 5328 return 0; 5329 5330 if (cfg->ref_cnt > 1) 5331 goto error; 5332 5333 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); 5334 if (fd_rule) { 5335 old_info = &fd_rule->ep.user_def; 5336 if (info->layer == old_info->layer) 5337 return 0; 5338 } 5339 5340 error: 5341 dev_err(&hdev->pdev->dev, 5342 "No available offset for layer%d fd rule, each layer only support one user def offset.\n", 5343 info->layer + 1); 5344 return -ENOSPC; 5345 } 5346 5347 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, 5348 struct hclge_fd_rule *rule) 5349 { 5350 struct hclge_fd_user_def_cfg *cfg; 5351 5352 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5353 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5354 return; 5355 5356 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5357 if (!cfg->ref_cnt) { 5358 cfg->offset = rule->ep.user_def.offset; 5359 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5360 } 5361 cfg->ref_cnt++; 5362 } 5363 5364 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, 5365 struct hclge_fd_rule *rule) 5366 { 5367 struct hclge_fd_user_def_cfg *cfg; 5368 5369 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5370 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5371 return; 5372 5373 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5374 if (!cfg->ref_cnt) 5375 return; 5376 5377 cfg->ref_cnt--; 5378 if (!cfg->ref_cnt) { 5379 cfg->offset = 0; 5380 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5381 } 5382 } 5383 5384 static void hclge_update_fd_list(struct hclge_dev *hdev, 5385 enum HCLGE_FD_NODE_STATE state, u16 location, 5386 struct hclge_fd_rule *new_rule) 5387 { 5388 struct hlist_head *hlist = &hdev->fd_rule_list; 5389 struct hclge_fd_rule *fd_rule, *parent = NULL; 5390 5391 fd_rule = hclge_find_fd_rule(hlist, location, &parent); 5392 if (fd_rule) { 5393 hclge_fd_dec_user_def_refcnt(hdev, fd_rule); 5394 if (state == HCLGE_FD_ACTIVE) 5395 hclge_fd_inc_user_def_refcnt(hdev, new_rule); 5396 hclge_sync_fd_user_def_cfg(hdev, true); 5397 5398 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); 5399 return; 5400 } 5401 5402 /* it's unlikely to fail here, because we have checked the rule 5403 * exist before. 
5404 */ 5405 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) { 5406 dev_warn(&hdev->pdev->dev, 5407 "failed to delete fd rule %u, it's inexistent\n", 5408 location); 5409 return; 5410 } 5411 5412 hclge_fd_inc_user_def_refcnt(hdev, new_rule); 5413 hclge_sync_fd_user_def_cfg(hdev, true); 5414 5415 hclge_fd_insert_rule_node(hlist, new_rule, parent); 5416 hclge_fd_inc_rule_cnt(hdev, new_rule->location); 5417 5418 if (state == HCLGE_FD_TO_ADD) { 5419 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 5420 hclge_task_schedule(hdev, 0); 5421 } 5422 } 5423 5424 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) 5425 { 5426 struct hclge_get_fd_mode_cmd *req; 5427 struct hclge_desc desc; 5428 int ret; 5429 5430 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); 5431 5432 req = (struct hclge_get_fd_mode_cmd *)desc.data; 5433 5434 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5435 if (ret) { 5436 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); 5437 return ret; 5438 } 5439 5440 *fd_mode = req->mode; 5441 5442 return ret; 5443 } 5444 5445 static int hclge_get_fd_allocation(struct hclge_dev *hdev, 5446 u32 *stage1_entry_num, 5447 u32 *stage2_entry_num, 5448 u16 *stage1_counter_num, 5449 u16 *stage2_counter_num) 5450 { 5451 struct hclge_get_fd_allocation_cmd *req; 5452 struct hclge_desc desc; 5453 int ret; 5454 5455 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); 5456 5457 req = (struct hclge_get_fd_allocation_cmd *)desc.data; 5458 5459 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5460 if (ret) { 5461 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", 5462 ret); 5463 return ret; 5464 } 5465 5466 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); 5467 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); 5468 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); 5469 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); 5470 5471 return ret; 5472 } 5473 5474 static int hclge_set_fd_key_config(struct hclge_dev *hdev, 5475 enum HCLGE_FD_STAGE stage_num) 5476 { 5477 struct hclge_set_fd_key_config_cmd *req; 5478 struct hclge_fd_key_cfg *stage; 5479 struct hclge_desc desc; 5480 int ret; 5481 5482 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); 5483 5484 req = (struct hclge_set_fd_key_config_cmd *)desc.data; 5485 stage = &hdev->fd_cfg.key_cfg[stage_num]; 5486 req->stage = stage_num; 5487 req->key_select = stage->key_sel; 5488 req->inner_sipv6_word_en = stage->inner_sipv6_word_en; 5489 req->inner_dipv6_word_en = stage->inner_dipv6_word_en; 5490 req->outer_sipv6_word_en = stage->outer_sipv6_word_en; 5491 req->outer_dipv6_word_en = stage->outer_dipv6_word_en; 5492 req->tuple_mask = cpu_to_le32(~stage->tuple_active); 5493 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); 5494 5495 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5496 if (ret) 5497 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); 5498 5499 return ret; 5500 } 5501 5502 static void hclge_fd_disable_user_def(struct hclge_dev *hdev) 5503 { 5504 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; 5505 5506 spin_lock_bh(&hdev->fd_rule_lock); 5507 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); 5508 spin_unlock_bh(&hdev->fd_rule_lock); 5509 5510 hclge_fd_set_user_def_cmd(hdev, cfg); 5511 } 5512 5513 static int hclge_init_fd_config(struct hclge_dev *hdev) 5514 { 5515 #define LOW_2_WORDS 0x03 5516 struct hclge_fd_key_cfg *key_cfg; 5517 int ret; 5518 5519 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 
5520 return 0; 5521 5522 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); 5523 if (ret) 5524 return ret; 5525 5526 switch (hdev->fd_cfg.fd_mode) { 5527 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: 5528 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; 5529 break; 5530 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: 5531 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; 5532 break; 5533 default: 5534 dev_err(&hdev->pdev->dev, 5535 "Unsupported flow director mode %u\n", 5536 hdev->fd_cfg.fd_mode); 5537 return -EOPNOTSUPP; 5538 } 5539 5540 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; 5541 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; 5542 key_cfg->inner_sipv6_word_en = LOW_2_WORDS; 5543 key_cfg->inner_dipv6_word_en = LOW_2_WORDS; 5544 key_cfg->outer_sipv6_word_en = 0; 5545 key_cfg->outer_dipv6_word_en = 0; 5546 5547 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | 5548 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | 5549 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 5550 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5551 5552 /* If use max 400bit key, we can support tuples for ether type */ 5553 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 5554 key_cfg->tuple_active |= 5555 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); 5556 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 5557 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; 5558 } 5559 5560 /* roce_type is used to filter roce frames 5561 * dst_vport is used to specify the rule 5562 */ 5563 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); 5564 5565 ret = hclge_get_fd_allocation(hdev, 5566 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 5567 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], 5568 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], 5569 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); 5570 if (ret) 5571 return ret; 5572 5573 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); 5574 } 5575 5576 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, 5577 int loc, u8 *key, bool is_add) 5578 { 5579 struct hclge_fd_tcam_config_1_cmd *req1; 5580 struct hclge_fd_tcam_config_2_cmd *req2; 5581 struct hclge_fd_tcam_config_3_cmd *req3; 5582 struct hclge_desc desc[3]; 5583 int ret; 5584 5585 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); 5586 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 5587 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); 5588 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 5589 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); 5590 5591 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 5592 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 5593 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 5594 5595 req1->stage = stage; 5596 req1->xy_sel = sel_x ? 1 : 0; 5597 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); 5598 req1->index = cpu_to_le32(loc); 5599 req1->entry_vld = sel_x ? 
is_add : 0; 5600 5601 if (key) { 5602 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); 5603 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], 5604 sizeof(req2->tcam_data)); 5605 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + 5606 sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); 5607 } 5608 5609 ret = hclge_cmd_send(&hdev->hw, desc, 3); 5610 if (ret) 5611 dev_err(&hdev->pdev->dev, 5612 "config tcam key fail, ret=%d\n", 5613 ret); 5614 5615 return ret; 5616 } 5617 5618 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, 5619 struct hclge_fd_ad_data *action) 5620 { 5621 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 5622 struct hclge_fd_ad_config_cmd *req; 5623 struct hclge_desc desc; 5624 u64 ad_data = 0; 5625 int ret; 5626 5627 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); 5628 5629 req = (struct hclge_fd_ad_config_cmd *)desc.data; 5630 req->index = cpu_to_le32(loc); 5631 req->stage = stage; 5632 5633 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, 5634 action->write_rule_id_to_bd); 5635 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, 5636 action->rule_id); 5637 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { 5638 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, 5639 action->override_tc); 5640 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, 5641 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); 5642 } 5643 ad_data <<= 32; 5644 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); 5645 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, 5646 action->forward_to_direct_queue); 5647 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, 5648 action->queue_id); 5649 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); 5650 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, 5651 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); 5652 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); 5653 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, 5654 action->counter_id); 5655 5656 req->ad_data = cpu_to_le64(ad_data); 5657 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5658 if (ret) 5659 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); 5660 5661 return ret; 5662 } 5663 5664 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, 5665 struct hclge_fd_rule *rule) 5666 { 5667 int offset, moffset, ip_offset; 5668 enum HCLGE_FD_KEY_OPT key_opt; 5669 u16 tmp_x_s, tmp_y_s; 5670 u32 tmp_x_l, tmp_y_l; 5671 u8 *p = (u8 *)rule; 5672 int i; 5673 5674 if (rule->unused_tuple & BIT(tuple_bit)) 5675 return true; 5676 5677 key_opt = tuple_key_info[tuple_bit].key_opt; 5678 offset = tuple_key_info[tuple_bit].offset; 5679 moffset = tuple_key_info[tuple_bit].moffset; 5680 5681 switch (key_opt) { 5682 case KEY_OPT_U8: 5683 calc_x(*key_x, p[offset], p[moffset]); 5684 calc_y(*key_y, p[offset], p[moffset]); 5685 5686 return true; 5687 case KEY_OPT_LE16: 5688 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); 5689 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); 5690 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 5691 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 5692 5693 return true; 5694 case KEY_OPT_LE32: 5695 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); 5696 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); 5697 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5698 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5699 5700 return true; 5701 case KEY_OPT_MAC: 5702 for (i = 0; i < ETH_ALEN; i++) 
{ 5703 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], 5704 p[moffset + i]); 5705 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], 5706 p[moffset + i]); 5707 } 5708 5709 return true; 5710 case KEY_OPT_IP: 5711 ip_offset = IPV4_INDEX * sizeof(u32); 5712 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), 5713 *(u32 *)(&p[moffset + ip_offset])); 5714 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), 5715 *(u32 *)(&p[moffset + ip_offset])); 5716 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5717 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5718 5719 return true; 5720 default: 5721 return false; 5722 } 5723 } 5724 5725 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, 5726 u8 vf_id, u8 network_port_id) 5727 { 5728 u32 port_number = 0; 5729 5730 if (port_type == HOST_PORT) { 5731 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, 5732 pf_id); 5733 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, 5734 vf_id); 5735 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); 5736 } else { 5737 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, 5738 HCLGE_NETWORK_PORT_ID_S, network_port_id); 5739 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); 5740 } 5741 5742 return port_number; 5743 } 5744 5745 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, 5746 __le32 *key_x, __le32 *key_y, 5747 struct hclge_fd_rule *rule) 5748 { 5749 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; 5750 u8 cur_pos = 0, tuple_size, shift_bits; 5751 unsigned int i; 5752 5753 for (i = 0; i < MAX_META_DATA; i++) { 5754 tuple_size = meta_data_key_info[i].key_length; 5755 tuple_bit = key_cfg->meta_data_active & BIT(i); 5756 5757 switch (tuple_bit) { 5758 case BIT(ROCE_TYPE): 5759 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); 5760 cur_pos += tuple_size; 5761 break; 5762 case BIT(DST_VPORT): 5763 port_number = hclge_get_port_number(HOST_PORT, 0, 5764 rule->vf_id, 0); 5765 hnae3_set_field(meta_data, 5766 GENMASK(cur_pos + tuple_size, cur_pos), 5767 cur_pos, port_number); 5768 cur_pos += tuple_size; 5769 break; 5770 default: 5771 break; 5772 } 5773 } 5774 5775 calc_x(tmp_x, meta_data, 0xFFFFFFFF); 5776 calc_y(tmp_y, meta_data, 0xFFFFFFFF); 5777 shift_bits = sizeof(meta_data) * 8 - cur_pos; 5778 5779 *key_x = cpu_to_le32(tmp_x << shift_bits); 5780 *key_y = cpu_to_le32(tmp_y << shift_bits); 5781 } 5782 5783 /* A complete key is combined with meta data key and tuple key. 5784 * Meta data key is stored at the MSB region, and tuple key is stored at 5785 * the LSB region, unused bits will be filled 0. 
5786 */ 5787 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, 5788 struct hclge_fd_rule *rule) 5789 { 5790 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; 5791 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; 5792 u8 *cur_key_x, *cur_key_y; 5793 u8 meta_data_region; 5794 u8 tuple_size; 5795 int ret; 5796 u32 i; 5797 5798 memset(key_x, 0, sizeof(key_x)); 5799 memset(key_y, 0, sizeof(key_y)); 5800 cur_key_x = key_x; 5801 cur_key_y = key_y; 5802 5803 for (i = 0; i < MAX_TUPLE; i++) { 5804 bool tuple_valid; 5805 5806 tuple_size = tuple_key_info[i].key_length / 8; 5807 if (!(key_cfg->tuple_active & BIT(i))) 5808 continue; 5809 5810 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x, 5811 cur_key_y, rule); 5812 if (tuple_valid) { 5813 cur_key_x += tuple_size; 5814 cur_key_y += tuple_size; 5815 } 5816 } 5817 5818 meta_data_region = hdev->fd_cfg.max_key_length / 8 - 5819 MAX_META_DATA_LENGTH / 8; 5820 5821 hclge_fd_convert_meta_data(key_cfg, 5822 (__le32 *)(key_x + meta_data_region), 5823 (__le32 *)(key_y + meta_data_region), 5824 rule); 5825 5826 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, 5827 true); 5828 if (ret) { 5829 dev_err(&hdev->pdev->dev, 5830 "fd key_y config fail, loc=%u, ret=%d\n", 5831 rule->queue_id, ret); 5832 return ret; 5833 } 5834 5835 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, 5836 true); 5837 if (ret) 5838 dev_err(&hdev->pdev->dev, 5839 "fd key_x config fail, loc=%u, ret=%d\n", 5840 rule->queue_id, ret); 5841 return ret; 5842 } 5843 5844 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, 5845 struct hclge_fd_rule *rule) 5846 { 5847 struct hclge_vport *vport = hdev->vport; 5848 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5849 struct hclge_fd_ad_data ad_data; 5850 5851 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); 5852 ad_data.ad_id = rule->location; 5853 5854 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 5855 ad_data.drop_packet = true; 5856 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { 5857 ad_data.override_tc = true; 5858 ad_data.queue_id = 5859 kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; 5860 ad_data.tc_size = 5861 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); 5862 } else { 5863 ad_data.forward_to_direct_queue = true; 5864 ad_data.queue_id = rule->queue_id; 5865 } 5866 5867 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { 5868 ad_data.use_counter = true; 5869 ad_data.counter_id = rule->vf_id % 5870 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; 5871 } else { 5872 ad_data.use_counter = false; 5873 ad_data.counter_id = 0; 5874 } 5875 5876 ad_data.use_next_stage = false; 5877 ad_data.next_input_key = 0; 5878 5879 ad_data.write_rule_id_to_bd = true; 5880 ad_data.rule_id = rule->location; 5881 5882 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); 5883 } 5884 5885 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec, 5886 u32 *unused_tuple) 5887 { 5888 if (!spec || !unused_tuple) 5889 return -EINVAL; 5890 5891 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 5892 5893 if (!spec->ip4src) 5894 *unused_tuple |= BIT(INNER_SRC_IP); 5895 5896 if (!spec->ip4dst) 5897 *unused_tuple |= BIT(INNER_DST_IP); 5898 5899 if (!spec->psrc) 5900 *unused_tuple |= BIT(INNER_SRC_PORT); 5901 5902 if (!spec->pdst) 5903 *unused_tuple |= BIT(INNER_DST_PORT); 5904 5905 if (!spec->tos) 5906 *unused_tuple |= BIT(INNER_IP_TOS); 5907 5908 return 0; 5909 } 5910 5911 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, 
5912 u32 *unused_tuple) 5913 { 5914 if (!spec || !unused_tuple) 5915 return -EINVAL; 5916 5917 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5918 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5919 5920 if (!spec->ip4src) 5921 *unused_tuple |= BIT(INNER_SRC_IP); 5922 5923 if (!spec->ip4dst) 5924 *unused_tuple |= BIT(INNER_DST_IP); 5925 5926 if (!spec->tos) 5927 *unused_tuple |= BIT(INNER_IP_TOS); 5928 5929 if (!spec->proto) 5930 *unused_tuple |= BIT(INNER_IP_PROTO); 5931 5932 if (spec->l4_4_bytes) 5933 return -EOPNOTSUPP; 5934 5935 if (spec->ip_ver != ETH_RX_NFC_IP4) 5936 return -EOPNOTSUPP; 5937 5938 return 0; 5939 } 5940 5941 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, 5942 u32 *unused_tuple) 5943 { 5944 if (!spec || !unused_tuple) 5945 return -EINVAL; 5946 5947 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 5948 5949 /* check whether src/dst ip address used */ 5950 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) 5951 *unused_tuple |= BIT(INNER_SRC_IP); 5952 5953 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) 5954 *unused_tuple |= BIT(INNER_DST_IP); 5955 5956 if (!spec->psrc) 5957 *unused_tuple |= BIT(INNER_SRC_PORT); 5958 5959 if (!spec->pdst) 5960 *unused_tuple |= BIT(INNER_DST_PORT); 5961 5962 if (!spec->tclass) 5963 *unused_tuple |= BIT(INNER_IP_TOS); 5964 5965 return 0; 5966 } 5967 5968 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, 5969 u32 *unused_tuple) 5970 { 5971 if (!spec || !unused_tuple) 5972 return -EINVAL; 5973 5974 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5975 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5976 5977 /* check whether src/dst ip address used */ 5978 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) 5979 *unused_tuple |= BIT(INNER_SRC_IP); 5980 5981 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) 5982 *unused_tuple |= BIT(INNER_DST_IP); 5983 5984 if (!spec->l4_proto) 5985 *unused_tuple |= BIT(INNER_IP_PROTO); 5986 5987 if (!spec->tclass) 5988 *unused_tuple |= BIT(INNER_IP_TOS); 5989 5990 if (spec->l4_4_bytes) 5991 return -EOPNOTSUPP; 5992 5993 return 0; 5994 } 5995 5996 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) 5997 { 5998 if (!spec || !unused_tuple) 5999 return -EINVAL; 6000 6001 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 6002 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | 6003 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); 6004 6005 if (is_zero_ether_addr(spec->h_source)) 6006 *unused_tuple |= BIT(INNER_SRC_MAC); 6007 6008 if (is_zero_ether_addr(spec->h_dest)) 6009 *unused_tuple |= BIT(INNER_DST_MAC); 6010 6011 if (!spec->h_proto) 6012 *unused_tuple |= BIT(INNER_ETH_TYPE); 6013 6014 return 0; 6015 } 6016 6017 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, 6018 struct ethtool_rx_flow_spec *fs, 6019 u32 *unused_tuple) 6020 { 6021 if (fs->flow_type & FLOW_EXT) { 6022 if (fs->h_ext.vlan_etype) { 6023 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); 6024 return -EOPNOTSUPP; 6025 } 6026 6027 if (!fs->h_ext.vlan_tci) 6028 *unused_tuple |= BIT(INNER_VLAN_TAG_FST); 6029 6030 if (fs->m_ext.vlan_tci && 6031 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { 6032 dev_err(&hdev->pdev->dev, 6033 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n", 6034 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); 6035 return -EINVAL; 6036 } 6037 } else { 6038 *unused_tuple |= BIT(INNER_VLAN_TAG_FST); 6039 } 6040 6041 if (fs->flow_type & FLOW_MAC_EXT) { 6042 if (hdev->fd_cfg.fd_mode != 6043 
HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 6044 dev_err(&hdev->pdev->dev, 6045 "FLOW_MAC_EXT is not supported in the current fd mode!\n"); 6046 return -EOPNOTSUPP; 6047 } 6048 6049 if (is_zero_ether_addr(fs->h_ext.h_dest)) 6050 *unused_tuple |= BIT(INNER_DST_MAC); 6051 else 6052 *unused_tuple &= ~BIT(INNER_DST_MAC); 6053 } 6054 6055 return 0; 6056 } 6057 6058 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, 6059 struct hclge_fd_user_def_info *info) 6060 { 6061 switch (flow_type) { 6062 case ETHER_FLOW: 6063 info->layer = HCLGE_FD_USER_DEF_L2; 6064 *unused_tuple &= ~BIT(INNER_L2_RSV); 6065 break; 6066 case IP_USER_FLOW: 6067 case IPV6_USER_FLOW: 6068 info->layer = HCLGE_FD_USER_DEF_L3; 6069 *unused_tuple &= ~BIT(INNER_L3_RSV); 6070 break; 6071 case TCP_V4_FLOW: 6072 case UDP_V4_FLOW: 6073 case TCP_V6_FLOW: 6074 case UDP_V6_FLOW: 6075 info->layer = HCLGE_FD_USER_DEF_L4; 6076 *unused_tuple &= ~BIT(INNER_L4_RSV); 6077 break; 6078 default: 6079 return -EOPNOTSUPP; 6080 } 6081 6082 return 0; 6083 } 6084 6085 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) 6086 { 6087 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; 6088 } 6089 6090 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, 6091 struct ethtool_rx_flow_spec *fs, 6092 u32 *unused_tuple, 6093 struct hclge_fd_user_def_info *info) 6094 { 6095 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; 6096 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6097 u16 data, offset, data_mask, offset_mask; 6098 int ret; 6099 6100 info->layer = HCLGE_FD_USER_DEF_NONE; 6101 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; 6102 6103 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) 6104 return 0; 6105 6106 /* The user-def data from ethtool is a 64-bit value; bits 0~15 carry 6107 * the match data and bits 32~47 carry the byte offset.
6108 */ 6109 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; 6110 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; 6111 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; 6112 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; 6113 6114 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) { 6115 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); 6116 return -EOPNOTSUPP; 6117 } 6118 6119 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) { 6120 dev_err(&hdev->pdev->dev, 6121 "user-def offset[%u] should be no more than %u\n", 6122 offset, HCLGE_FD_MAX_USER_DEF_OFFSET); 6123 return -EINVAL; 6124 } 6125 6126 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) { 6127 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); 6128 return -EINVAL; 6129 } 6130 6131 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); 6132 if (ret) { 6133 dev_err(&hdev->pdev->dev, 6134 "unsupported flow type for user-def bytes, ret = %d\n", 6135 ret); 6136 return ret; 6137 } 6138 6139 info->data = data; 6140 info->data_mask = data_mask; 6141 info->offset = offset; 6142 6143 return 0; 6144 } 6145 6146 static int hclge_fd_check_spec(struct hclge_dev *hdev, 6147 struct ethtool_rx_flow_spec *fs, 6148 u32 *unused_tuple, 6149 struct hclge_fd_user_def_info *info) 6150 { 6151 u32 flow_type; 6152 int ret; 6153 6154 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 6155 dev_err(&hdev->pdev->dev, 6156 "failed to config fd rules, invalid rule location: %u, max is %u\n.", 6157 fs->location, 6158 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); 6159 return -EINVAL; 6160 } 6161 6162 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); 6163 if (ret) 6164 return ret; 6165 6166 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6167 switch (flow_type) { 6168 case SCTP_V4_FLOW: 6169 case TCP_V4_FLOW: 6170 case UDP_V4_FLOW: 6171 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, 6172 unused_tuple); 6173 break; 6174 case IP_USER_FLOW: 6175 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, 6176 unused_tuple); 6177 break; 6178 case SCTP_V6_FLOW: 6179 case TCP_V6_FLOW: 6180 case UDP_V6_FLOW: 6181 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, 6182 unused_tuple); 6183 break; 6184 case IPV6_USER_FLOW: 6185 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, 6186 unused_tuple); 6187 break; 6188 case ETHER_FLOW: 6189 if (hdev->fd_cfg.fd_mode != 6190 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 6191 dev_err(&hdev->pdev->dev, 6192 "ETHER_FLOW is not supported in current fd mode!\n"); 6193 return -EOPNOTSUPP; 6194 } 6195 6196 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, 6197 unused_tuple); 6198 break; 6199 default: 6200 dev_err(&hdev->pdev->dev, 6201 "unsupported protocol type, protocol type = %#x\n", 6202 flow_type); 6203 return -EOPNOTSUPP; 6204 } 6205 6206 if (ret) { 6207 dev_err(&hdev->pdev->dev, 6208 "failed to check flow union tuple, ret = %d\n", 6209 ret); 6210 return ret; 6211 } 6212 6213 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); 6214 } 6215 6216 static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs, 6217 struct hclge_fd_rule *rule, u8 ip_proto) 6218 { 6219 rule->tuples.src_ip[IPV4_INDEX] = 6220 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); 6221 rule->tuples_mask.src_ip[IPV4_INDEX] = 6222 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); 6223 6224 rule->tuples.dst_ip[IPV4_INDEX] = 6225 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); 6226 
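/* The sketch below is illustrative only, not driver code. It restates the
 * user-def handling of hclge_fd_parse_user_def_field() above: ethtool
 * delivers the 64-bit user-def value as two __be32 words in h_ext.data[]
 * (and its mask in m_ext.data[]), which implies data[1] holds the low word
 * carrying the match data in bits 0~15 and data[0] holds the high word
 * carrying the byte offset in bits 32~47. The 0xffff masks stand in for
 * HCLGE_FD_USER_DEF_DATA/HCLGE_FD_USER_DEF_OFFSET, assumed here to be
 * 16-bit masks; the helper name is made up for the example.
 *
 *	static void example_split_user_def(const struct ethtool_rx_flow_spec *fs,
 *					   u16 *data, u16 *offset)
 *	{
 *		*data = be32_to_cpu(fs->h_ext.data[1]) & 0xffff;
 *		*offset = be32_to_cpu(fs->h_ext.data[0]) & 0xffff;
 *	}
 */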
rule->tuples_mask.dst_ip[IPV4_INDEX] = 6227 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); 6228 6229 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); 6230 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); 6231 6232 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); 6233 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); 6234 6235 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; 6236 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; 6237 6238 rule->tuples.ether_proto = ETH_P_IP; 6239 rule->tuples_mask.ether_proto = 0xFFFF; 6240 6241 rule->tuples.ip_proto = ip_proto; 6242 rule->tuples_mask.ip_proto = 0xFF; 6243 } 6244 6245 static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs, 6246 struct hclge_fd_rule *rule) 6247 { 6248 rule->tuples.src_ip[IPV4_INDEX] = 6249 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); 6250 rule->tuples_mask.src_ip[IPV4_INDEX] = 6251 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); 6252 6253 rule->tuples.dst_ip[IPV4_INDEX] = 6254 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); 6255 rule->tuples_mask.dst_ip[IPV4_INDEX] = 6256 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); 6257 6258 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; 6259 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; 6260 6261 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; 6262 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; 6263 6264 rule->tuples.ether_proto = ETH_P_IP; 6265 rule->tuples_mask.ether_proto = 0xFFFF; 6266 } 6267 6268 static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs, 6269 struct hclge_fd_rule *rule, u8 ip_proto) 6270 { 6271 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, 6272 IPV6_SIZE); 6273 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, 6274 IPV6_SIZE); 6275 6276 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, 6277 IPV6_SIZE); 6278 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, 6279 IPV6_SIZE); 6280 6281 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); 6282 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); 6283 6284 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); 6285 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); 6286 6287 rule->tuples.ether_proto = ETH_P_IPV6; 6288 rule->tuples_mask.ether_proto = 0xFFFF; 6289 6290 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; 6291 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; 6292 6293 rule->tuples.ip_proto = ip_proto; 6294 rule->tuples_mask.ip_proto = 0xFF; 6295 } 6296 6297 static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs, 6298 struct hclge_fd_rule *rule) 6299 { 6300 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, 6301 IPV6_SIZE); 6302 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, 6303 IPV6_SIZE); 6304 6305 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, 6306 IPV6_SIZE); 6307 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, 6308 IPV6_SIZE); 6309 6310 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; 6311 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; 6312 6313 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; 6314 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; 6315 6316 rule->tuples.ether_proto = ETH_P_IPV6; 6317 rule->tuples_mask.ether_proto = 0xFFFF; 6318 } 6319 6320 static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs, 6321 struct 
hclge_fd_rule *rule) 6322 { 6323 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); 6324 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); 6325 6326 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); 6327 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); 6328 6329 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); 6330 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); 6331 } 6332 6333 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, 6334 struct hclge_fd_rule *rule) 6335 { 6336 switch (info->layer) { 6337 case HCLGE_FD_USER_DEF_L2: 6338 rule->tuples.l2_user_def = info->data; 6339 rule->tuples_mask.l2_user_def = info->data_mask; 6340 break; 6341 case HCLGE_FD_USER_DEF_L3: 6342 rule->tuples.l3_user_def = info->data; 6343 rule->tuples_mask.l3_user_def = info->data_mask; 6344 break; 6345 case HCLGE_FD_USER_DEF_L4: 6346 rule->tuples.l4_user_def = (u32)info->data << 16; 6347 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; 6348 break; 6349 default: 6350 break; 6351 } 6352 6353 rule->ep.user_def = *info; 6354 } 6355 6356 static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs, 6357 struct hclge_fd_rule *rule, 6358 struct hclge_fd_user_def_info *info) 6359 { 6360 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6361 6362 switch (flow_type) { 6363 case SCTP_V4_FLOW: 6364 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP); 6365 break; 6366 case TCP_V4_FLOW: 6367 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP); 6368 break; 6369 case UDP_V4_FLOW: 6370 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP); 6371 break; 6372 case IP_USER_FLOW: 6373 hclge_fd_get_ip4_tuple(fs, rule); 6374 break; 6375 case SCTP_V6_FLOW: 6376 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP); 6377 break; 6378 case TCP_V6_FLOW: 6379 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP); 6380 break; 6381 case UDP_V6_FLOW: 6382 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP); 6383 break; 6384 case IPV6_USER_FLOW: 6385 hclge_fd_get_ip6_tuple(fs, rule); 6386 break; 6387 case ETHER_FLOW: 6388 hclge_fd_get_ether_tuple(fs, rule); 6389 break; 6390 default: 6391 return -EOPNOTSUPP; 6392 } 6393 6394 if (fs->flow_type & FLOW_EXT) { 6395 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); 6396 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); 6397 hclge_fd_get_user_def_tuple(info, rule); 6398 } 6399 6400 if (fs->flow_type & FLOW_MAC_EXT) { 6401 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); 6402 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); 6403 } 6404 6405 return 0; 6406 } 6407 6408 static int hclge_fd_config_rule(struct hclge_dev *hdev, 6409 struct hclge_fd_rule *rule) 6410 { 6411 int ret; 6412 6413 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 6414 if (ret) 6415 return ret; 6416 6417 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 6418 } 6419 6420 static int hclge_add_fd_entry_common(struct hclge_dev *hdev, 6421 struct hclge_fd_rule *rule) 6422 { 6423 int ret; 6424 6425 spin_lock_bh(&hdev->fd_rule_lock); 6426 6427 if (hdev->fd_active_type != rule->rule_type && 6428 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || 6429 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { 6430 dev_err(&hdev->pdev->dev, 6431 "mode conflict(new type %d, active type %d), please delete existent rules first\n", 6432 rule->rule_type, hdev->fd_active_type); 6433 spin_unlock_bh(&hdev->fd_rule_lock); 6434 return -EINVAL; 6435 } 6436 6437 ret 
= hclge_fd_check_user_def_refcnt(hdev, rule); 6438 if (ret) 6439 goto out; 6440 6441 ret = hclge_clear_arfs_rules(hdev); 6442 if (ret) 6443 goto out; 6444 6445 ret = hclge_fd_config_rule(hdev, rule); 6446 if (ret) 6447 goto out; 6448 6449 rule->state = HCLGE_FD_ACTIVE; 6450 hdev->fd_active_type = rule->rule_type; 6451 hclge_update_fd_list(hdev, rule->state, rule->location, rule); 6452 6453 out: 6454 spin_unlock_bh(&hdev->fd_rule_lock); 6455 return ret; 6456 } 6457 6458 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) 6459 { 6460 struct hclge_vport *vport = hclge_get_vport(handle); 6461 struct hclge_dev *hdev = vport->back; 6462 6463 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; 6464 } 6465 6466 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, 6467 u16 *vport_id, u8 *action, u16 *queue_id) 6468 { 6469 struct hclge_vport *vport = hdev->vport; 6470 6471 if (ring_cookie == RX_CLS_FLOW_DISC) { 6472 *action = HCLGE_FD_ACTION_DROP_PACKET; 6473 } else { 6474 u32 ring = ethtool_get_flow_spec_ring(ring_cookie); 6475 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); 6476 u16 tqps; 6477 6478 /* To keep consistent with user's configuration, minus 1 when 6479 * printing 'vf', because vf id from ethtool is added 1 for vf. 6480 */ 6481 if (vf > hdev->num_req_vfs) { 6482 dev_err(&hdev->pdev->dev, 6483 "Error: vf id (%u) should be less than %u\n", 6484 vf - 1U, hdev->num_req_vfs); 6485 return -EINVAL; 6486 } 6487 6488 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; 6489 tqps = hdev->vport[vf].nic.kinfo.num_tqps; 6490 6491 if (ring >= tqps) { 6492 dev_err(&hdev->pdev->dev, 6493 "Error: queue id (%u) > max tqp num (%u)\n", 6494 ring, tqps - 1U); 6495 return -EINVAL; 6496 } 6497 6498 *action = HCLGE_FD_ACTION_SELECT_QUEUE; 6499 *queue_id = ring; 6500 } 6501 6502 return 0; 6503 } 6504 6505 static int hclge_add_fd_entry(struct hnae3_handle *handle, 6506 struct ethtool_rxnfc *cmd) 6507 { 6508 struct hclge_vport *vport = hclge_get_vport(handle); 6509 struct hclge_dev *hdev = vport->back; 6510 struct hclge_fd_user_def_info info; 6511 u16 dst_vport_id = 0, q_index = 0; 6512 struct ethtool_rx_flow_spec *fs; 6513 struct hclge_fd_rule *rule; 6514 u32 unused = 0; 6515 u8 action; 6516 int ret; 6517 6518 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { 6519 dev_err(&hdev->pdev->dev, 6520 "flow table director is not supported\n"); 6521 return -EOPNOTSUPP; 6522 } 6523 6524 if (!hdev->fd_en) { 6525 dev_err(&hdev->pdev->dev, 6526 "please enable flow director first\n"); 6527 return -EOPNOTSUPP; 6528 } 6529 6530 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6531 6532 ret = hclge_fd_check_spec(hdev, fs, &unused, &info); 6533 if (ret) 6534 return ret; 6535 6536 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, 6537 &action, &q_index); 6538 if (ret) 6539 return ret; 6540 6541 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 6542 if (!rule) 6543 return -ENOMEM; 6544 6545 ret = hclge_fd_get_tuple(fs, rule, &info); 6546 if (ret) { 6547 kfree(rule); 6548 return ret; 6549 } 6550 6551 rule->flow_type = fs->flow_type; 6552 rule->location = fs->location; 6553 rule->unused_tuple = unused; 6554 rule->vf_id = dst_vport_id; 6555 rule->queue_id = q_index; 6556 rule->action = action; 6557 rule->rule_type = HCLGE_FD_EP_ACTIVE; 6558 6559 ret = hclge_add_fd_entry_common(hdev, rule); 6560 if (ret) 6561 kfree(rule); 6562 6563 return ret; 6564 } 6565 6566 static int hclge_del_fd_entry(struct hnae3_handle *handle, 6567 struct ethtool_rxnfc *cmd) 6568 { 6569 
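/* The sketch below is illustrative only, not driver code. It shows the
 * ring_cookie layout consumed by hclge_fd_parse_ring_cookie() above:
 * RX_CLS_FLOW_DISC requests a drop action, otherwise the low bits select
 * the queue and the field at ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF selects the
 * function, where 0 means the PF and n means VF n - 1 (hence the
 * "minus 1 when printing" note above).
 *
 *	u64 cookie;
 *
 *	// drop matched packets:
 *	cookie = RX_CLS_FLOW_DISC;
 *	// steer to queue 'ring' of the PF (vf == 0) or of VF 'vf - 1':
 *	cookie = ((u64)vf << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | ring;
 */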
struct hclge_vport *vport = hclge_get_vport(handle); 6570 struct hclge_dev *hdev = vport->back; 6571 struct ethtool_rx_flow_spec *fs; 6572 int ret; 6573 6574 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6575 return -EOPNOTSUPP; 6576 6577 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6578 6579 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 6580 return -EINVAL; 6581 6582 spin_lock_bh(&hdev->fd_rule_lock); 6583 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || 6584 !test_bit(fs->location, hdev->fd_bmap)) { 6585 dev_err(&hdev->pdev->dev, 6586 "Delete fail, rule %u is inexistent\n", fs->location); 6587 spin_unlock_bh(&hdev->fd_rule_lock); 6588 return -ENOENT; 6589 } 6590 6591 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, 6592 NULL, false); 6593 if (ret) 6594 goto out; 6595 6596 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); 6597 6598 out: 6599 spin_unlock_bh(&hdev->fd_rule_lock); 6600 return ret; 6601 } 6602 6603 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, 6604 bool clear_list) 6605 { 6606 struct hclge_fd_rule *rule; 6607 struct hlist_node *node; 6608 u16 location; 6609 6610 spin_lock_bh(&hdev->fd_rule_lock); 6611 6612 for_each_set_bit(location, hdev->fd_bmap, 6613 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 6614 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, 6615 NULL, false); 6616 6617 if (clear_list) { 6618 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, 6619 rule_node) { 6620 hlist_del(&rule->rule_node); 6621 kfree(rule); 6622 } 6623 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 6624 hdev->hclge_fd_rule_num = 0; 6625 bitmap_zero(hdev->fd_bmap, 6626 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 6627 } 6628 6629 spin_unlock_bh(&hdev->fd_rule_lock); 6630 } 6631 6632 static void hclge_del_all_fd_entries(struct hclge_dev *hdev) 6633 { 6634 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6635 return; 6636 6637 hclge_clear_fd_rules_in_list(hdev, true); 6638 hclge_fd_disable_user_def(hdev); 6639 } 6640 6641 static int hclge_restore_fd_entries(struct hnae3_handle *handle) 6642 { 6643 struct hclge_vport *vport = hclge_get_vport(handle); 6644 struct hclge_dev *hdev = vport->back; 6645 struct hclge_fd_rule *rule; 6646 struct hlist_node *node; 6647 6648 /* Return ok here, because reset error handling will check this 6649 * return value. If error is returned here, the reset process will 6650 * fail. 
6651 */ 6652 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6653 return 0; 6654 6655 /* if fd is disabled, should not restore it when reset */ 6656 if (!hdev->fd_en) 6657 return 0; 6658 6659 spin_lock_bh(&hdev->fd_rule_lock); 6660 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 6661 if (rule->state == HCLGE_FD_ACTIVE) 6662 rule->state = HCLGE_FD_TO_ADD; 6663 } 6664 spin_unlock_bh(&hdev->fd_rule_lock); 6665 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 6666 6667 return 0; 6668 } 6669 6670 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, 6671 struct ethtool_rxnfc *cmd) 6672 { 6673 struct hclge_vport *vport = hclge_get_vport(handle); 6674 struct hclge_dev *hdev = vport->back; 6675 6676 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) 6677 return -EOPNOTSUPP; 6678 6679 cmd->rule_cnt = hdev->hclge_fd_rule_num; 6680 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 6681 6682 return 0; 6683 } 6684 6685 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, 6686 struct ethtool_tcpip4_spec *spec, 6687 struct ethtool_tcpip4_spec *spec_mask) 6688 { 6689 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 6690 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 6691 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 6692 6693 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 6694 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? 6695 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 6696 6697 spec->psrc = cpu_to_be16(rule->tuples.src_port); 6698 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 6699 0 : cpu_to_be16(rule->tuples_mask.src_port); 6700 6701 spec->pdst = cpu_to_be16(rule->tuples.dst_port); 6702 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? 6703 0 : cpu_to_be16(rule->tuples_mask.dst_port); 6704 6705 spec->tos = rule->tuples.ip_tos; 6706 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6707 0 : rule->tuples_mask.ip_tos; 6708 } 6709 6710 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, 6711 struct ethtool_usrip4_spec *spec, 6712 struct ethtool_usrip4_spec *spec_mask) 6713 { 6714 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 6715 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 6716 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 6717 6718 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 6719 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? 6720 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 6721 6722 spec->tos = rule->tuples.ip_tos; 6723 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6724 0 : rule->tuples_mask.ip_tos; 6725 6726 spec->proto = rule->tuples.ip_proto; 6727 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 
6728 0 : rule->tuples_mask.ip_proto; 6729 6730 spec->ip_ver = ETH_RX_NFC_IP4; 6731 } 6732 6733 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, 6734 struct ethtool_tcpip6_spec *spec, 6735 struct ethtool_tcpip6_spec *spec_mask) 6736 { 6737 cpu_to_be32_array(spec->ip6src, 6738 rule->tuples.src_ip, IPV6_SIZE); 6739 cpu_to_be32_array(spec->ip6dst, 6740 rule->tuples.dst_ip, IPV6_SIZE); 6741 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 6742 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); 6743 else 6744 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, 6745 IPV6_SIZE); 6746 6747 if (rule->unused_tuple & BIT(INNER_DST_IP)) 6748 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); 6749 else 6750 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, 6751 IPV6_SIZE); 6752 6753 spec->tclass = rule->tuples.ip_tos; 6754 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6755 0 : rule->tuples_mask.ip_tos; 6756 6757 spec->psrc = cpu_to_be16(rule->tuples.src_port); 6758 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 6759 0 : cpu_to_be16(rule->tuples_mask.src_port); 6760 6761 spec->pdst = cpu_to_be16(rule->tuples.dst_port); 6762 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? 6763 0 : cpu_to_be16(rule->tuples_mask.dst_port); 6764 } 6765 6766 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, 6767 struct ethtool_usrip6_spec *spec, 6768 struct ethtool_usrip6_spec *spec_mask) 6769 { 6770 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); 6771 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); 6772 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 6773 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); 6774 else 6775 cpu_to_be32_array(spec_mask->ip6src, 6776 rule->tuples_mask.src_ip, IPV6_SIZE); 6777 6778 if (rule->unused_tuple & BIT(INNER_DST_IP)) 6779 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); 6780 else 6781 cpu_to_be32_array(spec_mask->ip6dst, 6782 rule->tuples_mask.dst_ip, IPV6_SIZE); 6783 6784 spec->tclass = rule->tuples.ip_tos; 6785 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6786 0 : rule->tuples_mask.ip_tos; 6787 6788 spec->l4_proto = rule->tuples.ip_proto; 6789 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 6790 0 : rule->tuples_mask.ip_proto; 6791 } 6792 6793 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, 6794 struct ethhdr *spec, 6795 struct ethhdr *spec_mask) 6796 { 6797 ether_addr_copy(spec->h_source, rule->tuples.src_mac); 6798 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); 6799 6800 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) 6801 eth_zero_addr(spec_mask->h_source); 6802 else 6803 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); 6804 6805 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6806 eth_zero_addr(spec_mask->h_dest); 6807 else 6808 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); 6809 6810 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); 6811 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? 
6812 0 : cpu_to_be16(rule->tuples_mask.ether_proto); 6813 } 6814 6815 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, 6816 struct hclge_fd_rule *rule) 6817 { 6818 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == 6819 HCLGE_FD_TUPLE_USER_DEF_TUPLES) { 6820 fs->h_ext.data[0] = 0; 6821 fs->h_ext.data[1] = 0; 6822 fs->m_ext.data[0] = 0; 6823 fs->m_ext.data[1] = 0; 6824 } else { 6825 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); 6826 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); 6827 fs->m_ext.data[0] = 6828 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK); 6829 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); 6830 } 6831 } 6832 6833 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, 6834 struct hclge_fd_rule *rule) 6835 { 6836 if (fs->flow_type & FLOW_EXT) { 6837 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); 6838 fs->m_ext.vlan_tci = 6839 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 6840 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); 6841 6842 hclge_fd_get_user_def_info(fs, rule); 6843 } 6844 6845 if (fs->flow_type & FLOW_MAC_EXT) { 6846 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); 6847 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6848 eth_zero_addr(fs->m_u.ether_spec.h_dest); 6849 else 6850 ether_addr_copy(fs->m_u.ether_spec.h_dest, 6851 rule->tuples_mask.dst_mac); 6852 } 6853 } 6854 6855 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, 6856 u16 location) 6857 { 6858 struct hclge_fd_rule *rule = NULL; 6859 struct hlist_node *node2; 6860 6861 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 6862 if (rule->location == location) 6863 return rule; 6864 else if (rule->location > location) 6865 return NULL; 6866 } 6867 6868 return NULL; 6869 } 6870 6871 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, 6872 struct hclge_fd_rule *rule) 6873 { 6874 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 6875 fs->ring_cookie = RX_CLS_FLOW_DISC; 6876 } else { 6877 u64 vf_id; 6878 6879 fs->ring_cookie = rule->queue_id; 6880 vf_id = rule->vf_id; 6881 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 6882 fs->ring_cookie |= vf_id; 6883 } 6884 } 6885 6886 static int hclge_get_fd_rule_info(struct hnae3_handle *handle, 6887 struct ethtool_rxnfc *cmd) 6888 { 6889 struct hclge_vport *vport = hclge_get_vport(handle); 6890 struct hclge_fd_rule *rule = NULL; 6891 struct hclge_dev *hdev = vport->back; 6892 struct ethtool_rx_flow_spec *fs; 6893 6894 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6895 return -EOPNOTSUPP; 6896 6897 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6898 6899 spin_lock_bh(&hdev->fd_rule_lock); 6900 6901 rule = hclge_get_fd_rule(hdev, fs->location); 6902 if (!rule) { 6903 spin_unlock_bh(&hdev->fd_rule_lock); 6904 return -ENOENT; 6905 } 6906 6907 fs->flow_type = rule->flow_type; 6908 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 6909 case SCTP_V4_FLOW: 6910 case TCP_V4_FLOW: 6911 case UDP_V4_FLOW: 6912 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, 6913 &fs->m_u.tcp_ip4_spec); 6914 break; 6915 case IP_USER_FLOW: 6916 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, 6917 &fs->m_u.usr_ip4_spec); 6918 break; 6919 case SCTP_V6_FLOW: 6920 case TCP_V6_FLOW: 6921 case UDP_V6_FLOW: 6922 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, 6923 &fs->m_u.tcp_ip6_spec); 6924 break; 6925 case IPV6_USER_FLOW: 6926 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, 6927 &fs->m_u.usr_ip6_spec); 6928 break; 6929 /* The flow type 
of fd rule has been checked before adding in to rule 6930 * list. As other flow types have been handled, it must be ETHER_FLOW 6931 * for the default case 6932 */ 6933 default: 6934 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, 6935 &fs->m_u.ether_spec); 6936 break; 6937 } 6938 6939 hclge_fd_get_ext_info(fs, rule); 6940 6941 hclge_fd_get_ring_cookie(fs, rule); 6942 6943 spin_unlock_bh(&hdev->fd_rule_lock); 6944 6945 return 0; 6946 } 6947 6948 static int hclge_get_all_rules(struct hnae3_handle *handle, 6949 struct ethtool_rxnfc *cmd, u32 *rule_locs) 6950 { 6951 struct hclge_vport *vport = hclge_get_vport(handle); 6952 struct hclge_dev *hdev = vport->back; 6953 struct hclge_fd_rule *rule; 6954 struct hlist_node *node2; 6955 int cnt = 0; 6956 6957 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6958 return -EOPNOTSUPP; 6959 6960 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 6961 6962 spin_lock_bh(&hdev->fd_rule_lock); 6963 hlist_for_each_entry_safe(rule, node2, 6964 &hdev->fd_rule_list, rule_node) { 6965 if (cnt == cmd->rule_cnt) { 6966 spin_unlock_bh(&hdev->fd_rule_lock); 6967 return -EMSGSIZE; 6968 } 6969 6970 if (rule->state == HCLGE_FD_TO_DEL) 6971 continue; 6972 6973 rule_locs[cnt] = rule->location; 6974 cnt++; 6975 } 6976 6977 spin_unlock_bh(&hdev->fd_rule_lock); 6978 6979 cmd->rule_cnt = cnt; 6980 6981 return 0; 6982 } 6983 6984 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, 6985 struct hclge_fd_rule_tuples *tuples) 6986 { 6987 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 6988 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 6989 6990 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); 6991 tuples->ip_proto = fkeys->basic.ip_proto; 6992 tuples->dst_port = be16_to_cpu(fkeys->ports.dst); 6993 6994 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 6995 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); 6996 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); 6997 } else { 6998 int i; 6999 7000 for (i = 0; i < IPV6_SIZE; i++) { 7001 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); 7002 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); 7003 } 7004 } 7005 } 7006 7007 /* traverse all rules, check whether an existed rule has the same tuples */ 7008 static struct hclge_fd_rule * 7009 hclge_fd_search_flow_keys(struct hclge_dev *hdev, 7010 const struct hclge_fd_rule_tuples *tuples) 7011 { 7012 struct hclge_fd_rule *rule = NULL; 7013 struct hlist_node *node; 7014 7015 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7016 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) 7017 return rule; 7018 } 7019 7020 return NULL; 7021 } 7022 7023 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, 7024 struct hclge_fd_rule *rule) 7025 { 7026 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 7027 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) | 7028 BIT(INNER_SRC_PORT); 7029 rule->action = 0; 7030 rule->vf_id = 0; 7031 rule->rule_type = HCLGE_FD_ARFS_ACTIVE; 7032 rule->state = HCLGE_FD_TO_ADD; 7033 if (tuples->ether_proto == ETH_P_IP) { 7034 if (tuples->ip_proto == IPPROTO_TCP) 7035 rule->flow_type = TCP_V4_FLOW; 7036 else 7037 rule->flow_type = UDP_V4_FLOW; 7038 } else { 7039 if (tuples->ip_proto == IPPROTO_TCP) 7040 rule->flow_type = TCP_V6_FLOW; 7041 else 7042 rule->flow_type = UDP_V6_FLOW; 7043 } 7044 memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); 7045 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); 7046 } 7047 7048 static int 
hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, 7049 u16 flow_id, struct flow_keys *fkeys) 7050 { 7051 struct hclge_vport *vport = hclge_get_vport(handle); 7052 struct hclge_fd_rule_tuples new_tuples = {}; 7053 struct hclge_dev *hdev = vport->back; 7054 struct hclge_fd_rule *rule; 7055 u16 bit_id; 7056 7057 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7058 return -EOPNOTSUPP; 7059 7060 /* when there is already fd rule existed add by user, 7061 * arfs should not work 7062 */ 7063 spin_lock_bh(&hdev->fd_rule_lock); 7064 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && 7065 hdev->fd_active_type != HCLGE_FD_RULE_NONE) { 7066 spin_unlock_bh(&hdev->fd_rule_lock); 7067 return -EOPNOTSUPP; 7068 } 7069 7070 hclge_fd_get_flow_tuples(fkeys, &new_tuples); 7071 7072 /* check is there flow director filter existed for this flow, 7073 * if not, create a new filter for it; 7074 * if filter exist with different queue id, modify the filter; 7075 * if filter exist with same queue id, do nothing 7076 */ 7077 rule = hclge_fd_search_flow_keys(hdev, &new_tuples); 7078 if (!rule) { 7079 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); 7080 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 7081 spin_unlock_bh(&hdev->fd_rule_lock); 7082 return -ENOSPC; 7083 } 7084 7085 rule = kzalloc(sizeof(*rule), GFP_ATOMIC); 7086 if (!rule) { 7087 spin_unlock_bh(&hdev->fd_rule_lock); 7088 return -ENOMEM; 7089 } 7090 7091 rule->location = bit_id; 7092 rule->arfs.flow_id = flow_id; 7093 rule->queue_id = queue_id; 7094 hclge_fd_build_arfs_rule(&new_tuples, rule); 7095 hclge_update_fd_list(hdev, rule->state, rule->location, rule); 7096 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; 7097 } else if (rule->queue_id != queue_id) { 7098 rule->queue_id = queue_id; 7099 rule->state = HCLGE_FD_TO_ADD; 7100 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7101 hclge_task_schedule(hdev, 0); 7102 } 7103 spin_unlock_bh(&hdev->fd_rule_lock); 7104 return rule->location; 7105 } 7106 7107 static void hclge_rfs_filter_expire(struct hclge_dev *hdev) 7108 { 7109 #ifdef CONFIG_RFS_ACCEL 7110 struct hnae3_handle *handle = &hdev->vport[0].nic; 7111 struct hclge_fd_rule *rule; 7112 struct hlist_node *node; 7113 7114 spin_lock_bh(&hdev->fd_rule_lock); 7115 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { 7116 spin_unlock_bh(&hdev->fd_rule_lock); 7117 return; 7118 } 7119 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7120 if (rule->state != HCLGE_FD_ACTIVE) 7121 continue; 7122 if (rps_may_expire_flow(handle->netdev, rule->queue_id, 7123 rule->arfs.flow_id, rule->location)) { 7124 rule->state = HCLGE_FD_TO_DEL; 7125 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7126 } 7127 } 7128 spin_unlock_bh(&hdev->fd_rule_lock); 7129 #endif 7130 } 7131 7132 /* make sure being called after lock up with fd_rule_lock */ 7133 static int hclge_clear_arfs_rules(struct hclge_dev *hdev) 7134 { 7135 #ifdef CONFIG_RFS_ACCEL 7136 struct hclge_fd_rule *rule; 7137 struct hlist_node *node; 7138 int ret; 7139 7140 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) 7141 return 0; 7142 7143 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7144 switch (rule->state) { 7145 case HCLGE_FD_TO_DEL: 7146 case HCLGE_FD_ACTIVE: 7147 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 7148 rule->location, NULL, false); 7149 if (ret) 7150 return ret; 7151 fallthrough; 7152 case HCLGE_FD_TO_ADD: 7153 hclge_fd_dec_rule_cnt(hdev, rule->location); 7154 hlist_del(&rule->rule_node); 7155 
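/* At this point the rule is gone from hardware as well as from the
 * software list: for HCLGE_FD_TO_DEL and HCLGE_FD_ACTIVE the TCAM entry
 * was invalidated before falling through, while HCLGE_FD_TO_ADD rules
 * never reached the TCAM, so only the node itself is left to free.
 */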
kfree(rule); 7156 break; 7157 default: 7158 break; 7159 } 7160 } 7161 hclge_sync_fd_state(hdev); 7162 7163 #endif 7164 return 0; 7165 } 7166 7167 static void hclge_get_cls_key_basic(const struct flow_rule *flow, 7168 struct hclge_fd_rule *rule) 7169 { 7170 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) { 7171 struct flow_match_basic match; 7172 u16 ethtype_key, ethtype_mask; 7173 7174 flow_rule_match_basic(flow, &match); 7175 ethtype_key = ntohs(match.key->n_proto); 7176 ethtype_mask = ntohs(match.mask->n_proto); 7177 7178 if (ethtype_key == ETH_P_ALL) { 7179 ethtype_key = 0; 7180 ethtype_mask = 0; 7181 } 7182 rule->tuples.ether_proto = ethtype_key; 7183 rule->tuples_mask.ether_proto = ethtype_mask; 7184 rule->tuples.ip_proto = match.key->ip_proto; 7185 rule->tuples_mask.ip_proto = match.mask->ip_proto; 7186 } else { 7187 rule->unused_tuple |= BIT(INNER_IP_PROTO); 7188 rule->unused_tuple |= BIT(INNER_ETH_TYPE); 7189 } 7190 } 7191 7192 static void hclge_get_cls_key_mac(const struct flow_rule *flow, 7193 struct hclge_fd_rule *rule) 7194 { 7195 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 7196 struct flow_match_eth_addrs match; 7197 7198 flow_rule_match_eth_addrs(flow, &match); 7199 ether_addr_copy(rule->tuples.dst_mac, match.key->dst); 7200 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); 7201 ether_addr_copy(rule->tuples.src_mac, match.key->src); 7202 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); 7203 } else { 7204 rule->unused_tuple |= BIT(INNER_DST_MAC); 7205 rule->unused_tuple |= BIT(INNER_SRC_MAC); 7206 } 7207 } 7208 7209 static void hclge_get_cls_key_vlan(const struct flow_rule *flow, 7210 struct hclge_fd_rule *rule) 7211 { 7212 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { 7213 struct flow_match_vlan match; 7214 7215 flow_rule_match_vlan(flow, &match); 7216 rule->tuples.vlan_tag1 = match.key->vlan_id | 7217 (match.key->vlan_priority << VLAN_PRIO_SHIFT); 7218 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | 7219 (match.mask->vlan_priority << VLAN_PRIO_SHIFT); 7220 } else { 7221 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); 7222 } 7223 } 7224 7225 static void hclge_get_cls_key_ip(const struct flow_rule *flow, 7226 struct hclge_fd_rule *rule) 7227 { 7228 u16 addr_type = 0; 7229 7230 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) { 7231 struct flow_match_control match; 7232 7233 flow_rule_match_control(flow, &match); 7234 addr_type = match.key->addr_type; 7235 } 7236 7237 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 7238 struct flow_match_ipv4_addrs match; 7239 7240 flow_rule_match_ipv4_addrs(flow, &match); 7241 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); 7242 rule->tuples_mask.src_ip[IPV4_INDEX] = 7243 be32_to_cpu(match.mask->src); 7244 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); 7245 rule->tuples_mask.dst_ip[IPV4_INDEX] = 7246 be32_to_cpu(match.mask->dst); 7247 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 7248 struct flow_match_ipv6_addrs match; 7249 7250 flow_rule_match_ipv6_addrs(flow, &match); 7251 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, 7252 IPV6_SIZE); 7253 be32_to_cpu_array(rule->tuples_mask.src_ip, 7254 match.mask->src.s6_addr32, IPV6_SIZE); 7255 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, 7256 IPV6_SIZE); 7257 be32_to_cpu_array(rule->tuples_mask.dst_ip, 7258 match.mask->dst.s6_addr32, IPV6_SIZE); 7259 } else { 7260 rule->unused_tuple |= BIT(INNER_SRC_IP); 7261 rule->unused_tuple |= 
BIT(INNER_DST_IP); 7262 } 7263 } 7264 7265 static void hclge_get_cls_key_port(const struct flow_rule *flow, 7266 struct hclge_fd_rule *rule) 7267 { 7268 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { 7269 struct flow_match_ports match; 7270 7271 flow_rule_match_ports(flow, &match); 7272 7273 rule->tuples.src_port = be16_to_cpu(match.key->src); 7274 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); 7275 rule->tuples.dst_port = be16_to_cpu(match.key->dst); 7276 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); 7277 } else { 7278 rule->unused_tuple |= BIT(INNER_SRC_PORT); 7279 rule->unused_tuple |= BIT(INNER_DST_PORT); 7280 } 7281 } 7282 7283 static int hclge_parse_cls_flower(struct hclge_dev *hdev, 7284 struct flow_cls_offload *cls_flower, 7285 struct hclge_fd_rule *rule) 7286 { 7287 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); 7288 struct flow_dissector *dissector = flow->match.dissector; 7289 7290 if (dissector->used_keys & 7291 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 7292 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 7293 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 7294 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 7295 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 7296 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 7297 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { 7298 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n", 7299 dissector->used_keys); 7300 return -EOPNOTSUPP; 7301 } 7302 7303 hclge_get_cls_key_basic(flow, rule); 7304 hclge_get_cls_key_mac(flow, rule); 7305 hclge_get_cls_key_vlan(flow, rule); 7306 hclge_get_cls_key_ip(flow, rule); 7307 hclge_get_cls_key_port(flow, rule); 7308 7309 return 0; 7310 } 7311 7312 static int hclge_check_cls_flower(struct hclge_dev *hdev, 7313 struct flow_cls_offload *cls_flower, int tc) 7314 { 7315 u32 prio = cls_flower->common.prio; 7316 7317 if (tc < 0 || tc > hdev->tc_max) { 7318 dev_err(&hdev->pdev->dev, "invalid traffic class\n"); 7319 return -EINVAL; 7320 } 7321 7322 if (prio == 0 || 7323 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 7324 dev_err(&hdev->pdev->dev, 7325 "prio %u should be in range[1, %u]\n", 7326 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 7327 return -EINVAL; 7328 } 7329 7330 if (test_bit(prio - 1, hdev->fd_bmap)) { 7331 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); 7332 return -EINVAL; 7333 } 7334 return 0; 7335 } 7336 7337 static int hclge_add_cls_flower(struct hnae3_handle *handle, 7338 struct flow_cls_offload *cls_flower, 7339 int tc) 7340 { 7341 struct hclge_vport *vport = hclge_get_vport(handle); 7342 struct hclge_dev *hdev = vport->back; 7343 struct hclge_fd_rule *rule; 7344 int ret; 7345 7346 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { 7347 dev_err(&hdev->pdev->dev, 7348 "cls flower is not supported\n"); 7349 return -EOPNOTSUPP; 7350 } 7351 7352 ret = hclge_check_cls_flower(hdev, cls_flower, tc); 7353 if (ret) { 7354 dev_err(&hdev->pdev->dev, 7355 "failed to check cls flower params, ret = %d\n", ret); 7356 return ret; 7357 } 7358 7359 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 7360 if (!rule) 7361 return -ENOMEM; 7362 7363 ret = hclge_parse_cls_flower(hdev, cls_flower, rule); 7364 if (ret) { 7365 kfree(rule); 7366 return ret; 7367 } 7368 7369 rule->action = HCLGE_FD_ACTION_SELECT_TC; 7370 rule->cls_flower.tc = tc; 7371 rule->location = cls_flower->common.prio - 1; 7372 rule->vf_id = 0; 7373 rule->cls_flower.cookie = cls_flower->cookie; 7374 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; 7375 7376 ret = hclge_add_fd_entry_common(hdev, rule); 7377 if (ret) 7378 kfree(rule); 7379 
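/* On success the rule node is now owned by the fd rule list (it was
 * inserted by hclge_add_fd_entry_common()), so it is freed here only on
 * failure. Note that the tc flower priority maps directly to the rule
 * location as 'prio - 1', which is why hclge_check_cls_flower() limits
 * prio to [1, rule_num] and tests the corresponding bit in fd_bmap.
 */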
7380 return ret; 7381 } 7382 7383 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, 7384 unsigned long cookie) 7385 { 7386 struct hclge_fd_rule *rule; 7387 struct hlist_node *node; 7388 7389 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7390 if (rule->cls_flower.cookie == cookie) 7391 return rule; 7392 } 7393 7394 return NULL; 7395 } 7396 7397 static int hclge_del_cls_flower(struct hnae3_handle *handle, 7398 struct flow_cls_offload *cls_flower) 7399 { 7400 struct hclge_vport *vport = hclge_get_vport(handle); 7401 struct hclge_dev *hdev = vport->back; 7402 struct hclge_fd_rule *rule; 7403 int ret; 7404 7405 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7406 return -EOPNOTSUPP; 7407 7408 spin_lock_bh(&hdev->fd_rule_lock); 7409 7410 rule = hclge_find_cls_flower(hdev, cls_flower->cookie); 7411 if (!rule) { 7412 spin_unlock_bh(&hdev->fd_rule_lock); 7413 return -EINVAL; 7414 } 7415 7416 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, 7417 NULL, false); 7418 if (ret) { 7419 /* if tcam config fail, set rule state to TO_DEL, 7420 * so the rule will be deleted when periodic 7421 * task being scheduled. 7422 */ 7423 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); 7424 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7425 spin_unlock_bh(&hdev->fd_rule_lock); 7426 return ret; 7427 } 7428 7429 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); 7430 spin_unlock_bh(&hdev->fd_rule_lock); 7431 7432 return 0; 7433 } 7434 7435 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) 7436 { 7437 struct hclge_fd_rule *rule; 7438 struct hlist_node *node; 7439 int ret = 0; 7440 7441 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) 7442 return; 7443 7444 spin_lock_bh(&hdev->fd_rule_lock); 7445 7446 hlist_for_each_entry_safe(rule, node, hlist, rule_node) { 7447 switch (rule->state) { 7448 case HCLGE_FD_TO_ADD: 7449 ret = hclge_fd_config_rule(hdev, rule); 7450 if (ret) 7451 goto out; 7452 rule->state = HCLGE_FD_ACTIVE; 7453 break; 7454 case HCLGE_FD_TO_DEL: 7455 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 7456 rule->location, NULL, false); 7457 if (ret) 7458 goto out; 7459 hclge_fd_dec_rule_cnt(hdev, rule->location); 7460 hclge_fd_free_node(hdev, rule); 7461 break; 7462 default: 7463 break; 7464 } 7465 } 7466 7467 out: 7468 if (ret) 7469 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7470 7471 spin_unlock_bh(&hdev->fd_rule_lock); 7472 } 7473 7474 static void hclge_sync_fd_table(struct hclge_dev *hdev) 7475 { 7476 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7477 return; 7478 7479 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { 7480 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; 7481 7482 hclge_clear_fd_rules_in_list(hdev, clear_list); 7483 } 7484 7485 hclge_sync_fd_user_def_cfg(hdev, false); 7486 7487 hclge_sync_fd_list(hdev, &hdev->fd_rule_list); 7488 } 7489 7490 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) 7491 { 7492 struct hclge_vport *vport = hclge_get_vport(handle); 7493 struct hclge_dev *hdev = vport->back; 7494 7495 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || 7496 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); 7497 } 7498 7499 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle) 7500 { 7501 struct hclge_vport *vport = hclge_get_vport(handle); 7502 struct hclge_dev *hdev = vport->back; 7503 7504 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, 
&hdev->hw.hw.comm_state); 7505 } 7506 7507 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) 7508 { 7509 struct hclge_vport *vport = hclge_get_vport(handle); 7510 struct hclge_dev *hdev = vport->back; 7511 7512 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 7513 } 7514 7515 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) 7516 { 7517 struct hclge_vport *vport = hclge_get_vport(handle); 7518 struct hclge_dev *hdev = vport->back; 7519 7520 return hdev->rst_stats.hw_reset_done_cnt; 7521 } 7522 7523 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) 7524 { 7525 struct hclge_vport *vport = hclge_get_vport(handle); 7526 struct hclge_dev *hdev = vport->back; 7527 7528 hdev->fd_en = enable; 7529 7530 if (!enable) 7531 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); 7532 else 7533 hclge_restore_fd_entries(handle); 7534 7535 hclge_task_schedule(hdev, 0); 7536 } 7537 7538 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 7539 { 7540 #define HCLGE_LINK_STATUS_WAIT_CNT 3 7541 7542 struct hclge_desc desc; 7543 struct hclge_config_mac_mode_cmd *req = 7544 (struct hclge_config_mac_mode_cmd *)desc.data; 7545 u32 loop_en = 0; 7546 int ret; 7547 7548 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 7549 7550 if (enable) { 7551 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U); 7552 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U); 7553 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U); 7554 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U); 7555 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U); 7556 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U); 7557 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U); 7558 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U); 7559 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U); 7560 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U); 7561 } 7562 7563 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 7564 7565 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7566 if (ret) { 7567 dev_err(&hdev->pdev->dev, 7568 "mac enable fail, ret =%d.\n", ret); 7569 return; 7570 } 7571 7572 if (!enable) 7573 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, 7574 HCLGE_LINK_STATUS_WAIT_CNT); 7575 } 7576 7577 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, 7578 u8 switch_param, u8 param_mask) 7579 { 7580 struct hclge_mac_vlan_switch_cmd *req; 7581 struct hclge_desc desc; 7582 u32 func_id; 7583 int ret; 7584 7585 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); 7586 req = (struct hclge_mac_vlan_switch_cmd *)desc.data; 7587 7588 /* read current config parameter */ 7589 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, 7590 true); 7591 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; 7592 req->func_id = cpu_to_le32(func_id); 7593 7594 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7595 if (ret) { 7596 dev_err(&hdev->pdev->dev, 7597 "read mac vlan switch parameter fail, ret = %d\n", ret); 7598 return ret; 7599 } 7600 7601 /* modify and write new config parameter */ 7602 hclge_comm_cmd_reuse_desc(&desc, false); 7603 req->switch_param = (req->switch_param & param_mask) | switch_param; 7604 req->param_mask = param_mask; 7605 7606 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7607 if (ret) 7608 dev_err(&hdev->pdev->dev, 7609 "set mac vlan switch parameter fail, ret = %d\n", ret); 7610 return ret; 7611 } 7612 7613 static void hclge_phy_link_status_wait(struct hclge_dev *hdev, 7614 int link_ret) 7615 { 7616 #define HCLGE_PHY_LINK_STATUS_NUM 200 7617 
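	/* poll the PHY every HCLGE_LINK_STATUS_MS milliseconds, at most
	 * HCLGE_PHY_LINK_STATUS_NUM times, until phydev->link matches the
	 * requested state; give up silently if it never does
	 */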
7618 struct phy_device *phydev = hdev->hw.mac.phydev; 7619 int i = 0; 7620 int ret; 7621 7622 do { 7623 ret = phy_read_status(phydev); 7624 if (ret) { 7625 dev_err(&hdev->pdev->dev, 7626 "phy update link status fail, ret = %d\n", ret); 7627 return; 7628 } 7629 7630 if (phydev->link == link_ret) 7631 break; 7632 7633 msleep(HCLGE_LINK_STATUS_MS); 7634 } while (++i < HCLGE_PHY_LINK_STATUS_NUM); 7635 } 7636 7637 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, 7638 int wait_cnt) 7639 { 7640 int link_status; 7641 int i = 0; 7642 int ret; 7643 7644 do { 7645 ret = hclge_get_mac_link_status(hdev, &link_status); 7646 if (ret) 7647 return ret; 7648 if (link_status == link_ret) 7649 return 0; 7650 7651 msleep(HCLGE_LINK_STATUS_MS); 7652 } while (++i < wait_cnt); 7653 return -EBUSY; 7654 } 7655 7656 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, 7657 bool is_phy) 7658 { 7659 #define HCLGE_MAC_LINK_STATUS_NUM 100 7660 7661 int link_ret; 7662 7663 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 7664 7665 if (is_phy) 7666 hclge_phy_link_status_wait(hdev, link_ret); 7667 7668 return hclge_mac_link_status_wait(hdev, link_ret, 7669 HCLGE_MAC_LINK_STATUS_NUM); 7670 } 7671 7672 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) 7673 { 7674 struct hclge_config_mac_mode_cmd *req; 7675 struct hclge_desc desc; 7676 u32 loop_en; 7677 int ret; 7678 7679 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 7680 /* 1 Read out the MAC mode config at first */ 7681 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 7682 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7683 if (ret) { 7684 dev_err(&hdev->pdev->dev, 7685 "mac loopback get fail, ret =%d.\n", ret); 7686 return ret; 7687 } 7688 7689 /* 2 Then setup the loopback flag */ 7690 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 7691 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); 7692 7693 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 7694 7695 /* 3 Config mac work mode with loopback flag 7696 * and its original configure parameters 7697 */ 7698 hclge_comm_cmd_reuse_desc(&desc, false); 7699 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7700 if (ret) 7701 dev_err(&hdev->pdev->dev, 7702 "mac loopback set fail, ret =%d.\n", ret); 7703 return ret; 7704 } 7705 7706 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, 7707 enum hnae3_loop loop_mode) 7708 { 7709 struct hclge_common_lb_cmd *req; 7710 struct hclge_desc desc; 7711 u8 loop_mode_b; 7712 int ret; 7713 7714 req = (struct hclge_common_lb_cmd *)desc.data; 7715 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); 7716 7717 switch (loop_mode) { 7718 case HNAE3_LOOP_SERIAL_SERDES: 7719 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 7720 break; 7721 case HNAE3_LOOP_PARALLEL_SERDES: 7722 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; 7723 break; 7724 case HNAE3_LOOP_PHY: 7725 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B; 7726 break; 7727 default: 7728 dev_err(&hdev->pdev->dev, 7729 "unsupported loopback mode %d\n", loop_mode); 7730 return -ENOTSUPP; 7731 } 7732 7733 req->mask = loop_mode_b; 7734 if (en) 7735 req->enable = loop_mode_b; 7736 7737 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7738 if (ret) 7739 dev_err(&hdev->pdev->dev, 7740 "failed to send loopback cmd, loop_mode = %d, ret = %d\n", 7741 loop_mode, ret); 7742 7743 return ret; 7744 } 7745 7746 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) 7747 { 7748 #define HCLGE_COMMON_LB_RETRY_MS 10 7749 #define HCLGE_COMMON_LB_RETRY_NUM 100 7750 7751 struct hclge_common_lb_cmd *req; 7752 struct hclge_desc desc; 7753 u32 i = 0; 7754 int ret; 7755 7756 req = (struct hclge_common_lb_cmd *)desc.data; 7757 7758 do { 7759 msleep(HCLGE_COMMON_LB_RETRY_MS); 7760 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, 7761 true); 7762 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7763 if (ret) { 7764 dev_err(&hdev->pdev->dev, 7765 "failed to get loopback done status, ret = %d\n", 7766 ret); 7767 return ret; 7768 } 7769 } while (++i < HCLGE_COMMON_LB_RETRY_NUM && 7770 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); 7771 7772 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { 7773 dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); 7774 return -EBUSY; 7775 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { 7776 dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); 7777 return -EIO; 7778 } 7779 7780 return 0; 7781 } 7782 7783 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, 7784 enum hnae3_loop loop_mode) 7785 { 7786 int ret; 7787 7788 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); 7789 if (ret) 7790 return ret; 7791 7792 return hclge_cfg_common_loopback_wait(hdev); 7793 } 7794 7795 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, 7796 enum hnae3_loop loop_mode) 7797 { 7798 int ret; 7799 7800 ret = hclge_cfg_common_loopback(hdev, en, loop_mode); 7801 if (ret) 7802 return ret; 7803 7804 hclge_cfg_mac_mode(hdev, en); 7805 7806 ret = hclge_mac_phy_link_status_wait(hdev, en, false); 7807 if (ret) 7808 dev_err(&hdev->pdev->dev, 7809 "serdes loopback config mac mode timeout\n"); 7810 7811 return ret; 7812 } 7813 7814 static int hclge_enable_phy_loopback(struct hclge_dev *hdev, 7815 struct phy_device *phydev) 7816 { 7817 int ret; 7818 7819 if (!phydev->suspended) { 7820 ret = phy_suspend(phydev); 7821 if (ret) 7822 return ret; 7823 } 
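	/* the PHY was suspended above if it was still running; resume it
	 * here so it is powered up before loopback is enabled
	 */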
7824 7825 ret = phy_resume(phydev); 7826 if (ret) 7827 return ret; 7828 7829 return phy_loopback(phydev, true); 7830 } 7831 7832 static int hclge_disable_phy_loopback(struct hclge_dev *hdev, 7833 struct phy_device *phydev) 7834 { 7835 int ret; 7836 7837 ret = phy_loopback(phydev, false); 7838 if (ret) 7839 return ret; 7840 7841 return phy_suspend(phydev); 7842 } 7843 7844 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) 7845 { 7846 struct phy_device *phydev = hdev->hw.mac.phydev; 7847 int ret; 7848 7849 if (!phydev) { 7850 if (hnae3_dev_phy_imp_supported(hdev)) 7851 return hclge_set_common_loopback(hdev, en, 7852 HNAE3_LOOP_PHY); 7853 return -ENOTSUPP; 7854 } 7855 7856 if (en) 7857 ret = hclge_enable_phy_loopback(hdev, phydev); 7858 else 7859 ret = hclge_disable_phy_loopback(hdev, phydev); 7860 if (ret) { 7861 dev_err(&hdev->pdev->dev, 7862 "set phy loopback fail, ret = %d\n", ret); 7863 return ret; 7864 } 7865 7866 hclge_cfg_mac_mode(hdev, en); 7867 7868 ret = hclge_mac_phy_link_status_wait(hdev, en, true); 7869 if (ret) 7870 dev_err(&hdev->pdev->dev, 7871 "phy loopback config mac mode timeout\n"); 7872 7873 return ret; 7874 } 7875 7876 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, 7877 u16 stream_id, bool enable) 7878 { 7879 struct hclge_desc desc; 7880 struct hclge_cfg_com_tqp_queue_cmd *req = 7881 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 7882 7883 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 7884 req->tqp_id = cpu_to_le16(tqp_id); 7885 req->stream_id = cpu_to_le16(stream_id); 7886 if (enable) 7887 req->enable |= 1U << HCLGE_TQP_ENABLE_B; 7888 7889 return hclge_cmd_send(&hdev->hw, &desc, 1); 7890 } 7891 7892 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable) 7893 { 7894 struct hclge_vport *vport = hclge_get_vport(handle); 7895 struct hclge_dev *hdev = vport->back; 7896 int ret; 7897 u16 i; 7898 7899 for (i = 0; i < handle->kinfo.num_tqps; i++) { 7900 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); 7901 if (ret) 7902 return ret; 7903 } 7904 return 0; 7905 } 7906 7907 static int hclge_set_loopback(struct hnae3_handle *handle, 7908 enum hnae3_loop loop_mode, bool en) 7909 { 7910 struct hclge_vport *vport = hclge_get_vport(handle); 7911 struct hclge_dev *hdev = vport->back; 7912 int ret = 0; 7913 7914 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By 7915 * default, SSU loopback is enabled, so if the SMAC and the DMAC are 7916 * the same, the packets are looped back in the SSU. If SSU loopback 7917 * is disabled, packets can reach MAC even if SMAC is the same as DMAC. 7918 */ 7919 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 7920 u8 switch_param = en ? 
0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); 7921 7922 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, 7923 HCLGE_SWITCH_ALW_LPBK_MASK); 7924 if (ret) 7925 return ret; 7926 } 7927 7928 switch (loop_mode) { 7929 case HNAE3_LOOP_APP: 7930 ret = hclge_set_app_loopback(hdev, en); 7931 break; 7932 case HNAE3_LOOP_SERIAL_SERDES: 7933 case HNAE3_LOOP_PARALLEL_SERDES: 7934 ret = hclge_set_common_loopback(hdev, en, loop_mode); 7935 break; 7936 case HNAE3_LOOP_PHY: 7937 ret = hclge_set_phy_loopback(hdev, en); 7938 break; 7939 case HNAE3_LOOP_EXTERNAL: 7940 break; 7941 default: 7942 ret = -ENOTSUPP; 7943 dev_err(&hdev->pdev->dev, 7944 "loop_mode %d is not supported\n", loop_mode); 7945 break; 7946 } 7947 7948 if (ret) 7949 return ret; 7950 7951 ret = hclge_tqp_enable(handle, en); 7952 if (ret) 7953 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", 7954 en ? "enable" : "disable", ret); 7955 7956 return ret; 7957 } 7958 7959 static int hclge_set_default_loopback(struct hclge_dev *hdev) 7960 { 7961 int ret; 7962 7963 ret = hclge_set_app_loopback(hdev, false); 7964 if (ret) 7965 return ret; 7966 7967 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); 7968 if (ret) 7969 return ret; 7970 7971 return hclge_cfg_common_loopback(hdev, false, 7972 HNAE3_LOOP_PARALLEL_SERDES); 7973 } 7974 7975 static void hclge_flush_link_update(struct hclge_dev *hdev) 7976 { 7977 #define HCLGE_FLUSH_LINK_TIMEOUT 100000 7978 7979 unsigned long last = hdev->serv_processed_cnt; 7980 int i = 0; 7981 7982 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && 7983 i++ < HCLGE_FLUSH_LINK_TIMEOUT && 7984 last == hdev->serv_processed_cnt) 7985 usleep_range(1, 1); 7986 } 7987 7988 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) 7989 { 7990 struct hclge_vport *vport = hclge_get_vport(handle); 7991 struct hclge_dev *hdev = vport->back; 7992 7993 if (enable) { 7994 hclge_task_schedule(hdev, 0); 7995 } else { 7996 /* Set the DOWN flag here to disable link updating */ 7997 set_bit(HCLGE_STATE_DOWN, &hdev->state); 7998 7999 /* flush memory to make sure DOWN is seen by service task */ 8000 smp_mb__before_atomic(); 8001 hclge_flush_link_update(hdev); 8002 } 8003 } 8004 8005 static int hclge_ae_start(struct hnae3_handle *handle) 8006 { 8007 struct hclge_vport *vport = hclge_get_vport(handle); 8008 struct hclge_dev *hdev = vport->back; 8009 8010 /* mac enable */ 8011 hclge_cfg_mac_mode(hdev, true); 8012 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 8013 hdev->hw.mac.link = 0; 8014 8015 /* reset tqp stats */ 8016 hclge_comm_reset_tqp_stats(handle); 8017 8018 hclge_mac_start_phy(hdev); 8019 8020 return 0; 8021 } 8022 8023 static void hclge_ae_stop(struct hnae3_handle *handle) 8024 { 8025 struct hclge_vport *vport = hclge_get_vport(handle); 8026 struct hclge_dev *hdev = vport->back; 8027 8028 set_bit(HCLGE_STATE_DOWN, &hdev->state); 8029 spin_lock_bh(&hdev->fd_rule_lock); 8030 hclge_clear_arfs_rules(hdev); 8031 spin_unlock_bh(&hdev->fd_rule_lock); 8032 8033 /* If it is not PF reset or FLR, the firmware will disable the MAC, 8034 * so it only need to stop phy here. 
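	 * For PF reset and FLR (and for a normal stop outside of reset
	 * handling), fall through and reset the TQPs, then disable the MAC
	 * and stop the phy explicitly below.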
8035 */ 8036 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { 8037 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, 8038 HCLGE_PFC_DISABLE); 8039 if (hdev->reset_type != HNAE3_FUNC_RESET && 8040 hdev->reset_type != HNAE3_FLR_RESET) { 8041 hclge_mac_stop_phy(hdev); 8042 hclge_update_link_status(hdev); 8043 return; 8044 } 8045 } 8046 8047 hclge_reset_tqp(handle); 8048 8049 hclge_config_mac_tnl_int(hdev, false); 8050 8051 /* Mac disable */ 8052 hclge_cfg_mac_mode(hdev, false); 8053 8054 hclge_mac_stop_phy(hdev); 8055 8056 /* reset tqp stats */ 8057 hclge_comm_reset_tqp_stats(handle); 8058 hclge_update_link_status(hdev); 8059 } 8060 8061 int hclge_vport_start(struct hclge_vport *vport) 8062 { 8063 struct hclge_dev *hdev = vport->back; 8064 8065 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 8066 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 8067 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 8068 vport->last_active_jiffies = jiffies; 8069 vport->need_notify = 0; 8070 8071 if (test_bit(vport->vport_id, hdev->vport_config_block)) { 8072 if (vport->vport_id) { 8073 hclge_restore_mac_table_common(vport); 8074 hclge_restore_vport_vlan_table(vport); 8075 } else { 8076 hclge_restore_hw_table(hdev); 8077 } 8078 } 8079 8080 clear_bit(vport->vport_id, hdev->vport_config_block); 8081 8082 return 0; 8083 } 8084 8085 void hclge_vport_stop(struct hclge_vport *vport) 8086 { 8087 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 8088 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 8089 vport->need_notify = 0; 8090 } 8091 8092 static int hclge_client_start(struct hnae3_handle *handle) 8093 { 8094 struct hclge_vport *vport = hclge_get_vport(handle); 8095 8096 return hclge_vport_start(vport); 8097 } 8098 8099 static void hclge_client_stop(struct hnae3_handle *handle) 8100 { 8101 struct hclge_vport *vport = hclge_get_vport(handle); 8102 8103 hclge_vport_stop(vport); 8104 } 8105 8106 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 8107 u16 cmdq_resp, u8 resp_code, 8108 enum hclge_mac_vlan_tbl_opcode op) 8109 { 8110 struct hclge_dev *hdev = vport->back; 8111 8112 if (cmdq_resp) { 8113 dev_err(&hdev->pdev->dev, 8114 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n", 8115 cmdq_resp); 8116 return -EIO; 8117 } 8118 8119 if (op == HCLGE_MAC_VLAN_ADD) { 8120 if (!resp_code || resp_code == 1) 8121 return 0; 8122 else if (resp_code == HCLGE_ADD_UC_OVERFLOW || 8123 resp_code == HCLGE_ADD_MC_OVERFLOW) 8124 return -ENOSPC; 8125 8126 dev_err(&hdev->pdev->dev, 8127 "add mac addr failed for undefined, code=%u.\n", 8128 resp_code); 8129 return -EIO; 8130 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 8131 if (!resp_code) { 8132 return 0; 8133 } else if (resp_code == 1) { 8134 dev_dbg(&hdev->pdev->dev, 8135 "remove mac addr failed for miss.\n"); 8136 return -ENOENT; 8137 } 8138 8139 dev_err(&hdev->pdev->dev, 8140 "remove mac addr failed for undefined, code=%u.\n", 8141 resp_code); 8142 return -EIO; 8143 } else if (op == HCLGE_MAC_VLAN_LKUP) { 8144 if (!resp_code) { 8145 return 0; 8146 } else if (resp_code == 1) { 8147 dev_dbg(&hdev->pdev->dev, 8148 "lookup mac addr failed for miss.\n"); 8149 return -ENOENT; 8150 } 8151 8152 dev_err(&hdev->pdev->dev, 8153 "lookup mac addr failed for undefined, code=%u.\n", 8154 resp_code); 8155 return -EIO; 8156 } 8157 8158 dev_err(&hdev->pdev->dev, 8159 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); 8160 8161 return -EINVAL; 8162 } 8163 8164 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 
8165 { 8166 #define HCLGE_VF_NUM_IN_FIRST_DESC 192 8167 8168 unsigned int word_num; 8169 unsigned int bit_num; 8170 8171 if (vfid > 255 || vfid < 0) 8172 return -EIO; 8173 8174 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { 8175 word_num = vfid / 32; 8176 bit_num = vfid % 32; 8177 if (clr) 8178 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 8179 else 8180 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 8181 } else { 8182 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; 8183 bit_num = vfid % 32; 8184 if (clr) 8185 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 8186 else 8187 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 8188 } 8189 8190 return 0; 8191 } 8192 8193 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 8194 { 8195 #define HCLGE_DESC_NUMBER 3 8196 #define HCLGE_FUNC_NUMBER_PER_DESC 6 8197 int i, j; 8198 8199 for (i = 1; i < HCLGE_DESC_NUMBER; i++) 8200 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 8201 if (desc[i].data[j]) 8202 return false; 8203 8204 return true; 8205 } 8206 8207 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 8208 const u8 *addr, bool is_mc) 8209 { 8210 const unsigned char *mac_addr = addr; 8211 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 8212 (mac_addr[0]) | (mac_addr[1] << 8); 8213 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 8214 8215 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 8216 if (is_mc) { 8217 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 8218 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 8219 } 8220 8221 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 8222 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 8223 } 8224 8225 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 8226 struct hclge_mac_vlan_tbl_entry_cmd *req) 8227 { 8228 struct hclge_dev *hdev = vport->back; 8229 struct hclge_desc desc; 8230 u8 resp_code; 8231 u16 retval; 8232 int ret; 8233 8234 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 8235 8236 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8237 8238 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8239 if (ret) { 8240 dev_err(&hdev->pdev->dev, 8241 "del mac addr failed for cmd_send, ret =%d.\n", 8242 ret); 8243 return ret; 8244 } 8245 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 8246 retval = le16_to_cpu(desc.retval); 8247 8248 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 8249 HCLGE_MAC_VLAN_REMOVE); 8250 } 8251 8252 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 8253 struct hclge_mac_vlan_tbl_entry_cmd *req, 8254 struct hclge_desc *desc, 8255 bool is_mc) 8256 { 8257 struct hclge_dev *hdev = vport->back; 8258 u8 resp_code; 8259 u16 retval; 8260 int ret; 8261 8262 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 8263 if (is_mc) { 8264 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8265 memcpy(desc[0].data, 8266 req, 8267 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8268 hclge_cmd_setup_basic_desc(&desc[1], 8269 HCLGE_OPC_MAC_VLAN_ADD, 8270 true); 8271 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8272 hclge_cmd_setup_basic_desc(&desc[2], 8273 HCLGE_OPC_MAC_VLAN_ADD, 8274 true); 8275 ret = hclge_cmd_send(&hdev->hw, desc, 3); 8276 } else { 8277 memcpy(desc[0].data, 8278 req, 8279 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8280 ret = hclge_cmd_send(&hdev->hw, desc, 1); 8281 } 8282 if (ret) { 8283 dev_err(&hdev->pdev->dev, 8284 "lookup mac 
addr failed for cmd_send, ret =%d.\n", 8285 ret); 8286 return ret; 8287 } 8288 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 8289 retval = le16_to_cpu(desc[0].retval); 8290 8291 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 8292 HCLGE_MAC_VLAN_LKUP); 8293 } 8294 8295 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 8296 struct hclge_mac_vlan_tbl_entry_cmd *req, 8297 struct hclge_desc *mc_desc) 8298 { 8299 struct hclge_dev *hdev = vport->back; 8300 int cfg_status; 8301 u8 resp_code; 8302 u16 retval; 8303 int ret; 8304 8305 if (!mc_desc) { 8306 struct hclge_desc desc; 8307 8308 hclge_cmd_setup_basic_desc(&desc, 8309 HCLGE_OPC_MAC_VLAN_ADD, 8310 false); 8311 memcpy(desc.data, req, 8312 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8313 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8314 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 8315 retval = le16_to_cpu(desc.retval); 8316 8317 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 8318 resp_code, 8319 HCLGE_MAC_VLAN_ADD); 8320 } else { 8321 hclge_comm_cmd_reuse_desc(&mc_desc[0], false); 8322 mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8323 hclge_comm_cmd_reuse_desc(&mc_desc[1], false); 8324 mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8325 hclge_comm_cmd_reuse_desc(&mc_desc[2], false); 8326 mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT); 8327 memcpy(mc_desc[0].data, req, 8328 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8329 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 8330 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 8331 retval = le16_to_cpu(mc_desc[0].retval); 8332 8333 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 8334 resp_code, 8335 HCLGE_MAC_VLAN_ADD); 8336 } 8337 8338 if (ret) { 8339 dev_err(&hdev->pdev->dev, 8340 "add mac addr failed for cmd_send, ret =%d.\n", 8341 ret); 8342 return ret; 8343 } 8344 8345 return cfg_status; 8346 } 8347 8348 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, 8349 u16 *allocated_size) 8350 { 8351 struct hclge_umv_spc_alc_cmd *req; 8352 struct hclge_desc desc; 8353 int ret; 8354 8355 req = (struct hclge_umv_spc_alc_cmd *)desc.data; 8356 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); 8357 8358 req->space_size = cpu_to_le32(space_size); 8359 8360 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8361 if (ret) { 8362 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", 8363 ret); 8364 return ret; 8365 } 8366 8367 *allocated_size = le32_to_cpu(desc.data[1]); 8368 8369 return 0; 8370 } 8371 8372 static int hclge_init_umv_space(struct hclge_dev *hdev) 8373 { 8374 u16 allocated_size = 0; 8375 int ret; 8376 8377 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); 8378 if (ret) 8379 return ret; 8380 8381 if (allocated_size < hdev->wanted_umv_size) 8382 dev_warn(&hdev->pdev->dev, 8383 "failed to alloc umv space, want %u, get %u\n", 8384 hdev->wanted_umv_size, allocated_size); 8385 8386 hdev->max_umv_size = allocated_size; 8387 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); 8388 hdev->share_umv_size = hdev->priv_umv_size + 8389 hdev->max_umv_size % (hdev->num_alloc_vport + 1); 8390 8391 if (hdev->ae_dev->dev_specs.mc_mac_size) 8392 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); 8393 8394 return 0; 8395 } 8396 8397 static void hclge_reset_umv_space(struct hclge_dev *hdev) 8398 { 8399 struct hclge_vport *vport; 8400 int i; 8401 8402 for (i = 0; i < hdev->num_alloc_vport; i++) { 8403 vport = 
&hdev->vport[i]; 8404 vport->used_umv_num = 0; 8405 } 8406 8407 mutex_lock(&hdev->vport_lock); 8408 hdev->share_umv_size = hdev->priv_umv_size + 8409 hdev->max_umv_size % (hdev->num_alloc_vport + 1); 8410 mutex_unlock(&hdev->vport_lock); 8411 8412 hdev->used_mc_mac_num = 0; 8413 } 8414 8415 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) 8416 { 8417 struct hclge_dev *hdev = vport->back; 8418 bool is_full; 8419 8420 if (need_lock) 8421 mutex_lock(&hdev->vport_lock); 8422 8423 is_full = (vport->used_umv_num >= hdev->priv_umv_size && 8424 hdev->share_umv_size == 0); 8425 8426 if (need_lock) 8427 mutex_unlock(&hdev->vport_lock); 8428 8429 return is_full; 8430 } 8431 8432 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) 8433 { 8434 struct hclge_dev *hdev = vport->back; 8435 8436 if (is_free) { 8437 if (vport->used_umv_num > hdev->priv_umv_size) 8438 hdev->share_umv_size++; 8439 8440 if (vport->used_umv_num > 0) 8441 vport->used_umv_num--; 8442 } else { 8443 if (vport->used_umv_num >= hdev->priv_umv_size && 8444 hdev->share_umv_size > 0) 8445 hdev->share_umv_size--; 8446 vport->used_umv_num++; 8447 } 8448 } 8449 8450 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, 8451 const u8 *mac_addr) 8452 { 8453 struct hclge_mac_node *mac_node, *tmp; 8454 8455 list_for_each_entry_safe(mac_node, tmp, list, node) 8456 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 8457 return mac_node; 8458 8459 return NULL; 8460 } 8461 8462 static void hclge_update_mac_node(struct hclge_mac_node *mac_node, 8463 enum HCLGE_MAC_NODE_STATE state) 8464 { 8465 switch (state) { 8466 /* from set_rx_mode or tmp_add_list */ 8467 case HCLGE_MAC_TO_ADD: 8468 if (mac_node->state == HCLGE_MAC_TO_DEL) 8469 mac_node->state = HCLGE_MAC_ACTIVE; 8470 break; 8471 /* only from set_rx_mode */ 8472 case HCLGE_MAC_TO_DEL: 8473 if (mac_node->state == HCLGE_MAC_TO_ADD) { 8474 list_del(&mac_node->node); 8475 kfree(mac_node); 8476 } else { 8477 mac_node->state = HCLGE_MAC_TO_DEL; 8478 } 8479 break; 8480 /* only from tmp_add_list, the mac_node->state won't be 8481 * ACTIVE. 8482 */ 8483 case HCLGE_MAC_ACTIVE: 8484 if (mac_node->state == HCLGE_MAC_TO_ADD) 8485 mac_node->state = HCLGE_MAC_ACTIVE; 8486 8487 break; 8488 } 8489 } 8490 8491 int hclge_update_mac_list(struct hclge_vport *vport, 8492 enum HCLGE_MAC_NODE_STATE state, 8493 enum HCLGE_MAC_ADDR_TYPE mac_type, 8494 const unsigned char *addr) 8495 { 8496 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8497 struct hclge_dev *hdev = vport->back; 8498 struct hclge_mac_node *mac_node; 8499 struct list_head *list; 8500 8501 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 8502 &vport->uc_mac_list : &vport->mc_mac_list; 8503 8504 spin_lock_bh(&vport->mac_list_lock); 8505 8506 /* if the mac addr is already in the mac list, no need to add a new 8507 * one into it, just check the mac addr state, convert it to a new 8508 * state, or just remove it, or do nothing. 
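	 * hclge_update_mac_node() performs the merge: a TO_DEL node that
	 * receives a TO_ADD request becomes ACTIVE again, a TO_ADD node that
	 * receives a TO_DEL request is freed (it was never written to
	 * hardware), and an ACTIVE node that receives a TO_DEL request is
	 * marked TO_DEL for the sync task to remove.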
8509 */ 8510 mac_node = hclge_find_mac_node(list, addr); 8511 if (mac_node) { 8512 hclge_update_mac_node(mac_node, state); 8513 spin_unlock_bh(&vport->mac_list_lock); 8514 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 8515 return 0; 8516 } 8517 8518 /* if this address is never added, unnecessary to delete */ 8519 if (state == HCLGE_MAC_TO_DEL) { 8520 spin_unlock_bh(&vport->mac_list_lock); 8521 hnae3_format_mac_addr(format_mac_addr, addr); 8522 dev_err(&hdev->pdev->dev, 8523 "failed to delete address %s from mac list\n", 8524 format_mac_addr); 8525 return -ENOENT; 8526 } 8527 8528 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 8529 if (!mac_node) { 8530 spin_unlock_bh(&vport->mac_list_lock); 8531 return -ENOMEM; 8532 } 8533 8534 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 8535 8536 mac_node->state = state; 8537 ether_addr_copy(mac_node->mac_addr, addr); 8538 list_add_tail(&mac_node->node, list); 8539 8540 spin_unlock_bh(&vport->mac_list_lock); 8541 8542 return 0; 8543 } 8544 8545 static int hclge_add_uc_addr(struct hnae3_handle *handle, 8546 const unsigned char *addr) 8547 { 8548 struct hclge_vport *vport = hclge_get_vport(handle); 8549 8550 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC, 8551 addr); 8552 } 8553 8554 int hclge_add_uc_addr_common(struct hclge_vport *vport, 8555 const unsigned char *addr) 8556 { 8557 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8558 struct hclge_dev *hdev = vport->back; 8559 struct hclge_mac_vlan_tbl_entry_cmd req; 8560 struct hclge_desc desc; 8561 u16 egress_port = 0; 8562 int ret; 8563 8564 /* mac addr check */ 8565 if (is_zero_ether_addr(addr) || 8566 is_broadcast_ether_addr(addr) || 8567 is_multicast_ether_addr(addr)) { 8568 hnae3_format_mac_addr(format_mac_addr, addr); 8569 dev_err(&hdev->pdev->dev, 8570 "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n", 8571 format_mac_addr, is_zero_ether_addr(addr), 8572 is_broadcast_ether_addr(addr), 8573 is_multicast_ether_addr(addr)); 8574 return -EINVAL; 8575 } 8576 8577 memset(&req, 0, sizeof(req)); 8578 8579 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 8580 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 8581 8582 req.egress_port = cpu_to_le16(egress_port); 8583 8584 hclge_prepare_mac_addr(&req, addr, false); 8585 8586 /* Lookup the mac address in the mac_vlan table, and add 8587 * it if the entry is inexistent. Repeated unicast entry 8588 * is not allowed in the mac vlan table. 
8589 */ 8590 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 8591 if (ret == -ENOENT) { 8592 mutex_lock(&hdev->vport_lock); 8593 if (!hclge_is_umv_space_full(vport, false)) { 8594 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); 8595 if (!ret) 8596 hclge_update_umv_space(vport, false); 8597 mutex_unlock(&hdev->vport_lock); 8598 return ret; 8599 } 8600 mutex_unlock(&hdev->vport_lock); 8601 8602 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) 8603 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", 8604 hdev->priv_umv_size); 8605 8606 return -ENOSPC; 8607 } 8608 8609 /* check if we just hit the duplicate */ 8610 if (!ret) 8611 return -EEXIST; 8612 8613 return ret; 8614 } 8615 8616 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 8617 const unsigned char *addr) 8618 { 8619 struct hclge_vport *vport = hclge_get_vport(handle); 8620 8621 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC, 8622 addr); 8623 } 8624 8625 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 8626 const unsigned char *addr) 8627 { 8628 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8629 struct hclge_dev *hdev = vport->back; 8630 struct hclge_mac_vlan_tbl_entry_cmd req; 8631 int ret; 8632 8633 /* mac addr check */ 8634 if (is_zero_ether_addr(addr) || 8635 is_broadcast_ether_addr(addr) || 8636 is_multicast_ether_addr(addr)) { 8637 hnae3_format_mac_addr(format_mac_addr, addr); 8638 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", 8639 format_mac_addr); 8640 return -EINVAL; 8641 } 8642 8643 memset(&req, 0, sizeof(req)); 8644 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 8645 hclge_prepare_mac_addr(&req, addr, false); 8646 ret = hclge_remove_mac_vlan_tbl(vport, &req); 8647 if (!ret || ret == -ENOENT) { 8648 mutex_lock(&hdev->vport_lock); 8649 hclge_update_umv_space(vport, true); 8650 mutex_unlock(&hdev->vport_lock); 8651 return 0; 8652 } 8653 8654 return ret; 8655 } 8656 8657 static int hclge_add_mc_addr(struct hnae3_handle *handle, 8658 const unsigned char *addr) 8659 { 8660 struct hclge_vport *vport = hclge_get_vport(handle); 8661 8662 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC, 8663 addr); 8664 } 8665 8666 int hclge_add_mc_addr_common(struct hclge_vport *vport, 8667 const unsigned char *addr) 8668 { 8669 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8670 struct hclge_dev *hdev = vport->back; 8671 struct hclge_mac_vlan_tbl_entry_cmd req; 8672 struct hclge_desc desc[3]; 8673 bool is_new_addr = false; 8674 int status; 8675 8676 /* mac addr check */ 8677 if (!is_multicast_ether_addr(addr)) { 8678 hnae3_format_mac_addr(format_mac_addr, addr); 8679 dev_err(&hdev->pdev->dev, 8680 "Add mc mac err! 
invalid mac:%s.\n",
			format_mac_addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
		    hdev->used_mc_mac_num >=
		    hdev->ae_dev->dev_specs.mc_mac_size)
			goto err_no_space;

		is_new_addr = true;

		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	if (status == -ENOSPC)
		goto err_no_space;
	else if (!status && is_new_addr)
		hdev->used_mc_mac_num++;

	return status;

err_no_space:
	/* if the table has already overflowed, do not print for each failure */
	if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
		vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
	}

	return -ENOSPC;
}

static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_comm_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err!
invalid mac:%s.\n", 8744 format_mac_addr); 8745 return -EINVAL; 8746 } 8747 8748 memset(&req, 0, sizeof(req)); 8749 hclge_prepare_mac_addr(&req, addr, true); 8750 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 8751 if (!status) { 8752 /* This mac addr exist, remove this handle's VFID for it */ 8753 status = hclge_update_desc_vfid(desc, vport->vport_id, true); 8754 if (status) 8755 return status; 8756 8757 if (hclge_is_all_function_id_zero(desc)) { 8758 /* All the vfid is zero, so need to delete this entry */ 8759 status = hclge_remove_mac_vlan_tbl(vport, &req); 8760 if (!status) 8761 hdev->used_mc_mac_num--; 8762 } else { 8763 /* Not all the vfid is zero, update the vfid */ 8764 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 8765 } 8766 } else if (status == -ENOENT) { 8767 status = 0; 8768 } 8769 8770 return status; 8771 } 8772 8773 static void hclge_sync_vport_mac_list(struct hclge_vport *vport, 8774 struct list_head *list, 8775 enum HCLGE_MAC_ADDR_TYPE mac_type) 8776 { 8777 int (*sync)(struct hclge_vport *vport, const unsigned char *addr); 8778 struct hclge_mac_node *mac_node, *tmp; 8779 int ret; 8780 8781 if (mac_type == HCLGE_MAC_ADDR_UC) 8782 sync = hclge_add_uc_addr_common; 8783 else 8784 sync = hclge_add_mc_addr_common; 8785 8786 list_for_each_entry_safe(mac_node, tmp, list, node) { 8787 ret = sync(vport, mac_node->mac_addr); 8788 if (!ret) { 8789 mac_node->state = HCLGE_MAC_ACTIVE; 8790 } else { 8791 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, 8792 &vport->state); 8793 8794 /* If one unicast mac address is existing in hardware, 8795 * we need to try whether other unicast mac addresses 8796 * are new addresses that can be added. 8797 * Multicast mac address can be reusable, even though 8798 * there is no space to add new multicast mac address, 8799 * we should check whether other mac addresses are 8800 * existing in hardware for reuse. 8801 */ 8802 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) || 8803 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC)) 8804 break; 8805 } 8806 } 8807 } 8808 8809 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, 8810 struct list_head *list, 8811 enum HCLGE_MAC_ADDR_TYPE mac_type) 8812 { 8813 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); 8814 struct hclge_mac_node *mac_node, *tmp; 8815 int ret; 8816 8817 if (mac_type == HCLGE_MAC_ADDR_UC) 8818 unsync = hclge_rm_uc_addr_common; 8819 else 8820 unsync = hclge_rm_mc_addr_common; 8821 8822 list_for_each_entry_safe(mac_node, tmp, list, node) { 8823 ret = unsync(vport, mac_node->mac_addr); 8824 if (!ret || ret == -ENOENT) { 8825 list_del(&mac_node->node); 8826 kfree(mac_node); 8827 } else { 8828 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, 8829 &vport->state); 8830 break; 8831 } 8832 } 8833 } 8834 8835 static bool hclge_sync_from_add_list(struct list_head *add_list, 8836 struct list_head *mac_list) 8837 { 8838 struct hclge_mac_node *mac_node, *tmp, *new_node; 8839 bool all_added = true; 8840 8841 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 8842 if (mac_node->state == HCLGE_MAC_TO_ADD) 8843 all_added = false; 8844 8845 /* if the mac address from tmp_add_list is not in the 8846 * uc/mc_mac_list, it means have received a TO_DEL request 8847 * during the time window of adding the mac address into mac 8848 * table. if mac_node state is ACTIVE, then change it to TO_DEL, 8849 * then it will be removed at next time. else it must be TO_ADD, 8850 * this address hasn't been added into mac table, 8851 * so just remove the mac node. 
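		 * If the address is still present in the uc/mc_mac_list, merge
		 * the state recorded in tmp_add_list into it with
		 * hclge_update_mac_node() and free the temporary node.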
8852 */ 8853 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); 8854 if (new_node) { 8855 hclge_update_mac_node(new_node, mac_node->state); 8856 list_del(&mac_node->node); 8857 kfree(mac_node); 8858 } else if (mac_node->state == HCLGE_MAC_ACTIVE) { 8859 mac_node->state = HCLGE_MAC_TO_DEL; 8860 list_move_tail(&mac_node->node, mac_list); 8861 } else { 8862 list_del(&mac_node->node); 8863 kfree(mac_node); 8864 } 8865 } 8866 8867 return all_added; 8868 } 8869 8870 static void hclge_sync_from_del_list(struct list_head *del_list, 8871 struct list_head *mac_list) 8872 { 8873 struct hclge_mac_node *mac_node, *tmp, *new_node; 8874 8875 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 8876 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); 8877 if (new_node) { 8878 /* If the mac addr exists in the mac list, it means 8879 * received a new TO_ADD request during the time window 8880 * of configuring the mac address. For the mac node 8881 * state is TO_ADD, and the address is already in the 8882 * in the hardware(due to delete fail), so we just need 8883 * to change the mac node state to ACTIVE. 8884 */ 8885 new_node->state = HCLGE_MAC_ACTIVE; 8886 list_del(&mac_node->node); 8887 kfree(mac_node); 8888 } else { 8889 list_move_tail(&mac_node->node, mac_list); 8890 } 8891 } 8892 } 8893 8894 static void hclge_update_overflow_flags(struct hclge_vport *vport, 8895 enum HCLGE_MAC_ADDR_TYPE mac_type, 8896 bool is_all_added) 8897 { 8898 if (mac_type == HCLGE_MAC_ADDR_UC) { 8899 if (is_all_added) 8900 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; 8901 else if (hclge_is_umv_space_full(vport, true)) 8902 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; 8903 } else { 8904 if (is_all_added) 8905 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; 8906 else 8907 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; 8908 } 8909 } 8910 8911 static void hclge_sync_vport_mac_table(struct hclge_vport *vport, 8912 enum HCLGE_MAC_ADDR_TYPE mac_type) 8913 { 8914 struct hclge_mac_node *mac_node, *tmp, *new_node; 8915 struct list_head tmp_add_list, tmp_del_list; 8916 struct list_head *list; 8917 bool all_added; 8918 8919 INIT_LIST_HEAD(&tmp_add_list); 8920 INIT_LIST_HEAD(&tmp_del_list); 8921 8922 /* move the mac addr to the tmp_add_list and tmp_del_list, then 8923 * we can add/delete these mac addr outside the spin lock 8924 */ 8925 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 8926 &vport->uc_mac_list : &vport->mc_mac_list; 8927 8928 spin_lock_bh(&vport->mac_list_lock); 8929 8930 list_for_each_entry_safe(mac_node, tmp, list, node) { 8931 switch (mac_node->state) { 8932 case HCLGE_MAC_TO_DEL: 8933 list_move_tail(&mac_node->node, &tmp_del_list); 8934 break; 8935 case HCLGE_MAC_TO_ADD: 8936 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 8937 if (!new_node) 8938 goto stop_traverse; 8939 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 8940 new_node->state = mac_node->state; 8941 list_add_tail(&new_node->node, &tmp_add_list); 8942 break; 8943 default: 8944 break; 8945 } 8946 } 8947 8948 stop_traverse: 8949 spin_unlock_bh(&vport->mac_list_lock); 8950 8951 /* delete first, in order to get max mac table space for adding */ 8952 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type); 8953 hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type); 8954 8955 /* if some mac addresses were added/deleted fail, move back to the 8956 * mac_list, and retry at next time. 
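	 * hclge_sync_from_add_list() also reports whether every address
	 * reached ACTIVE state; the result is used below to update the
	 * overflow promiscuous flags.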
8957 */ 8958 spin_lock_bh(&vport->mac_list_lock); 8959 8960 hclge_sync_from_del_list(&tmp_del_list, list); 8961 all_added = hclge_sync_from_add_list(&tmp_add_list, list); 8962 8963 spin_unlock_bh(&vport->mac_list_lock); 8964 8965 hclge_update_overflow_flags(vport, mac_type, all_added); 8966 } 8967 8968 static bool hclge_need_sync_mac_table(struct hclge_vport *vport) 8969 { 8970 struct hclge_dev *hdev = vport->back; 8971 8972 if (test_bit(vport->vport_id, hdev->vport_config_block)) 8973 return false; 8974 8975 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) 8976 return true; 8977 8978 return false; 8979 } 8980 8981 static void hclge_sync_mac_table(struct hclge_dev *hdev) 8982 { 8983 int i; 8984 8985 for (i = 0; i < hdev->num_alloc_vport; i++) { 8986 struct hclge_vport *vport = &hdev->vport[i]; 8987 8988 if (!hclge_need_sync_mac_table(vport)) 8989 continue; 8990 8991 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC); 8992 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC); 8993 } 8994 } 8995 8996 static void hclge_build_del_list(struct list_head *list, 8997 bool is_del_list, 8998 struct list_head *tmp_del_list) 8999 { 9000 struct hclge_mac_node *mac_cfg, *tmp; 9001 9002 list_for_each_entry_safe(mac_cfg, tmp, list, node) { 9003 switch (mac_cfg->state) { 9004 case HCLGE_MAC_TO_DEL: 9005 case HCLGE_MAC_ACTIVE: 9006 list_move_tail(&mac_cfg->node, tmp_del_list); 9007 break; 9008 case HCLGE_MAC_TO_ADD: 9009 if (is_del_list) { 9010 list_del(&mac_cfg->node); 9011 kfree(mac_cfg); 9012 } 9013 break; 9014 } 9015 } 9016 } 9017 9018 static void hclge_unsync_del_list(struct hclge_vport *vport, 9019 int (*unsync)(struct hclge_vport *vport, 9020 const unsigned char *addr), 9021 bool is_del_list, 9022 struct list_head *tmp_del_list) 9023 { 9024 struct hclge_mac_node *mac_cfg, *tmp; 9025 int ret; 9026 9027 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) { 9028 ret = unsync(vport, mac_cfg->mac_addr); 9029 if (!ret || ret == -ENOENT) { 9030 /* clear all mac addr from hardware, but remain these 9031 * mac addr in the mac list, and restore them after 9032 * vf reset finished. 
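			 * When is_del_list is false, an ACTIVE entry is
			 * downgraded to TO_ADD so the sync task re-adds it
			 * later; otherwise the entry is dropped from the list
			 * and freed.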
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_build_del_list(list, is_del_list, &tmp_del_list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}

/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "uninit %s mac list for vport %u not completely.\n",
			 mac_type == HCLGE_MAC_ADDR_UC ?
"uc" : "mc", 9118 vport->vport_id); 9119 9120 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { 9121 list_del(&mac_node->node); 9122 kfree(mac_node); 9123 } 9124 } 9125 9126 static void hclge_uninit_mac_table(struct hclge_dev *hdev) 9127 { 9128 struct hclge_vport *vport; 9129 int i; 9130 9131 for (i = 0; i < hdev->num_alloc_vport; i++) { 9132 vport = &hdev->vport[i]; 9133 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC); 9134 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC); 9135 } 9136 } 9137 9138 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 9139 u16 cmdq_resp, u8 resp_code) 9140 { 9141 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 9142 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 9143 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 9144 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 9145 9146 int return_status; 9147 9148 if (cmdq_resp) { 9149 dev_err(&hdev->pdev->dev, 9150 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 9151 cmdq_resp); 9152 return -EIO; 9153 } 9154 9155 switch (resp_code) { 9156 case HCLGE_ETHERTYPE_SUCCESS_ADD: 9157 case HCLGE_ETHERTYPE_ALREADY_ADD: 9158 return_status = 0; 9159 break; 9160 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 9161 dev_err(&hdev->pdev->dev, 9162 "add mac ethertype failed for manager table overflow.\n"); 9163 return_status = -EIO; 9164 break; 9165 case HCLGE_ETHERTYPE_KEY_CONFLICT: 9166 dev_err(&hdev->pdev->dev, 9167 "add mac ethertype failed for key conflict.\n"); 9168 return_status = -EIO; 9169 break; 9170 default: 9171 dev_err(&hdev->pdev->dev, 9172 "add mac ethertype failed for undefined, code=%u.\n", 9173 resp_code); 9174 return_status = -EIO; 9175 } 9176 9177 return return_status; 9178 } 9179 9180 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, 9181 u8 *mac_addr) 9182 { 9183 struct hclge_vport *vport = hclge_get_vport(handle); 9184 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 9185 struct hclge_dev *hdev = vport->back; 9186 9187 vport = hclge_get_vf_vport(hdev, vf); 9188 if (!vport) 9189 return -EINVAL; 9190 9191 hnae3_format_mac_addr(format_mac_addr, mac_addr); 9192 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { 9193 dev_info(&hdev->pdev->dev, 9194 "Specified MAC(=%s) is same as before, no change committed!\n", 9195 format_mac_addr); 9196 return 0; 9197 } 9198 9199 ether_addr_copy(vport->vf_info.mac, mac_addr); 9200 9201 /* there is a timewindow for PF to know VF unalive, it may 9202 * cause send mailbox fail, but it doesn't matter, VF will 9203 * query it when reinit. 
9204 */ 9205 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { 9206 dev_info(&hdev->pdev->dev, 9207 "MAC of VF %d has been set to %s, and it will be reinitialized!\n", 9208 vf, format_mac_addr); 9209 (void)hclge_inform_reset_assert_to_vf(vport); 9210 return 0; 9211 } 9212 9213 dev_info(&hdev->pdev->dev, 9214 "MAC of VF %d has been set to %s, will be active after VF reset\n", 9215 vf, format_mac_addr); 9216 return 0; 9217 } 9218 9219 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 9220 const struct hclge_mac_mgr_tbl_entry_cmd *req) 9221 { 9222 struct hclge_desc desc; 9223 u8 resp_code; 9224 u16 retval; 9225 int ret; 9226 9227 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 9228 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 9229 9230 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9231 if (ret) { 9232 dev_err(&hdev->pdev->dev, 9233 "add mac ethertype failed for cmd_send, ret =%d.\n", 9234 ret); 9235 return ret; 9236 } 9237 9238 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 9239 retval = le16_to_cpu(desc.retval); 9240 9241 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 9242 } 9243 9244 static int init_mgr_tbl(struct hclge_dev *hdev) 9245 { 9246 int ret; 9247 int i; 9248 9249 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 9250 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 9251 if (ret) { 9252 dev_err(&hdev->pdev->dev, 9253 "add mac ethertype failed, ret =%d.\n", 9254 ret); 9255 return ret; 9256 } 9257 } 9258 9259 return 0; 9260 } 9261 9262 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 9263 { 9264 struct hclge_vport *vport = hclge_get_vport(handle); 9265 struct hclge_dev *hdev = vport->back; 9266 9267 ether_addr_copy(p, hdev->hw.mac.mac_addr); 9268 } 9269 9270 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, 9271 const u8 *old_addr, const u8 *new_addr) 9272 { 9273 struct list_head *list = &vport->uc_mac_list; 9274 struct hclge_mac_node *old_node, *new_node; 9275 9276 new_node = hclge_find_mac_node(list, new_addr); 9277 if (!new_node) { 9278 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 9279 if (!new_node) 9280 return -ENOMEM; 9281 9282 new_node->state = HCLGE_MAC_TO_ADD; 9283 ether_addr_copy(new_node->mac_addr, new_addr); 9284 list_add(&new_node->node, list); 9285 } else { 9286 if (new_node->state == HCLGE_MAC_TO_DEL) 9287 new_node->state = HCLGE_MAC_ACTIVE; 9288 9289 /* make sure the new addr is in the list head, avoid dev 9290 * addr may be not re-added into mac table for the umv space 9291 * limitation after global/imp reset which will clear mac 9292 * table by hardware. 
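		 * list_move() keeps the device address at the head of
		 * uc_mac_list, so it is the first unicast entry the sync task
		 * tries to add back.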
9293 */ 9294 list_move(&new_node->node, list); 9295 } 9296 9297 if (old_addr && !ether_addr_equal(old_addr, new_addr)) { 9298 old_node = hclge_find_mac_node(list, old_addr); 9299 if (old_node) { 9300 if (old_node->state == HCLGE_MAC_TO_ADD) { 9301 list_del(&old_node->node); 9302 kfree(old_node); 9303 } else { 9304 old_node->state = HCLGE_MAC_TO_DEL; 9305 } 9306 } 9307 } 9308 9309 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 9310 9311 return 0; 9312 } 9313 9314 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, 9315 bool is_first) 9316 { 9317 const unsigned char *new_addr = (const unsigned char *)p; 9318 struct hclge_vport *vport = hclge_get_vport(handle); 9319 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 9320 struct hclge_dev *hdev = vport->back; 9321 unsigned char *old_addr = NULL; 9322 int ret; 9323 9324 /* mac addr check */ 9325 if (is_zero_ether_addr(new_addr) || 9326 is_broadcast_ether_addr(new_addr) || 9327 is_multicast_ether_addr(new_addr)) { 9328 hnae3_format_mac_addr(format_mac_addr, new_addr); 9329 dev_err(&hdev->pdev->dev, 9330 "change uc mac err! invalid mac: %s.\n", 9331 format_mac_addr); 9332 return -EINVAL; 9333 } 9334 9335 ret = hclge_pause_addr_cfg(hdev, new_addr); 9336 if (ret) { 9337 dev_err(&hdev->pdev->dev, 9338 "failed to configure mac pause address, ret = %d\n", 9339 ret); 9340 return ret; 9341 } 9342 9343 if (!is_first) 9344 old_addr = hdev->hw.mac.mac_addr; 9345 9346 spin_lock_bh(&vport->mac_list_lock); 9347 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); 9348 if (ret) { 9349 hnae3_format_mac_addr(format_mac_addr, new_addr); 9350 dev_err(&hdev->pdev->dev, 9351 "failed to change the mac addr:%s, ret = %d\n", 9352 format_mac_addr, ret); 9353 spin_unlock_bh(&vport->mac_list_lock); 9354 9355 if (!is_first) 9356 hclge_pause_addr_cfg(hdev, old_addr); 9357 9358 return ret; 9359 } 9360 /* we must update dev addr with spin lock protect, preventing dev addr 9361 * being removed by set_rx_mode path. 
9362 */ 9363 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 9364 spin_unlock_bh(&vport->mac_list_lock); 9365 9366 hclge_task_schedule(hdev, 0); 9367 9368 return 0; 9369 } 9370 9371 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) 9372 { 9373 struct mii_ioctl_data *data = if_mii(ifr); 9374 9375 if (!hnae3_dev_phy_imp_supported(hdev)) 9376 return -EOPNOTSUPP; 9377 9378 switch (cmd) { 9379 case SIOCGMIIPHY: 9380 data->phy_id = hdev->hw.mac.phy_addr; 9381 /* this command reads phy id and register at the same time */ 9382 fallthrough; 9383 case SIOCGMIIREG: 9384 data->val_out = hclge_read_phy_reg(hdev, data->reg_num); 9385 return 0; 9386 9387 case SIOCSMIIREG: 9388 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); 9389 default: 9390 return -EOPNOTSUPP; 9391 } 9392 } 9393 9394 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, 9395 int cmd) 9396 { 9397 struct hclge_vport *vport = hclge_get_vport(handle); 9398 struct hclge_dev *hdev = vport->back; 9399 9400 switch (cmd) { 9401 case SIOCGHWTSTAMP: 9402 return hclge_ptp_get_cfg(hdev, ifr); 9403 case SIOCSHWTSTAMP: 9404 return hclge_ptp_set_cfg(hdev, ifr); 9405 default: 9406 if (!hdev->hw.mac.phydev) 9407 return hclge_mii_ioctl(hdev, ifr, cmd); 9408 } 9409 9410 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); 9411 } 9412 9413 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, 9414 bool bypass_en) 9415 { 9416 struct hclge_port_vlan_filter_bypass_cmd *req; 9417 struct hclge_desc desc; 9418 int ret; 9419 9420 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false); 9421 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; 9422 req->vf_id = vf_id; 9423 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B, 9424 bypass_en ? 1 : 0); 9425 9426 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9427 if (ret) 9428 dev_err(&hdev->pdev->dev, 9429 "failed to set vport%u port vlan filter bypass state, ret = %d.\n", 9430 vf_id, ret); 9431 9432 return ret; 9433 } 9434 9435 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 9436 u8 fe_type, bool filter_en, u8 vf_id) 9437 { 9438 struct hclge_vlan_filter_ctrl_cmd *req; 9439 struct hclge_desc desc; 9440 int ret; 9441 9442 /* read current vlan filter parameter */ 9443 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); 9444 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 9445 req->vlan_type = vlan_type; 9446 req->vf_id = vf_id; 9447 9448 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9449 if (ret) { 9450 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", 9451 vf_id, ret); 9452 return ret; 9453 } 9454 9455 /* modify and write new config parameter */ 9456 hclge_comm_cmd_reuse_desc(&desc, false); 9457 req->vlan_fe = filter_en ? 
9458 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); 9459 9460 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9461 if (ret) 9462 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", 9463 vf_id, ret); 9464 9465 return ret; 9466 } 9467 9468 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) 9469 { 9470 struct hclge_dev *hdev = vport->back; 9471 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 9472 int ret; 9473 9474 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 9475 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9476 HCLGE_FILTER_FE_EGRESS_V1_B, 9477 enable, vport->vport_id); 9478 9479 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9480 HCLGE_FILTER_FE_EGRESS, enable, 9481 vport->vport_id); 9482 if (ret) 9483 return ret; 9484 9485 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { 9486 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, 9487 !enable); 9488 } else if (!vport->vport_id) { 9489 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 9490 enable = false; 9491 9492 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 9493 HCLGE_FILTER_FE_INGRESS, 9494 enable, 0); 9495 } 9496 9497 return ret; 9498 } 9499 9500 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) 9501 { 9502 struct hnae3_handle *handle = &vport->nic; 9503 struct hclge_vport_vlan_cfg *vlan, *tmp; 9504 struct hclge_dev *hdev = vport->back; 9505 9506 if (vport->vport_id) { 9507 if (vport->port_base_vlan_cfg.state != 9508 HNAE3_PORT_BASE_VLAN_DISABLE) 9509 return true; 9510 9511 if (vport->vf_info.trusted && vport->vf_info.request_uc_en) 9512 return false; 9513 } else if (handle->netdev_flags & HNAE3_USER_UPE) { 9514 return false; 9515 } 9516 9517 if (!vport->req_vlan_fltr_en) 9518 return false; 9519 9520 /* compatible with former device, always enable vlan filter */ 9521 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) 9522 return true; 9523 9524 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) 9525 if (vlan->vlan_id != 0) 9526 return true; 9527 9528 return false; 9529 } 9530 9531 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) 9532 { 9533 struct hclge_dev *hdev = vport->back; 9534 bool need_en; 9535 int ret; 9536 9537 mutex_lock(&hdev->vport_lock); 9538 9539 vport->req_vlan_fltr_en = request_en; 9540 9541 need_en = hclge_need_enable_vport_vlan_filter(vport); 9542 if (need_en == vport->cur_vlan_fltr_en) { 9543 mutex_unlock(&hdev->vport_lock); 9544 return 0; 9545 } 9546 9547 ret = hclge_set_vport_vlan_filter(vport, need_en); 9548 if (ret) { 9549 mutex_unlock(&hdev->vport_lock); 9550 return ret; 9551 } 9552 9553 vport->cur_vlan_fltr_en = need_en; 9554 9555 mutex_unlock(&hdev->vport_lock); 9556 9557 return 0; 9558 } 9559 9560 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 9561 { 9562 struct hclge_vport *vport = hclge_get_vport(handle); 9563 9564 return hclge_enable_vport_vlan_filter(vport, enable); 9565 } 9566 9567 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, 9568 bool is_kill, u16 vlan, 9569 struct hclge_desc *desc) 9570 { 9571 struct hclge_vlan_filter_vf_cfg_cmd *req0; 9572 struct hclge_vlan_filter_vf_cfg_cmd *req1; 9573 u8 vf_byte_val; 9574 u8 vf_byte_off; 9575 int ret; 9576 9577 hclge_cmd_setup_basic_desc(&desc[0], 9578 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 9579 hclge_cmd_setup_basic_desc(&desc[1], 9580 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 
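	/* Two chained descriptors are used here: desc[0] (flagged with
	 * HCLGE_COMM_CMD_FLAG_NEXT below) carries the vlan id/config and the
	 * first HCLGE_MAX_VF_BYTES of the VF bitmap, while desc[1] carries
	 * any remaining bitmap bytes. Each VF is a single bit in the bitmap,
	 * e.g. vfid 10 maps to byte 1, bit 2 (10 / 8 = 1, 1 << (10 % 8) = 0x04).
	 */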
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}

static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
					  bool is_kill, struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req;

	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;

	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req->resp_code || req->resp_code == 1)
			return 0;

		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req->resp_code)
			return 0;

		/* The vf vlan filter is disabled when the vf vlan table is
		 * full, so new vlan ids are never added to the table. Just
		 * return 0 without a warning here, to avoid flooding the
		 * log on unload.
		 */
		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	}

	return -EIO;
}

static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan)
{
	struct hclge_vport *vport = &hdev->vport[vfid];
	struct hclge_desc desc[2];
	int ret;

	/* If the vf vlan table is full, firmware will close the vf vlan
	 * filter, so it is neither possible nor necessary to add a new vlan
	 * id to it. If spoof check is enabled and the vf vlan table is full,
	 * a new vlan must not be added, because tx packets with that vlan
	 * id would be dropped.
9663 */ 9664 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { 9665 if (vport->vf_info.spoofchk && vlan) { 9666 dev_err(&hdev->pdev->dev, 9667 "Can't add vlan due to spoof check is on and vf vlan table is full\n"); 9668 return -EPERM; 9669 } 9670 return 0; 9671 } 9672 9673 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); 9674 if (ret) 9675 return ret; 9676 9677 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); 9678 } 9679 9680 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 9681 u16 vlan_id, bool is_kill) 9682 { 9683 struct hclge_vlan_filter_pf_cfg_cmd *req; 9684 struct hclge_desc desc; 9685 u8 vlan_offset_byte_val; 9686 u8 vlan_offset_byte; 9687 u8 vlan_offset_160; 9688 int ret; 9689 9690 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 9691 9692 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; 9693 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / 9694 HCLGE_VLAN_BYTE_SIZE; 9695 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); 9696 9697 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 9698 req->vlan_offset = vlan_offset_160; 9699 req->vlan_cfg = is_kill; 9700 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 9701 9702 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9703 if (ret) 9704 dev_err(&hdev->pdev->dev, 9705 "port vlan command, send fail, ret =%d.\n", ret); 9706 return ret; 9707 } 9708 9709 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, 9710 u16 vlan_id, bool is_kill) 9711 { 9712 /* vlan 0 may be added twice when 8021q module is enabled */ 9713 if (!is_kill && !vlan_id && 9714 test_bit(vport_id, hdev->vlan_table[vlan_id])) 9715 return false; 9716 9717 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 9718 dev_warn(&hdev->pdev->dev, 9719 "Add port vlan failed, vport %u is already in vlan %u\n", 9720 vport_id, vlan_id); 9721 return false; 9722 } 9723 9724 if (is_kill && 9725 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 9726 dev_warn(&hdev->pdev->dev, 9727 "Delete port vlan failed, vport %u is not in vlan %u\n", 9728 vport_id, vlan_id); 9729 return false; 9730 } 9731 9732 return true; 9733 } 9734 9735 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 9736 u16 vport_id, u16 vlan_id, 9737 bool is_kill) 9738 { 9739 u16 vport_idx, vport_num = 0; 9740 int ret; 9741 9742 if (is_kill && !vlan_id) 9743 return 0; 9744 9745 if (vlan_id >= VLAN_N_VID) 9746 return -EINVAL; 9747 9748 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); 9749 if (ret) { 9750 dev_err(&hdev->pdev->dev, 9751 "Set %u vport vlan filter config fail, ret =%d.\n", 9752 vport_id, ret); 9753 return ret; 9754 } 9755 9756 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) 9757 return 0; 9758 9759 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) 9760 vport_num++; 9761 9762 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 9763 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 9764 is_kill); 9765 9766 return ret; 9767 } 9768 9769 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 9770 { 9771 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 9772 struct hclge_vport_vtag_tx_cfg_cmd *req; 9773 struct hclge_dev *hdev = vport->back; 9774 struct hclge_desc desc; 9775 u16 bmap_index; 9776 int status; 9777 9778 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 9779 9780 req = (struct 
hclge_vport_vtag_tx_cfg_cmd *)desc.data; 9781 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 9782 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 9783 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 9784 vcfg->accept_tag1 ? 1 : 0); 9785 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 9786 vcfg->accept_untag1 ? 1 : 0); 9787 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 9788 vcfg->accept_tag2 ? 1 : 0); 9789 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 9790 vcfg->accept_untag2 ? 1 : 0); 9791 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 9792 vcfg->insert_tag1_en ? 1 : 0); 9793 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 9794 vcfg->insert_tag2_en ? 1 : 0); 9795 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, 9796 vcfg->tag_shift_mode_en ? 1 : 0); 9797 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 9798 9799 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 9800 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 9801 HCLGE_VF_NUM_PER_BYTE; 9802 req->vf_bitmap[bmap_index] = 9803 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 9804 9805 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9806 if (status) 9807 dev_err(&hdev->pdev->dev, 9808 "Send port txvlan cfg command fail, ret =%d\n", 9809 status); 9810 9811 return status; 9812 } 9813 9814 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 9815 { 9816 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 9817 struct hclge_vport_vtag_rx_cfg_cmd *req; 9818 struct hclge_dev *hdev = vport->back; 9819 struct hclge_desc desc; 9820 u16 bmap_index; 9821 int status; 9822 9823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 9824 9825 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 9826 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 9827 vcfg->strip_tag1_en ? 1 : 0); 9828 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 9829 vcfg->strip_tag2_en ? 1 : 0); 9830 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 9831 vcfg->vlan1_vlan_prionly ? 1 : 0); 9832 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 9833 vcfg->vlan2_vlan_prionly ? 1 : 0); 9834 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, 9835 vcfg->strip_tag1_discard_en ? 1 : 0); 9836 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, 9837 vcfg->strip_tag2_discard_en ? 
1 : 0); 9838 9839 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 9840 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 9841 HCLGE_VF_NUM_PER_BYTE; 9842 req->vf_bitmap[bmap_index] = 9843 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 9844 9845 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9846 if (status) 9847 dev_err(&hdev->pdev->dev, 9848 "Send port rxvlan cfg command fail, ret =%d\n", 9849 status); 9850 9851 return status; 9852 } 9853 9854 static int hclge_vlan_offload_cfg(struct hclge_vport *vport, 9855 u16 port_base_vlan_state, 9856 u16 vlan_tag, u8 qos) 9857 { 9858 int ret; 9859 9860 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 9861 vport->txvlan_cfg.accept_tag1 = true; 9862 vport->txvlan_cfg.insert_tag1_en = false; 9863 vport->txvlan_cfg.default_tag1 = 0; 9864 } else { 9865 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); 9866 9867 vport->txvlan_cfg.accept_tag1 = 9868 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; 9869 vport->txvlan_cfg.insert_tag1_en = true; 9870 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | 9871 vlan_tag; 9872 } 9873 9874 vport->txvlan_cfg.accept_untag1 = true; 9875 9876 /* accept_tag2 and accept_untag2 are not supported on 9877 * pdev revision(0x20), new revision support them, 9878 * this two fields can not be configured by user. 9879 */ 9880 vport->txvlan_cfg.accept_tag2 = true; 9881 vport->txvlan_cfg.accept_untag2 = true; 9882 vport->txvlan_cfg.insert_tag2_en = false; 9883 vport->txvlan_cfg.default_tag2 = 0; 9884 vport->txvlan_cfg.tag_shift_mode_en = true; 9885 9886 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 9887 vport->rxvlan_cfg.strip_tag1_en = false; 9888 vport->rxvlan_cfg.strip_tag2_en = 9889 vport->rxvlan_cfg.rx_vlan_offload_en; 9890 vport->rxvlan_cfg.strip_tag2_discard_en = false; 9891 } else { 9892 vport->rxvlan_cfg.strip_tag1_en = 9893 vport->rxvlan_cfg.rx_vlan_offload_en; 9894 vport->rxvlan_cfg.strip_tag2_en = true; 9895 vport->rxvlan_cfg.strip_tag2_discard_en = true; 9896 } 9897 9898 vport->rxvlan_cfg.strip_tag1_discard_en = false; 9899 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 9900 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 9901 9902 ret = hclge_set_vlan_tx_offload_cfg(vport); 9903 if (ret) 9904 return ret; 9905 9906 return hclge_set_vlan_rx_offload_cfg(vport); 9907 } 9908 9909 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 9910 { 9911 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 9912 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 9913 struct hclge_desc desc; 9914 int status; 9915 9916 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 9917 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 9918 rx_req->ot_fst_vlan_type = 9919 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 9920 rx_req->ot_sec_vlan_type = 9921 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 9922 rx_req->in_fst_vlan_type = 9923 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 9924 rx_req->in_sec_vlan_type = 9925 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 9926 9927 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9928 if (status) { 9929 dev_err(&hdev->pdev->dev, 9930 "Send rxvlan protocol type command fail, ret =%d\n", 9931 status); 9932 return status; 9933 } 9934 9935 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 9936 9937 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; 9938 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 9939 tx_req->in_vlan_type = 
cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 9940 9941 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9942 if (status) 9943 dev_err(&hdev->pdev->dev, 9944 "Send txvlan protocol type command fail, ret =%d\n", 9945 status); 9946 9947 return status; 9948 } 9949 9950 static int hclge_init_vlan_filter(struct hclge_dev *hdev) 9951 { 9952 struct hclge_vport *vport; 9953 int ret; 9954 int i; 9955 9956 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 9957 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9958 HCLGE_FILTER_FE_EGRESS_V1_B, 9959 true, 0); 9960 9961 /* for revision 0x21, vf vlan filter is per function */ 9962 for (i = 0; i < hdev->num_alloc_vport; i++) { 9963 vport = &hdev->vport[i]; 9964 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9965 HCLGE_FILTER_FE_EGRESS, true, 9966 vport->vport_id); 9967 if (ret) 9968 return ret; 9969 vport->cur_vlan_fltr_en = true; 9970 } 9971 9972 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 9973 HCLGE_FILTER_FE_INGRESS, true, 0); 9974 } 9975 9976 static int hclge_init_vlan_type(struct hclge_dev *hdev) 9977 { 9978 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; 9979 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; 9980 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; 9981 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; 9982 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; 9983 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; 9984 9985 return hclge_set_vlan_protocol_type(hdev); 9986 } 9987 9988 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) 9989 { 9990 struct hclge_port_base_vlan_config *cfg; 9991 struct hclge_vport *vport; 9992 int ret; 9993 int i; 9994 9995 for (i = 0; i < hdev->num_alloc_vport; i++) { 9996 vport = &hdev->vport[i]; 9997 cfg = &vport->port_base_vlan_cfg; 9998 9999 ret = hclge_vlan_offload_cfg(vport, cfg->state, 10000 cfg->vlan_info.vlan_tag, 10001 cfg->vlan_info.qos); 10002 if (ret) 10003 return ret; 10004 } 10005 return 0; 10006 } 10007 10008 static int hclge_init_vlan_config(struct hclge_dev *hdev) 10009 { 10010 struct hnae3_handle *handle = &hdev->vport[0].nic; 10011 int ret; 10012 10013 ret = hclge_init_vlan_filter(hdev); 10014 if (ret) 10015 return ret; 10016 10017 ret = hclge_init_vlan_type(hdev); 10018 if (ret) 10019 return ret; 10020 10021 ret = hclge_init_vport_vlan_offload(hdev); 10022 if (ret) 10023 return ret; 10024 10025 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 10026 } 10027 10028 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 10029 bool writen_to_tbl) 10030 { 10031 struct hclge_vport_vlan_cfg *vlan, *tmp; 10032 struct hclge_dev *hdev = vport->back; 10033 10034 mutex_lock(&hdev->vport_lock); 10035 10036 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10037 if (vlan->vlan_id == vlan_id) { 10038 mutex_unlock(&hdev->vport_lock); 10039 return; 10040 } 10041 } 10042 10043 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 10044 if (!vlan) { 10045 mutex_unlock(&hdev->vport_lock); 10046 return; 10047 } 10048 10049 vlan->hd_tbl_status = writen_to_tbl; 10050 vlan->vlan_id = vlan_id; 10051 10052 list_add_tail(&vlan->node, &vport->vlan_list); 10053 mutex_unlock(&hdev->vport_lock); 10054 } 10055 10056 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) 10057 { 10058 struct hclge_vport_vlan_cfg *vlan, *tmp; 10059 struct hclge_dev *hdev = vport->back; 10060 int ret; 10061 10062 mutex_lock(&hdev->vport_lock); 10063 10064 list_for_each_entry_safe(vlan, tmp, 
&vport->vlan_list, node) { 10065 if (!vlan->hd_tbl_status) { 10066 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10067 vport->vport_id, 10068 vlan->vlan_id, false); 10069 if (ret) { 10070 dev_err(&hdev->pdev->dev, 10071 "restore vport vlan list failed, ret=%d\n", 10072 ret); 10073 10074 mutex_unlock(&hdev->vport_lock); 10075 return ret; 10076 } 10077 } 10078 vlan->hd_tbl_status = true; 10079 } 10080 10081 mutex_unlock(&hdev->vport_lock); 10082 10083 return 0; 10084 } 10085 10086 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 10087 bool is_write_tbl) 10088 { 10089 struct hclge_vport_vlan_cfg *vlan, *tmp; 10090 struct hclge_dev *hdev = vport->back; 10091 10092 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10093 if (vlan->vlan_id == vlan_id) { 10094 if (is_write_tbl && vlan->hd_tbl_status) 10095 hclge_set_vlan_filter_hw(hdev, 10096 htons(ETH_P_8021Q), 10097 vport->vport_id, 10098 vlan_id, 10099 true); 10100 10101 list_del(&vlan->node); 10102 kfree(vlan); 10103 break; 10104 } 10105 } 10106 } 10107 10108 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) 10109 { 10110 struct hclge_vport_vlan_cfg *vlan, *tmp; 10111 struct hclge_dev *hdev = vport->back; 10112 10113 mutex_lock(&hdev->vport_lock); 10114 10115 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10116 if (vlan->hd_tbl_status) 10117 hclge_set_vlan_filter_hw(hdev, 10118 htons(ETH_P_8021Q), 10119 vport->vport_id, 10120 vlan->vlan_id, 10121 true); 10122 10123 vlan->hd_tbl_status = false; 10124 if (is_del_list) { 10125 list_del(&vlan->node); 10126 kfree(vlan); 10127 } 10128 } 10129 clear_bit(vport->vport_id, hdev->vf_vlan_full); 10130 mutex_unlock(&hdev->vport_lock); 10131 } 10132 10133 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) 10134 { 10135 struct hclge_vport_vlan_cfg *vlan, *tmp; 10136 struct hclge_vport *vport; 10137 int i; 10138 10139 mutex_lock(&hdev->vport_lock); 10140 10141 for (i = 0; i < hdev->num_alloc_vport; i++) { 10142 vport = &hdev->vport[i]; 10143 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10144 list_del(&vlan->node); 10145 kfree(vlan); 10146 } 10147 } 10148 10149 mutex_unlock(&hdev->vport_lock); 10150 } 10151 10152 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) 10153 { 10154 struct hclge_vlan_info *vlan_info; 10155 struct hclge_vport *vport; 10156 u16 vlan_proto; 10157 u16 vlan_id; 10158 u16 state; 10159 int vf_id; 10160 int ret; 10161 10162 /* PF should restore all vfs port base vlan */ 10163 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { 10164 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; 10165 vlan_info = vport->port_base_vlan_cfg.tbl_sta ? 
			&vport->port_base_vlan_cfg.vlan_info :
			&vport->port_base_vlan_cfg.old_vlan_info;

		vlan_id = vlan_info->vlan_tag;
		vlan_proto = vlan_info->vlan_proto;
		state = vport->port_base_vlan_cfg.state;

		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
			clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
			ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
						       vport->vport_id,
						       vlan_id, false);
			vport->port_base_vlan_cfg.tbl_sta = ret == 0;
		}
	}
}

void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	mutex_lock(&hdev->vport_lock);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret)
				break;
			vlan->hd_tbl_status = true;
		}
	}

	mutex_unlock(&hdev->vport_lock);
}

/* For global reset and imp reset, hardware will clear the mac table, so we
 * change the mac address state from ACTIVE to TO_ADD so that the entries can
 * be restored in the service task after the reset completes. Furthermore,
 * mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
 * after reset, so just remove these mac nodes from mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}

static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_port_base_vlan_config(hdev);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	hclge_restore_fd_entries(handle);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en
= enable; 10266 10267 return hclge_set_vlan_rx_offload_cfg(vport); 10268 } 10269 10270 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport) 10271 { 10272 struct hclge_dev *hdev = vport->back; 10273 10274 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) 10275 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); 10276 } 10277 10278 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, 10279 u16 port_base_vlan_state, 10280 struct hclge_vlan_info *new_info, 10281 struct hclge_vlan_info *old_info) 10282 { 10283 struct hclge_dev *hdev = vport->back; 10284 int ret; 10285 10286 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { 10287 hclge_rm_vport_all_vlan_table(vport, false); 10288 /* force clear VLAN 0 */ 10289 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); 10290 if (ret) 10291 return ret; 10292 return hclge_set_vlan_filter_hw(hdev, 10293 htons(new_info->vlan_proto), 10294 vport->vport_id, 10295 new_info->vlan_tag, 10296 false); 10297 } 10298 10299 vport->port_base_vlan_cfg.tbl_sta = false; 10300 10301 /* force add VLAN 0 */ 10302 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); 10303 if (ret) 10304 return ret; 10305 10306 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), 10307 vport->vport_id, old_info->vlan_tag, 10308 true); 10309 if (ret) 10310 return ret; 10311 10312 return hclge_add_vport_all_vlan_table(vport); 10313 } 10314 10315 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, 10316 const struct hclge_vlan_info *old_cfg) 10317 { 10318 if (new_cfg->vlan_tag != old_cfg->vlan_tag) 10319 return true; 10320 10321 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) 10322 return true; 10323 10324 return false; 10325 } 10326 10327 static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, 10328 struct hclge_vlan_info *new_info, 10329 struct hclge_vlan_info *old_info) 10330 { 10331 struct hclge_dev *hdev = vport->back; 10332 int ret; 10333 10334 /* add new VLAN tag */ 10335 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), 10336 vport->vport_id, new_info->vlan_tag, 10337 false); 10338 if (ret) 10339 return ret; 10340 10341 vport->port_base_vlan_cfg.tbl_sta = false; 10342 /* remove old VLAN tag */ 10343 if (old_info->vlan_tag == 0) 10344 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, 10345 true, 0); 10346 else 10347 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10348 vport->vport_id, 10349 old_info->vlan_tag, true); 10350 if (ret) 10351 dev_err(&hdev->pdev->dev, 10352 "failed to clear vport%u port base vlan %u, ret = %d.\n", 10353 vport->vport_id, old_info->vlan_tag, ret); 10354 10355 return ret; 10356 } 10357 10358 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, 10359 struct hclge_vlan_info *vlan_info) 10360 { 10361 struct hnae3_handle *nic = &vport->nic; 10362 struct hclge_vlan_info *old_vlan_info; 10363 int ret; 10364 10365 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; 10366 10367 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag, 10368 vlan_info->qos); 10369 if (ret) 10370 return ret; 10371 10372 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) 10373 goto out; 10374 10375 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) 10376 ret = hclge_modify_port_base_vlan_tag(vport, vlan_info, 10377 old_vlan_info); 10378 else 10379 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, 10380 old_vlan_info); 10381 if (ret) 10382 
return ret; 10383 10384 out: 10385 vport->port_base_vlan_cfg.state = state; 10386 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 10387 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; 10388 else 10389 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 10390 10391 vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; 10392 vport->port_base_vlan_cfg.vlan_info = *vlan_info; 10393 vport->port_base_vlan_cfg.tbl_sta = true; 10394 hclge_set_vport_vlan_fltr_change(vport); 10395 10396 return 0; 10397 } 10398 10399 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, 10400 enum hnae3_port_base_vlan_state state, 10401 u16 vlan, u8 qos) 10402 { 10403 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10404 if (!vlan && !qos) 10405 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 10406 10407 return HNAE3_PORT_BASE_VLAN_ENABLE; 10408 } 10409 10410 if (!vlan && !qos) 10411 return HNAE3_PORT_BASE_VLAN_DISABLE; 10412 10413 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && 10414 vport->port_base_vlan_cfg.vlan_info.qos == qos) 10415 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 10416 10417 return HNAE3_PORT_BASE_VLAN_MODIFY; 10418 } 10419 10420 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 10421 u16 vlan, u8 qos, __be16 proto) 10422 { 10423 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 10424 struct hclge_vport *vport = hclge_get_vport(handle); 10425 struct hclge_dev *hdev = vport->back; 10426 struct hclge_vlan_info vlan_info; 10427 u16 state; 10428 int ret; 10429 10430 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 10431 return -EOPNOTSUPP; 10432 10433 vport = hclge_get_vf_vport(hdev, vfid); 10434 if (!vport) 10435 return -EINVAL; 10436 10437 /* qos is a 3 bits value, so can not be bigger than 7 */ 10438 if (vlan > VLAN_N_VID - 1 || qos > 7) 10439 return -EINVAL; 10440 if (proto != htons(ETH_P_8021Q)) 10441 return -EPROTONOSUPPORT; 10442 10443 state = hclge_get_port_base_vlan_state(vport, 10444 vport->port_base_vlan_cfg.state, 10445 vlan, qos); 10446 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) 10447 return 0; 10448 10449 vlan_info.vlan_tag = vlan; 10450 vlan_info.qos = qos; 10451 vlan_info.vlan_proto = ntohs(proto); 10452 10453 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); 10454 if (ret) { 10455 dev_err(&hdev->pdev->dev, 10456 "failed to update port base vlan for vf %d, ret = %d\n", 10457 vfid, ret); 10458 return ret; 10459 } 10460 10461 /* there is a timewindow for PF to know VF unalive, it may 10462 * cause send mailbox fail, but it doesn't matter, VF will 10463 * query it when reinit. 10464 * for DEVICE_VERSION_V3, vf doesn't need to know about the port based 10465 * VLAN state. 
10466 */ 10467 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 10468 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 10469 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], 10470 vport->vport_id, 10471 state, 10472 &vlan_info); 10473 else 10474 set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, 10475 &vport->need_notify); 10476 } 10477 return 0; 10478 } 10479 10480 static void hclge_clear_vf_vlan(struct hclge_dev *hdev) 10481 { 10482 struct hclge_vlan_info *vlan_info; 10483 struct hclge_vport *vport; 10484 int ret; 10485 int vf; 10486 10487 /* clear port base vlan for all vf */ 10488 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { 10489 vport = &hdev->vport[vf]; 10490 vlan_info = &vport->port_base_vlan_cfg.vlan_info; 10491 10492 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10493 vport->vport_id, 10494 vlan_info->vlan_tag, true); 10495 if (ret) 10496 dev_err(&hdev->pdev->dev, 10497 "failed to clear vf vlan for vf%d, ret = %d\n", 10498 vf - HCLGE_VF_VPORT_START_NUM, ret); 10499 } 10500 } 10501 10502 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 10503 u16 vlan_id, bool is_kill) 10504 { 10505 struct hclge_vport *vport = hclge_get_vport(handle); 10506 struct hclge_dev *hdev = vport->back; 10507 bool writen_to_tbl = false; 10508 int ret = 0; 10509 10510 /* When device is resetting or reset failed, firmware is unable to 10511 * handle mailbox. Just record the vlan id, and remove it after 10512 * reset finished. 10513 */ 10514 mutex_lock(&hdev->vport_lock); 10515 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 10516 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { 10517 set_bit(vlan_id, vport->vlan_del_fail_bmap); 10518 mutex_unlock(&hdev->vport_lock); 10519 return -EBUSY; 10520 } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { 10521 clear_bit(vlan_id, vport->vlan_del_fail_bmap); 10522 } 10523 mutex_unlock(&hdev->vport_lock); 10524 10525 /* when port base vlan enabled, we use port base vlan as the vlan 10526 * filter entry. In this case, we don't update vlan filter table 10527 * when user add new vlan or remove exist vlan, just update the vport 10528 * vlan list. 
The vlan id in vlan list will be writen in vlan filter 10529 * table until port base vlan disabled 10530 */ 10531 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10532 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, 10533 vlan_id, is_kill); 10534 writen_to_tbl = true; 10535 } 10536 10537 if (!ret) { 10538 if (!is_kill) { 10539 hclge_add_vport_vlan_table(vport, vlan_id, 10540 writen_to_tbl); 10541 } else if (is_kill && vlan_id != 0) { 10542 mutex_lock(&hdev->vport_lock); 10543 hclge_rm_vport_vlan_table(vport, vlan_id, false); 10544 mutex_unlock(&hdev->vport_lock); 10545 } 10546 } else if (is_kill) { 10547 /* when remove hw vlan filter failed, record the vlan id, 10548 * and try to remove it from hw later, to be consistence 10549 * with stack 10550 */ 10551 mutex_lock(&hdev->vport_lock); 10552 set_bit(vlan_id, vport->vlan_del_fail_bmap); 10553 mutex_unlock(&hdev->vport_lock); 10554 } 10555 10556 hclge_set_vport_vlan_fltr_change(vport); 10557 10558 return ret; 10559 } 10560 10561 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) 10562 { 10563 struct hclge_vport *vport; 10564 int ret; 10565 u16 i; 10566 10567 for (i = 0; i < hdev->num_alloc_vport; i++) { 10568 vport = &hdev->vport[i]; 10569 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 10570 &vport->state)) 10571 continue; 10572 10573 ret = hclge_enable_vport_vlan_filter(vport, 10574 vport->req_vlan_fltr_en); 10575 if (ret) { 10576 dev_err(&hdev->pdev->dev, 10577 "failed to sync vlan filter state for vport%u, ret = %d\n", 10578 vport->vport_id, ret); 10579 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 10580 &vport->state); 10581 return; 10582 } 10583 } 10584 } 10585 10586 static void hclge_sync_vlan_filter(struct hclge_dev *hdev) 10587 { 10588 #define HCLGE_MAX_SYNC_COUNT 60 10589 10590 int i, ret, sync_cnt = 0; 10591 u16 vlan_id; 10592 10593 mutex_lock(&hdev->vport_lock); 10594 /* start from vport 1 for PF is always alive */ 10595 for (i = 0; i < hdev->num_alloc_vport; i++) { 10596 struct hclge_vport *vport = &hdev->vport[i]; 10597 10598 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 10599 VLAN_N_VID); 10600 while (vlan_id != VLAN_N_VID) { 10601 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10602 vport->vport_id, vlan_id, 10603 true); 10604 if (ret && ret != -EINVAL) { 10605 mutex_unlock(&hdev->vport_lock); 10606 return; 10607 } 10608 10609 clear_bit(vlan_id, vport->vlan_del_fail_bmap); 10610 hclge_rm_vport_vlan_table(vport, vlan_id, false); 10611 hclge_set_vport_vlan_fltr_change(vport); 10612 10613 sync_cnt++; 10614 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) { 10615 mutex_unlock(&hdev->vport_lock); 10616 return; 10617 } 10618 10619 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 10620 VLAN_N_VID); 10621 } 10622 } 10623 mutex_unlock(&hdev->vport_lock); 10624 10625 hclge_sync_vlan_fltr_state(hdev); 10626 } 10627 10628 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) 10629 { 10630 struct hclge_config_max_frm_size_cmd *req; 10631 struct hclge_desc desc; 10632 10633 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 10634 10635 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 10636 req->max_frm_size = cpu_to_le16(new_mps); 10637 req->min_frm_size = HCLGE_MAC_MIN_FRAME; 10638 10639 return hclge_cmd_send(&hdev->hw, &desc, 1); 10640 } 10641 10642 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 10643 { 10644 struct hclge_vport *vport = hclge_get_vport(handle); 10645 10646 return hclge_set_vport_mtu(vport, 
				   new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports 2 layers of vlan tags */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			dev_err(&hdev->pdev->dev,
				"failed to set pf mtu for less than vport %d, mps = %u.\n",
				i, hdev->vport[i].mps);
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}

static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
				  u8 *reset_status)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);

	return 0;
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_comm_tqp *tqp;
	struct hnae3_queue *queue;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_comm_tqp, q);

	return tqp->index;
}

static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
{
	struct hclge_vport *vport =
hclge_get_vport(handle); 10767 struct hclge_dev *hdev = vport->back; 10768 u16 reset_try_times = 0; 10769 u8 reset_status; 10770 u16 queue_gid; 10771 int ret; 10772 u16 i; 10773 10774 for (i = 0; i < handle->kinfo.num_tqps; i++) { 10775 queue_gid = hclge_covert_handle_qid_global(handle, i); 10776 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); 10777 if (ret) { 10778 dev_err(&hdev->pdev->dev, 10779 "failed to send reset tqp cmd, ret = %d\n", 10780 ret); 10781 return ret; 10782 } 10783 10784 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 10785 ret = hclge_get_reset_status(hdev, queue_gid, 10786 &reset_status); 10787 if (ret) 10788 return ret; 10789 10790 if (reset_status) 10791 break; 10792 10793 /* Wait for tqp hw reset */ 10794 usleep_range(1000, 1200); 10795 } 10796 10797 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 10798 dev_err(&hdev->pdev->dev, 10799 "wait for tqp hw reset timeout\n"); 10800 return -ETIME; 10801 } 10802 10803 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); 10804 if (ret) { 10805 dev_err(&hdev->pdev->dev, 10806 "failed to deassert soft reset, ret = %d\n", 10807 ret); 10808 return ret; 10809 } 10810 reset_try_times = 0; 10811 } 10812 return 0; 10813 } 10814 10815 static int hclge_reset_rcb(struct hnae3_handle *handle) 10816 { 10817 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U 10818 #define HCLGE_RESET_RCB_SUCCESS 1U 10819 10820 struct hclge_vport *vport = hclge_get_vport(handle); 10821 struct hclge_dev *hdev = vport->back; 10822 struct hclge_reset_cmd *req; 10823 struct hclge_desc desc; 10824 u8 return_status; 10825 u16 queue_gid; 10826 int ret; 10827 10828 queue_gid = hclge_covert_handle_qid_global(handle, 0); 10829 10830 req = (struct hclge_reset_cmd *)desc.data; 10831 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 10832 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); 10833 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); 10834 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); 10835 10836 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10837 if (ret) { 10838 dev_err(&hdev->pdev->dev, 10839 "failed to send rcb reset cmd, ret = %d\n", ret); 10840 return ret; 10841 } 10842 10843 return_status = req->fun_reset_rcb_return_status; 10844 if (return_status == HCLGE_RESET_RCB_SUCCESS) 10845 return 0; 10846 10847 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) { 10848 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", 10849 return_status); 10850 return -EIO; 10851 } 10852 10853 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd 10854 * again to reset all tqps 10855 */ 10856 return hclge_reset_tqp_cmd(handle); 10857 } 10858 10859 int hclge_reset_tqp(struct hnae3_handle *handle) 10860 { 10861 struct hclge_vport *vport = hclge_get_vport(handle); 10862 struct hclge_dev *hdev = vport->back; 10863 int ret; 10864 10865 /* only need to disable PF's tqp */ 10866 if (!vport->vport_id) { 10867 ret = hclge_tqp_enable(handle, false); 10868 if (ret) { 10869 dev_err(&hdev->pdev->dev, 10870 "failed to disable tqp, ret = %d\n", ret); 10871 return ret; 10872 } 10873 } 10874 10875 return hclge_reset_rcb(handle); 10876 } 10877 10878 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 10879 { 10880 struct hclge_vport *vport = hclge_get_vport(handle); 10881 struct hclge_dev *hdev = vport->back; 10882 10883 return hdev->fw_version; 10884 } 10885 10886 int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version) 10887 { 10888 struct hclge_comm_query_scc_cmd *resp; 10889 struct 
hclge_desc desc; 10890 int ret; 10891 10892 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, 1); 10893 resp = (struct hclge_comm_query_scc_cmd *)desc.data; 10894 10895 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10896 if (ret) 10897 return ret; 10898 10899 *scc_version = le32_to_cpu(resp->scc_version); 10900 10901 return 0; 10902 } 10903 10904 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 10905 { 10906 struct phy_device *phydev = hdev->hw.mac.phydev; 10907 10908 if (!phydev) 10909 return; 10910 10911 phy_set_asym_pause(phydev, rx_en, tx_en); 10912 } 10913 10914 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 10915 { 10916 int ret; 10917 10918 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 10919 return 0; 10920 10921 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 10922 if (ret) 10923 dev_err(&hdev->pdev->dev, 10924 "configure pauseparam error, ret = %d.\n", ret); 10925 10926 return ret; 10927 } 10928 10929 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 10930 { 10931 struct phy_device *phydev = hdev->hw.mac.phydev; 10932 u16 remote_advertising = 0; 10933 u16 local_advertising; 10934 u32 rx_pause, tx_pause; 10935 u8 flowctl; 10936 10937 if (!phydev->link) 10938 return 0; 10939 10940 if (!phydev->autoneg) 10941 return hclge_mac_pause_setup_hw(hdev); 10942 10943 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); 10944 10945 if (phydev->pause) 10946 remote_advertising = LPA_PAUSE_CAP; 10947 10948 if (phydev->asym_pause) 10949 remote_advertising |= LPA_PAUSE_ASYM; 10950 10951 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 10952 remote_advertising); 10953 tx_pause = flowctl & FLOW_CTRL_TX; 10954 rx_pause = flowctl & FLOW_CTRL_RX; 10955 10956 if (phydev->duplex == HCLGE_MAC_HALF) { 10957 tx_pause = 0; 10958 rx_pause = 0; 10959 } 10960 10961 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 10962 } 10963 10964 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 10965 u32 *rx_en, u32 *tx_en) 10966 { 10967 struct hclge_vport *vport = hclge_get_vport(handle); 10968 struct hclge_dev *hdev = vport->back; 10969 u8 media_type = hdev->hw.mac.media_type; 10970 10971 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ? 
10972 hclge_get_autoneg(handle) : 0; 10973 10974 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 10975 *rx_en = 0; 10976 *tx_en = 0; 10977 return; 10978 } 10979 10980 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 10981 *rx_en = 1; 10982 *tx_en = 0; 10983 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 10984 *tx_en = 1; 10985 *rx_en = 0; 10986 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 10987 *rx_en = 1; 10988 *tx_en = 1; 10989 } else { 10990 *rx_en = 0; 10991 *tx_en = 0; 10992 } 10993 } 10994 10995 static void hclge_record_user_pauseparam(struct hclge_dev *hdev, 10996 u32 rx_en, u32 tx_en) 10997 { 10998 if (rx_en && tx_en) 10999 hdev->fc_mode_last_time = HCLGE_FC_FULL; 11000 else if (rx_en && !tx_en) 11001 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 11002 else if (!rx_en && tx_en) 11003 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 11004 else 11005 hdev->fc_mode_last_time = HCLGE_FC_NONE; 11006 11007 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 11008 } 11009 11010 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 11011 u32 rx_en, u32 tx_en) 11012 { 11013 struct hclge_vport *vport = hclge_get_vport(handle); 11014 struct hclge_dev *hdev = vport->back; 11015 struct phy_device *phydev = hdev->hw.mac.phydev; 11016 u32 fc_autoneg; 11017 11018 if (phydev || hnae3_dev_phy_imp_supported(hdev)) { 11019 fc_autoneg = hclge_get_autoneg(handle); 11020 if (auto_neg != fc_autoneg) { 11021 dev_info(&hdev->pdev->dev, 11022 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 11023 return -EOPNOTSUPP; 11024 } 11025 } 11026 11027 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 11028 dev_info(&hdev->pdev->dev, 11029 "Priority flow control enabled. Cannot set link flow control.\n"); 11030 return -EOPNOTSUPP; 11031 } 11032 11033 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 11034 11035 hclge_record_user_pauseparam(hdev, rx_en, tx_en); 11036 11037 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) 11038 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 11039 11040 if (phydev) 11041 return phy_start_aneg(phydev); 11042 11043 return -EOPNOTSUPP; 11044 } 11045 11046 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 11047 u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num) 11048 { 11049 struct hclge_vport *vport = hclge_get_vport(handle); 11050 struct hclge_dev *hdev = vport->back; 11051 11052 if (speed) 11053 *speed = hdev->hw.mac.speed; 11054 if (duplex) 11055 *duplex = hdev->hw.mac.duplex; 11056 if (auto_neg) 11057 *auto_neg = hdev->hw.mac.autoneg; 11058 if (lane_num) 11059 *lane_num = hdev->hw.mac.lane_num; 11060 } 11061 11062 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, 11063 u8 *module_type) 11064 { 11065 struct hclge_vport *vport = hclge_get_vport(handle); 11066 struct hclge_dev *hdev = vport->back; 11067 11068 /* When nic is down, the service task is not running, doesn't update 11069 * the port information per second. Query the port information before 11070 * return the media type, ensure getting the correct media information. 
11071 */ 11072 hclge_update_port_info(hdev); 11073 11074 if (media_type) 11075 *media_type = hdev->hw.mac.media_type; 11076 11077 if (module_type) 11078 *module_type = hdev->hw.mac.module_type; 11079 } 11080 11081 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 11082 u8 *tp_mdix_ctrl, u8 *tp_mdix) 11083 { 11084 struct hclge_vport *vport = hclge_get_vport(handle); 11085 struct hclge_dev *hdev = vport->back; 11086 struct phy_device *phydev = hdev->hw.mac.phydev; 11087 int mdix_ctrl, mdix, is_resolved; 11088 unsigned int retval; 11089 11090 if (!phydev) { 11091 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 11092 *tp_mdix = ETH_TP_MDI_INVALID; 11093 return; 11094 } 11095 11096 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 11097 11098 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 11099 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 11100 HCLGE_PHY_MDIX_CTRL_S); 11101 11102 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 11103 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 11104 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 11105 11106 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 11107 11108 switch (mdix_ctrl) { 11109 case 0x0: 11110 *tp_mdix_ctrl = ETH_TP_MDI; 11111 break; 11112 case 0x1: 11113 *tp_mdix_ctrl = ETH_TP_MDI_X; 11114 break; 11115 case 0x3: 11116 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 11117 break; 11118 default: 11119 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 11120 break; 11121 } 11122 11123 if (!is_resolved) 11124 *tp_mdix = ETH_TP_MDI_INVALID; 11125 else if (mdix) 11126 *tp_mdix = ETH_TP_MDI_X; 11127 else 11128 *tp_mdix = ETH_TP_MDI; 11129 } 11130 11131 static void hclge_info_show(struct hclge_dev *hdev) 11132 { 11133 struct hnae3_handle *handle = &hdev->vport->nic; 11134 struct device *dev = &hdev->pdev->dev; 11135 11136 dev_info(dev, "PF info begin:\n"); 11137 11138 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 11139 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 11140 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 11141 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 11142 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); 11143 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 11144 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); 11145 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); 11146 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); 11147 dev_info(dev, "This is %s PF\n", 11148 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); 11149 dev_info(dev, "DCB %s\n", 11150 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable"); 11151 dev_info(dev, "MQPRIO %s\n", 11152 handle->kinfo.tc_info.mqprio_active ? 
"enable" : "disable"); 11153 dev_info(dev, "Default tx spare buffer size: %u\n", 11154 hdev->tx_spare_buf_size); 11155 11156 dev_info(dev, "PF info end.\n"); 11157 } 11158 11159 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 11160 struct hclge_vport *vport) 11161 { 11162 struct hnae3_client *client = vport->nic.client; 11163 struct hclge_dev *hdev = ae_dev->priv; 11164 int rst_cnt = hdev->rst_stats.reset_cnt; 11165 int ret; 11166 11167 ret = client->ops->init_instance(&vport->nic); 11168 if (ret) 11169 return ret; 11170 11171 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11172 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 11173 rst_cnt != hdev->rst_stats.reset_cnt) { 11174 ret = -EBUSY; 11175 goto init_nic_err; 11176 } 11177 11178 /* Enable nic hw error interrupts */ 11179 ret = hclge_config_nic_hw_error(hdev, true); 11180 if (ret) { 11181 dev_err(&ae_dev->pdev->dev, 11182 "fail(%d) to enable hw error interrupts\n", ret); 11183 goto init_nic_err; 11184 } 11185 11186 hnae3_set_client_init_flag(client, ae_dev, 1); 11187 11188 if (netif_msg_drv(&hdev->vport->nic)) 11189 hclge_info_show(hdev); 11190 11191 return ret; 11192 11193 init_nic_err: 11194 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11195 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11196 msleep(HCLGE_WAIT_RESET_DONE); 11197 11198 client->ops->uninit_instance(&vport->nic, 0); 11199 11200 return ret; 11201 } 11202 11203 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 11204 struct hclge_vport *vport) 11205 { 11206 struct hclge_dev *hdev = ae_dev->priv; 11207 struct hnae3_client *client; 11208 int rst_cnt; 11209 int ret; 11210 11211 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 11212 !hdev->nic_client) 11213 return 0; 11214 11215 client = hdev->roce_client; 11216 ret = hclge_init_roce_base_info(vport); 11217 if (ret) 11218 return ret; 11219 11220 rst_cnt = hdev->rst_stats.reset_cnt; 11221 ret = client->ops->init_instance(&vport->roce); 11222 if (ret) 11223 return ret; 11224 11225 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11226 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 11227 rst_cnt != hdev->rst_stats.reset_cnt) { 11228 ret = -EBUSY; 11229 goto init_roce_err; 11230 } 11231 11232 /* Enable roce ras interrupts */ 11233 ret = hclge_config_rocee_ras_interrupt(hdev, true); 11234 if (ret) { 11235 dev_err(&ae_dev->pdev->dev, 11236 "fail(%d) to enable roce ras interrupts\n", ret); 11237 goto init_roce_err; 11238 } 11239 11240 hnae3_set_client_init_flag(client, ae_dev, 1); 11241 11242 return 0; 11243 11244 init_roce_err: 11245 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11246 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11247 msleep(HCLGE_WAIT_RESET_DONE); 11248 11249 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); 11250 11251 return ret; 11252 } 11253 11254 static int hclge_init_client_instance(struct hnae3_client *client, 11255 struct hnae3_ae_dev *ae_dev) 11256 { 11257 struct hclge_dev *hdev = ae_dev->priv; 11258 struct hclge_vport *vport = &hdev->vport[0]; 11259 int ret; 11260 11261 switch (client->type) { 11262 case HNAE3_CLIENT_KNIC: 11263 hdev->nic_client = client; 11264 vport->nic.client = client; 11265 ret = hclge_init_nic_client_instance(ae_dev, vport); 11266 if (ret) 11267 goto clear_nic; 11268 11269 ret = hclge_init_roce_client_instance(ae_dev, vport); 11270 if (ret) 11271 goto clear_roce; 11272 11273 break; 11274 case HNAE3_CLIENT_ROCE: 11275 if (hnae3_dev_roce_supported(hdev)) { 
11276 hdev->roce_client = client; 11277 vport->roce.client = client; 11278 } 11279 11280 ret = hclge_init_roce_client_instance(ae_dev, vport); 11281 if (ret) 11282 goto clear_roce; 11283 11284 break; 11285 default: 11286 return -EINVAL; 11287 } 11288 11289 return 0; 11290 11291 clear_nic: 11292 hdev->nic_client = NULL; 11293 vport->nic.client = NULL; 11294 return ret; 11295 clear_roce: 11296 hdev->roce_client = NULL; 11297 vport->roce.client = NULL; 11298 return ret; 11299 } 11300 11301 static void hclge_uninit_client_instance(struct hnae3_client *client, 11302 struct hnae3_ae_dev *ae_dev) 11303 { 11304 struct hclge_dev *hdev = ae_dev->priv; 11305 struct hclge_vport *vport = &hdev->vport[0]; 11306 11307 if (hdev->roce_client) { 11308 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11309 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11310 msleep(HCLGE_WAIT_RESET_DONE); 11311 11312 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); 11313 hdev->roce_client = NULL; 11314 vport->roce.client = NULL; 11315 } 11316 if (client->type == HNAE3_CLIENT_ROCE) 11317 return; 11318 if (hdev->nic_client && client->ops->uninit_instance) { 11319 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11320 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11321 msleep(HCLGE_WAIT_RESET_DONE); 11322 11323 client->ops->uninit_instance(&vport->nic, 0); 11324 hdev->nic_client = NULL; 11325 vport->nic.client = NULL; 11326 } 11327 } 11328 11329 static int hclge_dev_mem_map(struct hclge_dev *hdev) 11330 { 11331 struct pci_dev *pdev = hdev->pdev; 11332 struct hclge_hw *hw = &hdev->hw; 11333 11334 /* for device does not have device memory, return directly */ 11335 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR))) 11336 return 0; 11337 11338 hw->hw.mem_base = 11339 devm_ioremap_wc(&pdev->dev, 11340 pci_resource_start(pdev, HCLGE_MEM_BAR), 11341 pci_resource_len(pdev, HCLGE_MEM_BAR)); 11342 if (!hw->hw.mem_base) { 11343 dev_err(&pdev->dev, "failed to map device memory\n"); 11344 return -EFAULT; 11345 } 11346 11347 return 0; 11348 } 11349 11350 static int hclge_pci_init(struct hclge_dev *hdev) 11351 { 11352 struct pci_dev *pdev = hdev->pdev; 11353 struct hclge_hw *hw; 11354 int ret; 11355 11356 ret = pci_enable_device(pdev); 11357 if (ret) { 11358 dev_err(&pdev->dev, "failed to enable PCI device\n"); 11359 return ret; 11360 } 11361 11362 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11363 if (ret) { 11364 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11365 if (ret) { 11366 dev_err(&pdev->dev, 11367 "can't set consistent PCI DMA"); 11368 goto err_disable_device; 11369 } 11370 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 11371 } 11372 11373 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 11374 if (ret) { 11375 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 11376 goto err_disable_device; 11377 } 11378 11379 pci_set_master(pdev); 11380 hw = &hdev->hw; 11381 hw->hw.io_base = pcim_iomap(pdev, 2, 0); 11382 if (!hw->hw.io_base) { 11383 dev_err(&pdev->dev, "Can't map configuration register space\n"); 11384 ret = -ENOMEM; 11385 goto err_release_regions; 11386 } 11387 11388 ret = hclge_dev_mem_map(hdev); 11389 if (ret) 11390 goto err_unmap_io_base; 11391 11392 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 11393 11394 return 0; 11395 11396 err_unmap_io_base: 11397 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11398 err_release_regions: 11399 pci_release_regions(pdev); 11400 err_disable_device: 11401 pci_disable_device(pdev); 11402 
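	/* all failure paths funnel through the labels above; each label
	 * releases only what was acquired before its jump point (the
	 * register I/O mapping, then the BAR regions, then the PCI device
	 * itself) before returning the original error code below.
	 */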
11403 return ret; 11404 } 11405 11406 static void hclge_pci_uninit(struct hclge_dev *hdev) 11407 { 11408 struct pci_dev *pdev = hdev->pdev; 11409 11410 if (hdev->hw.hw.mem_base) 11411 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 11412 11413 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11414 pci_free_irq_vectors(pdev); 11415 pci_release_mem_regions(pdev); 11416 pci_disable_device(pdev); 11417 } 11418 11419 static void hclge_state_init(struct hclge_dev *hdev) 11420 { 11421 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 11422 set_bit(HCLGE_STATE_DOWN, &hdev->state); 11423 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 11424 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11425 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 11426 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 11427 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 11428 } 11429 11430 static void hclge_state_uninit(struct hclge_dev *hdev) 11431 { 11432 set_bit(HCLGE_STATE_DOWN, &hdev->state); 11433 set_bit(HCLGE_STATE_REMOVING, &hdev->state); 11434 11435 if (hdev->reset_timer.function) 11436 del_timer_sync(&hdev->reset_timer); 11437 if (hdev->service_task.work.func) 11438 cancel_delayed_work_sync(&hdev->service_task); 11439 } 11440 11441 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 11442 enum hnae3_reset_type rst_type) 11443 { 11444 #define HCLGE_RESET_RETRY_WAIT_MS 500 11445 #define HCLGE_RESET_RETRY_CNT 5 11446 11447 struct hclge_dev *hdev = ae_dev->priv; 11448 int retry_cnt = 0; 11449 int ret; 11450 11451 while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) { 11452 down(&hdev->reset_sem); 11453 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11454 hdev->reset_type = rst_type; 11455 ret = hclge_reset_prepare(hdev); 11456 if (!ret && !hdev->reset_pending) 11457 break; 11458 11459 dev_err(&hdev->pdev->dev, 11460 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 11461 ret, hdev->reset_pending, retry_cnt); 11462 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11463 up(&hdev->reset_sem); 11464 msleep(HCLGE_RESET_RETRY_WAIT_MS); 11465 } 11466 11467 /* disable misc vector before reset done */ 11468 hclge_enable_vector(&hdev->misc_vector, false); 11469 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 11470 11471 if (hdev->reset_type == HNAE3_FLR_RESET) 11472 hdev->rst_stats.flr_rst_cnt++; 11473 } 11474 11475 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) 11476 { 11477 struct hclge_dev *hdev = ae_dev->priv; 11478 int ret; 11479 11480 hclge_enable_vector(&hdev->misc_vector, true); 11481 11482 ret = hclge_reset_rebuild(hdev); 11483 if (ret) 11484 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); 11485 11486 hdev->reset_type = HNAE3_NONE_RESET; 11487 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11488 up(&hdev->reset_sem); 11489 } 11490 11491 static void hclge_clear_resetting_state(struct hclge_dev *hdev) 11492 { 11493 u16 i; 11494 11495 for (i = 0; i < hdev->num_alloc_vport; i++) { 11496 struct hclge_vport *vport = &hdev->vport[i]; 11497 int ret; 11498 11499 /* Send cmd to clear vport's FUNC_RST_ING */ 11500 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); 11501 if (ret) 11502 dev_warn(&hdev->pdev->dev, 11503 "clear vport(%u) rst failed %d!\n", 11504 vport->vport_id, ret); 11505 } 11506 } 11507 11508 static int hclge_clear_hw_resource(struct hclge_dev *hdev) 11509 { 11510 struct hclge_desc desc; 11511 int ret; 11512 11513 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); 11514 11515 ret = 
hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This command is only supported by new firmware; older firmware
	 * rejects it with -EOPNOTSUPP. To keep the code backward compatible,
	 * treat that error as success.
	 */
	if (ret && ret != -EOPNOTSUPP) {
		dev_err(&hdev->pdev->dev,
			"failed to clear hw resource, ret = %d\n", ret);
		return ret;
	}
	return 0;
}

static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}

static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return &vport->back->hw.mac.wol;
}

static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
					u32 *wol_supported)
{
	struct hclge_query_wol_supported_cmd *wol_supported_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
				   true);
	wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query wol supported, ret = %d\n", ret);
		return ret;
	}

	*wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);

	return 0;
}

static int hclge_set_wol_cfg(struct hclge_dev *hdev,
			     struct hclge_wol_info *wol_info)
{
	struct hclge_wol_cfg_cmd *wol_cfg_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
	wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
	wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
	wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
	memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set wol config, ret = %d\n", ret);

	return ret;
}

static int hclge_update_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	return hclge_set_wol_cfg(hdev, wol_info);
}

static int hclge_init_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
	int ret;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	memset(wol_info, 0, sizeof(struct hclge_wol_info));
	ret = hclge_get_wol_supported_mode(hdev,
					   &wol_info->wol_support_mode);
	if (ret) {
		wol_info->wol_support_mode = 0;
		return ret;
	}

	return hclge_update_wol(hdev);
}

static void hclge_get_wol(struct hnae3_handle *handle,
			  struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);

	wol->supported =
wol_info->wol_support_mode; 11628 wol->wolopts = wol_info->wol_current_mode; 11629 if (wol_info->wol_current_mode & WAKE_MAGICSECURE) 11630 memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX); 11631 } 11632 11633 static int hclge_set_wol(struct hnae3_handle *handle, 11634 struct ethtool_wolinfo *wol) 11635 { 11636 struct hclge_wol_info *wol_info = hclge_get_wol_info(handle); 11637 struct hclge_vport *vport = hclge_get_vport(handle); 11638 u32 wol_mode; 11639 int ret; 11640 11641 wol_mode = wol->wolopts; 11642 if (wol_mode & ~wol_info->wol_support_mode) 11643 return -EINVAL; 11644 11645 wol_info->wol_current_mode = wol_mode; 11646 if (wol_mode & WAKE_MAGICSECURE) { 11647 memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX); 11648 wol_info->wol_sopass_size = SOPASS_MAX; 11649 } else { 11650 wol_info->wol_sopass_size = 0; 11651 } 11652 11653 ret = hclge_set_wol_cfg(vport->back, wol_info); 11654 if (ret) 11655 wol_info->wol_current_mode = 0; 11656 11657 return ret; 11658 } 11659 11660 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 11661 { 11662 struct pci_dev *pdev = ae_dev->pdev; 11663 struct hclge_dev *hdev; 11664 int ret; 11665 11666 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 11667 if (!hdev) 11668 return -ENOMEM; 11669 11670 hdev->pdev = pdev; 11671 hdev->ae_dev = ae_dev; 11672 hdev->reset_type = HNAE3_NONE_RESET; 11673 hdev->reset_level = HNAE3_FUNC_RESET; 11674 ae_dev->priv = hdev; 11675 11676 /* HW supprt 2 layer vlan */ 11677 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; 11678 11679 mutex_init(&hdev->vport_lock); 11680 spin_lock_init(&hdev->fd_rule_lock); 11681 sema_init(&hdev->reset_sem, 1); 11682 11683 ret = hclge_pci_init(hdev); 11684 if (ret) 11685 goto out; 11686 11687 ret = hclge_devlink_init(hdev); 11688 if (ret) 11689 goto err_pci_uninit; 11690 11691 devl_lock(hdev->devlink); 11692 11693 /* Firmware command queue initialize */ 11694 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); 11695 if (ret) 11696 goto err_devlink_uninit; 11697 11698 /* Firmware command initialize */ 11699 hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops); 11700 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, 11701 true, hdev->reset_pending); 11702 if (ret) 11703 goto err_cmd_uninit; 11704 11705 ret = hclge_clear_hw_resource(hdev); 11706 if (ret) 11707 goto err_cmd_uninit; 11708 11709 ret = hclge_get_cap(hdev); 11710 if (ret) 11711 goto err_cmd_uninit; 11712 11713 ret = hclge_query_dev_specs(hdev); 11714 if (ret) { 11715 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n", 11716 ret); 11717 goto err_cmd_uninit; 11718 } 11719 11720 ret = hclge_configure(hdev); 11721 if (ret) { 11722 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 11723 goto err_cmd_uninit; 11724 } 11725 11726 ret = hclge_init_msi(hdev); 11727 if (ret) { 11728 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 11729 goto err_cmd_uninit; 11730 } 11731 11732 ret = hclge_misc_irq_init(hdev); 11733 if (ret) 11734 goto err_msi_uninit; 11735 11736 ret = hclge_alloc_tqps(hdev); 11737 if (ret) { 11738 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 11739 goto err_msi_irq_uninit; 11740 } 11741 11742 ret = hclge_alloc_vport(hdev); 11743 if (ret) 11744 goto err_msi_irq_uninit; 11745 11746 ret = hclge_map_tqp(hdev); 11747 if (ret) 11748 goto err_msi_irq_uninit; 11749 11750 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 11751 clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); 11752 if 
(hnae3_dev_phy_imp_supported(hdev)) 11753 ret = hclge_update_tp_port_info(hdev); 11754 else 11755 ret = hclge_mac_mdio_config(hdev); 11756 11757 if (ret) 11758 goto err_msi_irq_uninit; 11759 } 11760 11761 ret = hclge_init_umv_space(hdev); 11762 if (ret) 11763 goto err_mdiobus_unreg; 11764 11765 ret = hclge_mac_init(hdev); 11766 if (ret) { 11767 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 11768 goto err_mdiobus_unreg; 11769 } 11770 11771 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 11772 if (ret) { 11773 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 11774 goto err_mdiobus_unreg; 11775 } 11776 11777 ret = hclge_config_gro(hdev); 11778 if (ret) 11779 goto err_mdiobus_unreg; 11780 11781 ret = hclge_init_vlan_config(hdev); 11782 if (ret) { 11783 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 11784 goto err_mdiobus_unreg; 11785 } 11786 11787 ret = hclge_tm_schd_init(hdev); 11788 if (ret) { 11789 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 11790 goto err_mdiobus_unreg; 11791 } 11792 11793 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, 11794 &hdev->rss_cfg); 11795 if (ret) { 11796 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 11797 goto err_mdiobus_unreg; 11798 } 11799 11800 ret = hclge_rss_init_hw(hdev); 11801 if (ret) { 11802 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 11803 goto err_mdiobus_unreg; 11804 } 11805 11806 ret = init_mgr_tbl(hdev); 11807 if (ret) { 11808 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 11809 goto err_mdiobus_unreg; 11810 } 11811 11812 ret = hclge_init_fd_config(hdev); 11813 if (ret) { 11814 dev_err(&pdev->dev, 11815 "fd table init fail, ret=%d\n", ret); 11816 goto err_mdiobus_unreg; 11817 } 11818 11819 ret = hclge_ptp_init(hdev); 11820 if (ret) 11821 goto err_mdiobus_unreg; 11822 11823 ret = hclge_update_port_info(hdev); 11824 if (ret) 11825 goto err_mdiobus_unreg; 11826 11827 INIT_KFIFO(hdev->mac_tnl_log); 11828 11829 hclge_dcb_ops_set(hdev); 11830 11831 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); 11832 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); 11833 11834 hclge_clear_all_event_cause(hdev); 11835 hclge_clear_resetting_state(hdev); 11836 11837 /* Log and clear the hw errors those already occurred */ 11838 if (hnae3_dev_ras_imp_supported(hdev)) 11839 hclge_handle_occurred_error(hdev); 11840 else 11841 hclge_handle_all_hns_hw_errors(ae_dev); 11842 11843 /* request delayed reset for the error recovery because an immediate 11844 * global reset on a PF affecting pending initialization of other PFs 11845 */ 11846 if (ae_dev->hw_err_reset_req) { 11847 enum hnae3_reset_type reset_level; 11848 11849 reset_level = hclge_get_reset_level(ae_dev, 11850 &ae_dev->hw_err_reset_req); 11851 hclge_set_def_reset_request(ae_dev, reset_level); 11852 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); 11853 } 11854 11855 hclge_init_rxd_adv_layout(hdev); 11856 11857 /* Enable MISC vector(vector0) */ 11858 hclge_enable_vector(&hdev->misc_vector, true); 11859 11860 ret = hclge_init_wol(hdev); 11861 if (ret) 11862 dev_warn(&pdev->dev, 11863 "failed to wake on lan init, ret = %d\n", ret); 11864 11865 hclge_state_init(hdev); 11866 hdev->last_reset_time = jiffies; 11867 11868 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", 11869 HCLGE_DRIVER_NAME); 11870 11871 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); 11872 11873 devl_unlock(hdev->devlink); 11874 return 0; 11875 11876 err_mdiobus_unreg: 11877 if 
(hdev->hw.mac.phydev) 11878 mdiobus_unregister(hdev->hw.mac.mdio_bus); 11879 err_msi_irq_uninit: 11880 hclge_misc_irq_uninit(hdev); 11881 err_msi_uninit: 11882 pci_free_irq_vectors(pdev); 11883 err_cmd_uninit: 11884 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 11885 err_devlink_uninit: 11886 devl_unlock(hdev->devlink); 11887 hclge_devlink_uninit(hdev); 11888 err_pci_uninit: 11889 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11890 pci_release_regions(pdev); 11891 pci_disable_device(pdev); 11892 out: 11893 mutex_destroy(&hdev->vport_lock); 11894 return ret; 11895 } 11896 11897 static void hclge_stats_clear(struct hclge_dev *hdev) 11898 { 11899 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); 11900 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); 11901 } 11902 11903 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 11904 { 11905 return hclge_config_switch_param(hdev, vf, enable, 11906 HCLGE_SWITCH_ANTI_SPOOF_MASK); 11907 } 11908 11909 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 11910 { 11911 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 11912 HCLGE_FILTER_FE_NIC_INGRESS_B, 11913 enable, vf); 11914 } 11915 11916 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) 11917 { 11918 int ret; 11919 11920 ret = hclge_set_mac_spoofchk(hdev, vf, enable); 11921 if (ret) { 11922 dev_err(&hdev->pdev->dev, 11923 "Set vf %d mac spoof check %s failed, ret=%d\n", 11924 vf, enable ? "on" : "off", ret); 11925 return ret; 11926 } 11927 11928 ret = hclge_set_vlan_spoofchk(hdev, vf, enable); 11929 if (ret) 11930 dev_err(&hdev->pdev->dev, 11931 "Set vf %d vlan spoof check %s failed, ret=%d\n", 11932 vf, enable ? "on" : "off", ret); 11933 11934 return ret; 11935 } 11936 11937 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, 11938 bool enable) 11939 { 11940 struct hclge_vport *vport = hclge_get_vport(handle); 11941 struct hclge_dev *hdev = vport->back; 11942 u32 new_spoofchk = enable ? 
1 : 0; 11943 int ret; 11944 11945 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 11946 return -EOPNOTSUPP; 11947 11948 vport = hclge_get_vf_vport(hdev, vf); 11949 if (!vport) 11950 return -EINVAL; 11951 11952 if (vport->vf_info.spoofchk == new_spoofchk) 11953 return 0; 11954 11955 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) 11956 dev_warn(&hdev->pdev->dev, 11957 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", 11958 vf); 11959 else if (enable && hclge_is_umv_space_full(vport, true)) 11960 dev_warn(&hdev->pdev->dev, 11961 "vf %d mac table is full, enable spoof check may cause its packet send fail\n", 11962 vf); 11963 11964 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); 11965 if (ret) 11966 return ret; 11967 11968 vport->vf_info.spoofchk = new_spoofchk; 11969 return 0; 11970 } 11971 11972 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) 11973 { 11974 struct hclge_vport *vport = hdev->vport; 11975 int ret; 11976 int i; 11977 11978 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 11979 return 0; 11980 11981 /* resume the vf spoof check state after reset */ 11982 for (i = 0; i < hdev->num_alloc_vport; i++) { 11983 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, 11984 vport->vf_info.spoofchk); 11985 if (ret) 11986 return ret; 11987 11988 vport++; 11989 } 11990 11991 return 0; 11992 } 11993 11994 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) 11995 { 11996 struct hclge_vport *vport = hclge_get_vport(handle); 11997 struct hclge_dev *hdev = vport->back; 11998 u32 new_trusted = enable ? 1 : 0; 11999 12000 vport = hclge_get_vf_vport(hdev, vf); 12001 if (!vport) 12002 return -EINVAL; 12003 12004 if (vport->vf_info.trusted == new_trusted) 12005 return 0; 12006 12007 vport->vf_info.trusted = new_trusted; 12008 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12009 hclge_task_schedule(hdev, 0); 12010 12011 return 0; 12012 } 12013 12014 static void hclge_reset_vf_rate(struct hclge_dev *hdev) 12015 { 12016 int ret; 12017 int vf; 12018 12019 /* reset vf rate to default value */ 12020 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { 12021 struct hclge_vport *vport = &hdev->vport[vf]; 12022 12023 vport->vf_info.max_tx_rate = 0; 12024 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); 12025 if (ret) 12026 dev_err(&hdev->pdev->dev, 12027 "vf%d failed to reset to default, ret=%d\n", 12028 vf - HCLGE_VF_VPORT_START_NUM, ret); 12029 } 12030 } 12031 12032 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, 12033 int min_tx_rate, int max_tx_rate) 12034 { 12035 if (min_tx_rate != 0 || 12036 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { 12037 dev_err(&hdev->pdev->dev, 12038 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n", 12039 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); 12040 return -EINVAL; 12041 } 12042 12043 return 0; 12044 } 12045 12046 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, 12047 int min_tx_rate, int max_tx_rate, bool force) 12048 { 12049 struct hclge_vport *vport = hclge_get_vport(handle); 12050 struct hclge_dev *hdev = vport->back; 12051 int ret; 12052 12053 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); 12054 if (ret) 12055 return ret; 12056 12057 vport = hclge_get_vf_vport(hdev, vf); 12058 if (!vport) 12059 return -EINVAL; 12060 12061 if (!force && max_tx_rate == vport->vf_info.max_tx_rate) 12062 return 0; 12063 12064 ret = hclge_tm_qs_shaper_cfg(vport, 
max_tx_rate); 12065 if (ret) 12066 return ret; 12067 12068 vport->vf_info.max_tx_rate = max_tx_rate; 12069 12070 return 0; 12071 } 12072 12073 static int hclge_resume_vf_rate(struct hclge_dev *hdev) 12074 { 12075 struct hnae3_handle *handle = &hdev->vport->nic; 12076 struct hclge_vport *vport; 12077 int ret; 12078 int vf; 12079 12080 /* resume the vf max_tx_rate after reset */ 12081 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { 12082 vport = hclge_get_vf_vport(hdev, vf); 12083 if (!vport) 12084 return -EINVAL; 12085 12086 /* zero means max rate, after reset, firmware already set it to 12087 * max rate, so just continue. 12088 */ 12089 if (!vport->vf_info.max_tx_rate) 12090 continue; 12091 12092 ret = hclge_set_vf_rate(handle, vf, 0, 12093 vport->vf_info.max_tx_rate, true); 12094 if (ret) { 12095 dev_err(&hdev->pdev->dev, 12096 "vf%d failed to resume tx_rate:%u, ret=%d\n", 12097 vf, vport->vf_info.max_tx_rate, ret); 12098 return ret; 12099 } 12100 } 12101 12102 return 0; 12103 } 12104 12105 static void hclge_reset_vport_state(struct hclge_dev *hdev) 12106 { 12107 struct hclge_vport *vport = hdev->vport; 12108 int i; 12109 12110 for (i = 0; i < hdev->num_alloc_vport; i++) { 12111 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 12112 vport++; 12113 } 12114 } 12115 12116 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 12117 { 12118 struct hclge_dev *hdev = ae_dev->priv; 12119 struct pci_dev *pdev = ae_dev->pdev; 12120 int ret; 12121 12122 set_bit(HCLGE_STATE_DOWN, &hdev->state); 12123 12124 hclge_stats_clear(hdev); 12125 /* NOTE: pf reset needn't to clear or restore pf and vf table entry. 12126 * so here should not clean table in memory. 12127 */ 12128 if (hdev->reset_type == HNAE3_IMP_RESET || 12129 hdev->reset_type == HNAE3_GLOBAL_RESET) { 12130 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 12131 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); 12132 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); 12133 hclge_reset_umv_space(hdev); 12134 } 12135 12136 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, 12137 true, hdev->reset_pending); 12138 if (ret) { 12139 dev_err(&pdev->dev, "Cmd queue init failed\n"); 12140 return ret; 12141 } 12142 12143 ret = hclge_map_tqp(hdev); 12144 if (ret) { 12145 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 12146 return ret; 12147 } 12148 12149 ret = hclge_mac_init(hdev); 12150 if (ret) { 12151 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 12152 return ret; 12153 } 12154 12155 ret = hclge_tp_port_init(hdev); 12156 if (ret) { 12157 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n", 12158 ret); 12159 return ret; 12160 } 12161 12162 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 12163 if (ret) { 12164 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 12165 return ret; 12166 } 12167 12168 ret = hclge_config_gro(hdev); 12169 if (ret) 12170 return ret; 12171 12172 ret = hclge_init_vlan_config(hdev); 12173 if (ret) { 12174 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 12175 return ret; 12176 } 12177 12178 hclge_reset_tc_config(hdev); 12179 12180 ret = hclge_tm_init_hw(hdev, true); 12181 if (ret) { 12182 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 12183 return ret; 12184 } 12185 12186 ret = hclge_rss_init_hw(hdev); 12187 if (ret) { 12188 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 12189 return ret; 12190 } 12191 12192 ret = init_mgr_tbl(hdev); 12193 if (ret) { 12194 dev_err(&pdev->dev, 12195 "failed to reinit 
manager table, ret = %d\n", ret); 12196 return ret; 12197 } 12198 12199 ret = hclge_init_fd_config(hdev); 12200 if (ret) { 12201 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); 12202 return ret; 12203 } 12204 12205 ret = hclge_ptp_init(hdev); 12206 if (ret) 12207 return ret; 12208 12209 /* Log and clear the hw errors those already occurred */ 12210 if (hnae3_dev_ras_imp_supported(hdev)) 12211 hclge_handle_occurred_error(hdev); 12212 else 12213 hclge_handle_all_hns_hw_errors(ae_dev); 12214 12215 /* Re-enable the hw error interrupts because 12216 * the interrupts get disabled on global reset. 12217 */ 12218 ret = hclge_config_nic_hw_error(hdev, true); 12219 if (ret) { 12220 dev_err(&pdev->dev, 12221 "fail(%d) to re-enable NIC hw error interrupts\n", 12222 ret); 12223 return ret; 12224 } 12225 12226 if (hdev->roce_client) { 12227 ret = hclge_config_rocee_ras_interrupt(hdev, true); 12228 if (ret) { 12229 dev_err(&pdev->dev, 12230 "fail(%d) to re-enable roce ras interrupts\n", 12231 ret); 12232 return ret; 12233 } 12234 } 12235 12236 hclge_reset_vport_state(hdev); 12237 ret = hclge_reset_vport_spoofchk(hdev); 12238 if (ret) 12239 return ret; 12240 12241 ret = hclge_resume_vf_rate(hdev); 12242 if (ret) 12243 return ret; 12244 12245 hclge_init_rxd_adv_layout(hdev); 12246 12247 ret = hclge_update_wol(hdev); 12248 if (ret) 12249 dev_warn(&pdev->dev, 12250 "failed to update wol config, ret = %d\n", ret); 12251 12252 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 12253 HCLGE_DRIVER_NAME); 12254 12255 return 0; 12256 } 12257 12258 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 12259 { 12260 struct hclge_dev *hdev = ae_dev->priv; 12261 struct hclge_mac *mac = &hdev->hw.mac; 12262 12263 hclge_reset_vf_rate(hdev); 12264 hclge_clear_vf_vlan(hdev); 12265 hclge_state_uninit(hdev); 12266 hclge_ptp_uninit(hdev); 12267 hclge_uninit_rxd_adv_layout(hdev); 12268 hclge_uninit_mac_table(hdev); 12269 hclge_del_all_fd_entries(hdev); 12270 12271 if (mac->phydev) 12272 mdiobus_unregister(mac->mdio_bus); 12273 12274 /* Disable MISC vector(vector0) */ 12275 hclge_enable_vector(&hdev->misc_vector, false); 12276 synchronize_irq(hdev->misc_vector.vector_irq); 12277 12278 /* Disable all hw interrupts */ 12279 hclge_config_mac_tnl_int(hdev, false); 12280 hclge_config_nic_hw_error(hdev, false); 12281 hclge_config_rocee_ras_interrupt(hdev, false); 12282 12283 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 12284 hclge_misc_irq_uninit(hdev); 12285 hclge_devlink_uninit(hdev); 12286 hclge_pci_uninit(hdev); 12287 hclge_uninit_vport_vlan_table(hdev); 12288 mutex_destroy(&hdev->vport_lock); 12289 ae_dev->priv = NULL; 12290 } 12291 12292 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 12293 { 12294 struct hclge_vport *vport = hclge_get_vport(handle); 12295 struct hclge_dev *hdev = vport->back; 12296 12297 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); 12298 } 12299 12300 static void hclge_get_channels(struct hnae3_handle *handle, 12301 struct ethtool_channels *ch) 12302 { 12303 ch->max_combined = hclge_get_max_channels(handle); 12304 ch->other_count = 1; 12305 ch->max_other = 1; 12306 ch->combined_count = handle->kinfo.rss_size; 12307 } 12308 12309 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 12310 u16 *alloc_tqps, u16 *max_rss_size) 12311 { 12312 struct hclge_vport *vport = hclge_get_vport(handle); 12313 struct hclge_dev *hdev = vport->back; 12314 12315 *alloc_tqps = vport->alloc_tqps; 12316 *max_rss_size = hdev->pf_rss_size_max; 
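	/* the caller gets the vport's own TQP allocation plus the PF-wide
	 * RSS size ceiling; hclge_get_max_channels() above reports the
	 * smaller of the same two values as the channel limit.
	 */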
12317 } 12318 12319 static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle) 12320 { 12321 struct hclge_vport *vport = hclge_get_vport(handle); 12322 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 12323 struct hclge_dev *hdev = vport->back; 12324 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 12325 u16 tc_valid[HCLGE_MAX_TC_NUM]; 12326 u16 roundup_size; 12327 unsigned int i; 12328 12329 roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); 12330 roundup_size = ilog2(roundup_size); 12331 /* Set the RSS TC mode according to the new RSS size */ 12332 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 12333 tc_valid[i] = 0; 12334 12335 if (!(hdev->hw_tc_map & BIT(i))) 12336 continue; 12337 12338 tc_valid[i] = 1; 12339 tc_size[i] = roundup_size; 12340 tc_offset[i] = vport->nic.kinfo.rss_size * i; 12341 } 12342 12343 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, 12344 tc_size); 12345 } 12346 12347 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 12348 bool rxfh_configured) 12349 { 12350 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 12351 struct hclge_vport *vport = hclge_get_vport(handle); 12352 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 12353 struct hclge_dev *hdev = vport->back; 12354 u16 cur_rss_size = kinfo->rss_size; 12355 u16 cur_tqps = kinfo->num_tqps; 12356 u32 *rss_indir; 12357 unsigned int i; 12358 int ret; 12359 12360 kinfo->req_rss_size = new_tqps_num; 12361 12362 ret = hclge_tm_vport_map_update(hdev); 12363 if (ret) { 12364 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); 12365 return ret; 12366 } 12367 12368 ret = hclge_set_rss_tc_mode_cfg(handle); 12369 if (ret) 12370 return ret; 12371 12372 /* RSS indirection table has been configured by user */ 12373 if (rxfh_configured) 12374 goto out; 12375 12376 /* Reinitializes the rss indirect table according to the new RSS size */ 12377 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32), 12378 GFP_KERNEL); 12379 if (!rss_indir) 12380 return -ENOMEM; 12381 12382 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) 12383 rss_indir[i] = i % kinfo->rss_size; 12384 12385 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 12386 if (ret) 12387 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 12388 ret); 12389 12390 kfree(rss_indir); 12391 12392 out: 12393 if (!ret) 12394 dev_info(&hdev->pdev->dev, 12395 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 12396 cur_rss_size, kinfo->rss_size, 12397 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 12398 12399 return ret; 12400 } 12401 12402 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) 12403 { 12404 struct hclge_set_led_state_cmd *req; 12405 struct hclge_desc desc; 12406 int ret; 12407 12408 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); 12409 12410 req = (struct hclge_set_led_state_cmd *)desc.data; 12411 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, 12412 HCLGE_LED_LOCATE_STATE_S, locate_led_status); 12413 12414 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12415 if (ret) 12416 dev_err(&hdev->pdev->dev, 12417 "Send set led state cmd error, ret =%d\n", ret); 12418 12419 return ret; 12420 } 12421 12422 enum hclge_led_status { 12423 HCLGE_LED_OFF, 12424 HCLGE_LED_ON, 12425 HCLGE_LED_NO_CHANGE = 0xFF, 12426 }; 12427 12428 static int hclge_set_led_id(struct hnae3_handle *handle, 12429 enum ethtool_phys_id_state status) 12430 { 12431 struct hclge_vport *vport = hclge_get_vport(handle); 12432 struct 
hclge_dev *hdev = vport->back; 12433 12434 switch (status) { 12435 case ETHTOOL_ID_ACTIVE: 12436 return hclge_set_led_status(hdev, HCLGE_LED_ON); 12437 case ETHTOOL_ID_INACTIVE: 12438 return hclge_set_led_status(hdev, HCLGE_LED_OFF); 12439 default: 12440 return -EINVAL; 12441 } 12442 } 12443 12444 static void hclge_get_link_mode(struct hnae3_handle *handle, 12445 unsigned long *supported, 12446 unsigned long *advertising) 12447 { 12448 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); 12449 struct hclge_vport *vport = hclge_get_vport(handle); 12450 struct hclge_dev *hdev = vport->back; 12451 unsigned int idx = 0; 12452 12453 for (; idx < size; idx++) { 12454 supported[idx] = hdev->hw.mac.supported[idx]; 12455 advertising[idx] = hdev->hw.mac.advertising[idx]; 12456 } 12457 } 12458 12459 static int hclge_gro_en(struct hnae3_handle *handle, bool enable) 12460 { 12461 struct hclge_vport *vport = hclge_get_vport(handle); 12462 struct hclge_dev *hdev = vport->back; 12463 bool gro_en_old = hdev->gro_en; 12464 int ret; 12465 12466 hdev->gro_en = enable; 12467 ret = hclge_config_gro(hdev); 12468 if (ret) 12469 hdev->gro_en = gro_en_old; 12470 12471 return ret; 12472 } 12473 12474 static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport) 12475 { 12476 struct hnae3_handle *handle = &vport->nic; 12477 struct hclge_dev *hdev = vport->back; 12478 bool uc_en = false; 12479 bool mc_en = false; 12480 u8 tmp_flags; 12481 bool bc_en; 12482 int ret; 12483 12484 if (vport->last_promisc_flags != vport->overflow_promisc_flags) { 12485 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12486 vport->last_promisc_flags = vport->overflow_promisc_flags; 12487 } 12488 12489 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, 12490 &vport->state)) 12491 return 0; 12492 12493 /* for PF */ 12494 if (!vport->vport_id) { 12495 tmp_flags = handle->netdev_flags | vport->last_promisc_flags; 12496 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, 12497 tmp_flags & HNAE3_MPE); 12498 if (!ret) 12499 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 12500 &vport->state); 12501 else 12502 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, 12503 &vport->state); 12504 return ret; 12505 } 12506 12507 /* for VF */ 12508 if (vport->vf_info.trusted) { 12509 uc_en = vport->vf_info.request_uc_en > 0 || 12510 vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; 12511 mc_en = vport->vf_info.request_mc_en > 0 || 12512 vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; 12513 } 12514 bc_en = vport->vf_info.request_bc_en > 0; 12515 12516 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, 12517 mc_en, bc_en); 12518 if (ret) { 12519 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12520 return ret; 12521 } 12522 hclge_set_vport_vlan_fltr_change(vport); 12523 12524 return 0; 12525 } 12526 12527 static void hclge_sync_promisc_mode(struct hclge_dev *hdev) 12528 { 12529 struct hclge_vport *vport; 12530 int ret; 12531 u16 i; 12532 12533 for (i = 0; i < hdev->num_alloc_vport; i++) { 12534 vport = &hdev->vport[i]; 12535 12536 ret = hclge_sync_vport_promisc_mode(vport); 12537 if (ret) 12538 return; 12539 } 12540 } 12541 12542 static bool hclge_module_existed(struct hclge_dev *hdev) 12543 { 12544 struct hclge_desc desc; 12545 u32 existed; 12546 int ret; 12547 12548 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true); 12549 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12550 if (ret) { 12551 dev_err(&hdev->pdev->dev, 12552 "failed to get SFP exist state, ret = %d\n", ret); 12553 return 
false; 12554 } 12555 12556 existed = le32_to_cpu(desc.data[0]); 12557 12558 return existed != 0; 12559 } 12560 12561 /* need 6 bds(total 140 bytes) in one reading 12562 * return the number of bytes actually read, 0 means read failed. 12563 */ 12564 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, 12565 u32 len, u8 *data) 12566 { 12567 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM]; 12568 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0; 12569 u16 read_len; 12570 u16 copy_len; 12571 int ret; 12572 int i; 12573 12574 /* setup all 6 bds to read module eeprom info. */ 12575 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) { 12576 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM, 12577 true); 12578 12579 /* bd0~bd4 need next flag */ 12580 if (i < HCLGE_SFP_INFO_CMD_NUM - 1) 12581 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 12582 } 12583 12584 /* setup bd0, this bd contains offset and read length. */ 12585 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data; 12586 sfp_info_bd0->offset = cpu_to_le16((u16)offset); 12587 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN); 12588 sfp_info_bd0->read_len = cpu_to_le16(read_len); 12589 12590 ret = hclge_cmd_send(&hdev->hw, desc, i); 12591 if (ret) { 12592 dev_err(&hdev->pdev->dev, 12593 "failed to get SFP eeprom info, ret = %d\n", ret); 12594 return 0; 12595 } 12596 12597 /* copy sfp info from bd0 to out buffer. */ 12598 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN); 12599 memcpy(data, sfp_info_bd0->data, copy_len); 12600 read_len = copy_len; 12601 12602 /* copy sfp info from bd1~bd5 to out buffer if needed. */ 12603 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) { 12604 if (read_len >= len) 12605 return read_len; 12606 12607 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); 12608 memcpy(data + read_len, desc[i].data, copy_len); 12609 read_len += copy_len; 12610 } 12611 12612 return read_len; 12613 } 12614 12615 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, 12616 u32 len, u8 *data) 12617 { 12618 struct hclge_vport *vport = hclge_get_vport(handle); 12619 struct hclge_dev *hdev = vport->back; 12620 u32 read_len = 0; 12621 u16 data_len; 12622 12623 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) 12624 return -EOPNOTSUPP; 12625 12626 if (!hclge_module_existed(hdev)) 12627 return -ENXIO; 12628 12629 while (read_len < len) { 12630 data_len = hclge_get_sfp_eeprom_info(hdev, 12631 offset + read_len, 12632 len - read_len, 12633 data + read_len); 12634 if (!data_len) 12635 return -EIO; 12636 12637 read_len += data_len; 12638 } 12639 12640 return 0; 12641 } 12642 12643 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, 12644 u32 *status_code) 12645 { 12646 struct hclge_vport *vport = hclge_get_vport(handle); 12647 struct hclge_dev *hdev = vport->back; 12648 struct hclge_desc desc; 12649 int ret; 12650 12651 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) 12652 return -EOPNOTSUPP; 12653 12654 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); 12655 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12656 if (ret) { 12657 dev_err(&hdev->pdev->dev, 12658 "failed to query link diagnosis info, ret = %d\n", ret); 12659 return ret; 12660 } 12661 12662 *status_code = le32_to_cpu(desc.data[0]); 12663 return 0; 12664 } 12665 12666 /* After disable sriov, VF still has some config and info need clean, 12667 * which configed by PF. 
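 * This covers the TX rate limit, the port based VLAN and the
 * MAC/VLAN spoof-check state that hclge_clear_vport_vf_info()
 * resets below, as well as the cached vf_info itself.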
12668 */ 12669 static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid) 12670 { 12671 struct hclge_dev *hdev = vport->back; 12672 struct hclge_vlan_info vlan_info; 12673 int ret; 12674 12675 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 12676 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 12677 vport->need_notify = 0; 12678 vport->mps = 0; 12679 12680 /* after disable sriov, clean VF rate configured by PF */ 12681 ret = hclge_tm_qs_shaper_cfg(vport, 0); 12682 if (ret) 12683 dev_err(&hdev->pdev->dev, 12684 "failed to clean vf%d rate config, ret = %d\n", 12685 vfid, ret); 12686 12687 vlan_info.vlan_tag = 0; 12688 vlan_info.qos = 0; 12689 vlan_info.vlan_proto = ETH_P_8021Q; 12690 ret = hclge_update_port_base_vlan_cfg(vport, 12691 HNAE3_PORT_BASE_VLAN_DISABLE, 12692 &vlan_info); 12693 if (ret) 12694 dev_err(&hdev->pdev->dev, 12695 "failed to clean vf%d port base vlan, ret = %d\n", 12696 vfid, ret); 12697 12698 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); 12699 if (ret) 12700 dev_err(&hdev->pdev->dev, 12701 "failed to clean vf%d spoof config, ret = %d\n", 12702 vfid, ret); 12703 12704 memset(&vport->vf_info, 0, sizeof(vport->vf_info)); 12705 } 12706 12707 static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) 12708 { 12709 struct hclge_dev *hdev = ae_dev->priv; 12710 struct hclge_vport *vport; 12711 int i; 12712 12713 for (i = 0; i < num_vfs; i++) { 12714 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; 12715 12716 hclge_clear_vport_vf_info(vport, i); 12717 } 12718 } 12719 12720 static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode, 12721 u8 *priority) 12722 { 12723 struct hclge_vport *vport = hclge_get_vport(h); 12724 12725 if (dscp >= HNAE3_MAX_DSCP) 12726 return -EINVAL; 12727 12728 if (tc_mode) 12729 *tc_mode = vport->nic.kinfo.tc_map_mode; 12730 if (priority) 12731 *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 
0 : 12732 vport->nic.kinfo.dscp_prio[dscp]; 12733 12734 return 0; 12735 } 12736 12737 static const struct hnae3_ae_ops hclge_ops = { 12738 .init_ae_dev = hclge_init_ae_dev, 12739 .uninit_ae_dev = hclge_uninit_ae_dev, 12740 .reset_prepare = hclge_reset_prepare_general, 12741 .reset_done = hclge_reset_done, 12742 .init_client_instance = hclge_init_client_instance, 12743 .uninit_client_instance = hclge_uninit_client_instance, 12744 .map_ring_to_vector = hclge_map_ring_to_vector, 12745 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, 12746 .get_vector = hclge_get_vector, 12747 .put_vector = hclge_put_vector, 12748 .set_promisc_mode = hclge_set_promisc_mode, 12749 .request_update_promisc_mode = hclge_request_update_promisc_mode, 12750 .set_loopback = hclge_set_loopback, 12751 .start = hclge_ae_start, 12752 .stop = hclge_ae_stop, 12753 .client_start = hclge_client_start, 12754 .client_stop = hclge_client_stop, 12755 .get_status = hclge_get_status, 12756 .get_ksettings_an_result = hclge_get_ksettings_an_result, 12757 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, 12758 .get_media_type = hclge_get_media_type, 12759 .check_port_speed = hclge_check_port_speed, 12760 .get_fec_stats = hclge_get_fec_stats, 12761 .get_fec = hclge_get_fec, 12762 .set_fec = hclge_set_fec, 12763 .get_rss_key_size = hclge_comm_get_rss_key_size, 12764 .get_rss = hclge_get_rss, 12765 .set_rss = hclge_set_rss, 12766 .set_rss_tuple = hclge_set_rss_tuple, 12767 .get_rss_tuple = hclge_get_rss_tuple, 12768 .get_tc_size = hclge_get_tc_size, 12769 .get_mac_addr = hclge_get_mac_addr, 12770 .set_mac_addr = hclge_set_mac_addr, 12771 .do_ioctl = hclge_do_ioctl, 12772 .add_uc_addr = hclge_add_uc_addr, 12773 .rm_uc_addr = hclge_rm_uc_addr, 12774 .add_mc_addr = hclge_add_mc_addr, 12775 .rm_mc_addr = hclge_rm_mc_addr, 12776 .set_autoneg = hclge_set_autoneg, 12777 .get_autoneg = hclge_get_autoneg, 12778 .restart_autoneg = hclge_restart_autoneg, 12779 .halt_autoneg = hclge_halt_autoneg, 12780 .get_pauseparam = hclge_get_pauseparam, 12781 .set_pauseparam = hclge_set_pauseparam, 12782 .set_mtu = hclge_set_mtu, 12783 .reset_queue = hclge_reset_tqp, 12784 .get_stats = hclge_get_stats, 12785 .get_mac_stats = hclge_get_mac_stat, 12786 .update_stats = hclge_update_stats, 12787 .get_strings = hclge_get_strings, 12788 .get_sset_count = hclge_get_sset_count, 12789 .get_fw_version = hclge_get_fw_version, 12790 .get_mdix_mode = hclge_get_mdix_mode, 12791 .enable_vlan_filter = hclge_enable_vlan_filter, 12792 .set_vlan_filter = hclge_set_vlan_filter, 12793 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 12794 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 12795 .reset_event = hclge_reset_event, 12796 .get_reset_level = hclge_get_reset_level, 12797 .set_default_reset_request = hclge_set_def_reset_request, 12798 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, 12799 .set_channels = hclge_set_channels, 12800 .get_channels = hclge_get_channels, 12801 .get_regs_len = hclge_get_regs_len, 12802 .get_regs = hclge_get_regs, 12803 .set_led_id = hclge_set_led_id, 12804 .get_link_mode = hclge_get_link_mode, 12805 .add_fd_entry = hclge_add_fd_entry, 12806 .del_fd_entry = hclge_del_fd_entry, 12807 .get_fd_rule_cnt = hclge_get_fd_rule_cnt, 12808 .get_fd_rule_info = hclge_get_fd_rule_info, 12809 .get_fd_all_rules = hclge_get_all_rules, 12810 .enable_fd = hclge_enable_fd, 12811 .add_arfs_entry = hclge_add_fd_entry_by_arfs, 12812 .dbg_read_cmd = hclge_dbg_read_cmd, 12813 .handle_hw_ras_error = hclge_handle_hw_ras_error, 12814 .get_hw_reset_stat = 
hclge_get_hw_reset_stat, 12815 .ae_dev_resetting = hclge_ae_dev_resetting, 12816 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, 12817 .set_gro_en = hclge_gro_en, 12818 .get_global_queue_id = hclge_covert_handle_qid_global, 12819 .set_timer_task = hclge_set_timer_task, 12820 .mac_connect_phy = hclge_mac_connect_phy, 12821 .mac_disconnect_phy = hclge_mac_disconnect_phy, 12822 .get_vf_config = hclge_get_vf_config, 12823 .set_vf_link_state = hclge_set_vf_link_state, 12824 .set_vf_spoofchk = hclge_set_vf_spoofchk, 12825 .set_vf_trust = hclge_set_vf_trust, 12826 .set_vf_rate = hclge_set_vf_rate, 12827 .set_vf_mac = hclge_set_vf_mac, 12828 .get_module_eeprom = hclge_get_module_eeprom, 12829 .get_cmdq_stat = hclge_get_cmdq_stat, 12830 .add_cls_flower = hclge_add_cls_flower, 12831 .del_cls_flower = hclge_del_cls_flower, 12832 .cls_flower_active = hclge_is_cls_flower_active, 12833 .get_phy_link_ksettings = hclge_get_phy_link_ksettings, 12834 .set_phy_link_ksettings = hclge_set_phy_link_ksettings, 12835 .set_tx_hwts_info = hclge_ptp_set_tx_info, 12836 .get_rx_hwts = hclge_ptp_get_rx_hwts, 12837 .get_ts_info = hclge_ptp_get_ts_info, 12838 .get_link_diagnosis_info = hclge_get_link_diagnosis_info, 12839 .clean_vf_config = hclge_clean_vport_config, 12840 .get_dscp_prio = hclge_get_dscp_prio, 12841 .get_wol = hclge_get_wol, 12842 .set_wol = hclge_set_wol, 12843 }; 12844 12845 static struct hnae3_ae_algo ae_algo = { 12846 .ops = &hclge_ops, 12847 .pdev_id_table = ae_algo_pci_tbl, 12848 }; 12849 12850 static int __init hclge_init(void) 12851 { 12852 pr_info("%s is initializing\n", HCLGE_NAME); 12853 12854 hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); 12855 if (!hclge_wq) { 12856 pr_err("%s: failed to create workqueue\n", HCLGE_NAME); 12857 return -ENOMEM; 12858 } 12859 12860 hnae3_register_ae_algo(&ae_algo); 12861 12862 return 0; 12863 } 12864 12865 static void __exit hclge_exit(void) 12866 { 12867 hnae3_unregister_ae_algo_prepare(&ae_algo); 12868 hnae3_unregister_ae_algo(&ae_algo); 12869 destroy_workqueue(hclge_wq); 12870 } 12871 module_init(hclge_init); 12872 module_exit(hclge_exit); 12873 12874 MODULE_LICENSE("GPL"); 12875 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 12876 MODULE_DESCRIPTION("HCLGE Driver"); 12877 MODULE_VERSION(HCLGE_MOD_VERSION); 12878