1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/acpi.h> 5 #include <linux/device.h> 6 #include <linux/etherdevice.h> 7 #include <linux/init.h> 8 #include <linux/interrupt.h> 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <linux/pci.h> 13 #include <linux/platform_device.h> 14 #include <linux/if_vlan.h> 15 #include <linux/crash_dump.h> 16 #include <net/ipv6.h> 17 #include <net/rtnetlink.h> 18 #include "hclge_cmd.h" 19 #include "hclge_dcb.h" 20 #include "hclge_main.h" 21 #include "hclge_mbx.h" 22 #include "hclge_mdio.h" 23 #include "hclge_regs.h" 24 #include "hclge_tm.h" 25 #include "hclge_err.h" 26 #include "hnae3.h" 27 #include "hclge_devlink.h" 28 #include "hclge_comm_cmd.h" 29 30 #include "hclge_trace.h" 31 32 #define HCLGE_NAME "hclge" 33 34 #define HCLGE_BUF_SIZE_UNIT 256U 35 #define HCLGE_BUF_MUL_BY 2 36 #define HCLGE_BUF_DIV_BY 2 37 #define NEED_RESERVE_TC_NUM 2 38 #define BUF_MAX_PERCENT 100 39 #define BUF_RESERVE_PERCENT 90 40 41 #define HCLGE_RESET_MAX_FAIL_CNT 5 42 #define HCLGE_RESET_SYNC_TIME 100 43 #define HCLGE_PF_RESET_SYNC_TIME 20 44 #define HCLGE_PF_RESET_SYNC_CNT 1500 45 46 #define HCLGE_LINK_STATUS_MS 10 47 48 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); 49 static int hclge_init_vlan_config(struct hclge_dev *hdev); 50 static void hclge_sync_vlan_filter(struct hclge_dev *hdev); 51 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); 52 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); 53 static void hclge_rfs_filter_expire(struct hclge_dev *hdev); 54 static int hclge_clear_arfs_rules(struct hclge_dev *hdev); 55 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, 56 unsigned long *addr); 57 static int hclge_set_default_loopback(struct hclge_dev *hdev); 58 59 static void hclge_sync_mac_table(struct hclge_dev *hdev); 60 static void hclge_restore_hw_table(struct hclge_dev *hdev); 61 static void hclge_sync_promisc_mode(struct hclge_dev *hdev); 62 static void hclge_sync_fd_table(struct hclge_dev *hdev); 63 static void hclge_update_fec_stats(struct hclge_dev *hdev); 64 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, 65 int wait_cnt); 66 static int hclge_update_port_info(struct hclge_dev *hdev); 67 68 static struct hnae3_ae_algo ae_algo; 69 70 static struct workqueue_struct *hclge_wq; 71 72 static const struct pci_device_id ae_algo_pci_tbl[] = { 73 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 74 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 75 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, 76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, 77 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0}, 81 /* required last entry */ 82 {0, } 83 }; 84 85 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); 86 87 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { 88 "External Loopback test", 89 "App Loopback test", 90 "Serdes serial Loopback test", 91 "Serdes parallel Loopback test", 92 "Phy Loopback test" 93 }; 94 95 static const struct hclge_comm_stats_str g_mac_stats_string[] = { 96 {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, 97 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, 98 {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, 99 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, 100 
{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 101 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)}, 102 {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 103 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)}, 104 {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 105 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, 106 {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 107 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, 108 {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 109 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, 110 {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 111 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 112 {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 113 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 114 {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 115 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 116 {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 117 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 118 {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 119 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 120 {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 121 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 122 {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 123 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 124 {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 125 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 126 {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 127 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)}, 128 {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 129 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)}, 130 {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 131 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)}, 132 {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 133 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)}, 134 {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 135 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)}, 136 {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 137 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)}, 138 {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 139 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)}, 140 {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 141 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)}, 142 {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 143 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, 144 {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 145 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 146 {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 147 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 148 {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 149 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 150 {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 151 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 152 {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 153 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 154 {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 155 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 156 {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 157 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 158 {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 159 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 160 {"mac_rx_pfc_pri0_xoff_time", 
HCLGE_MAC_STATS_MAX_NUM_V2, 161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)}, 162 {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 163 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)}, 164 {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)}, 166 {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 167 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)}, 168 {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 169 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)}, 170 {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 171 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)}, 172 {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 173 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)}, 174 {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 175 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)}, 176 {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 178 {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 180 {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 182 {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 184 {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 185 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 186 {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 187 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 188 {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 189 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 190 {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 191 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 192 {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 193 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 194 {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 195 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 196 {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 197 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, 198 {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 199 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 200 {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 201 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 202 {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 204 {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 206 {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 208 {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 210 {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, 212 {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 214 {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 216 {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 218 {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, 220 
{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, 222 {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, 224 {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, 226 {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 227 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 228 {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 229 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 230 {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 231 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 232 {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 233 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 234 {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 235 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 236 {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 237 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 238 {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 239 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 240 {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 241 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 242 {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 243 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 244 {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 245 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 246 {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 247 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, 248 {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 249 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 250 {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 252 {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 254 {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 256 {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 258 {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 260 {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 262 {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 264 {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 266 {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 268 {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 270 {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 272 {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 274 {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 276 277 {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 278 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 279 {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 280 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 281 {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 282 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 283 {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 284 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 285 {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 286 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 287 {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 288 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 289 {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 291 {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 293 {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 295 {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 297 {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 299 {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 300 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 301 }; 302 303 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 304 { 305 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 306 .ethter_type = cpu_to_le16(ETH_P_LLDP), 307 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e}, 308 .i_port_bitmap = 0x1, 309 }, 310 }; 311 312 static const struct key_info meta_data_key_info[] = { 313 { PACKET_TYPE_ID, 6 }, 314 { IP_FRAGEMENT, 1 }, 315 { ROCE_TYPE, 1 }, 316 { NEXT_KEY, 5 }, 317 { VLAN_NUMBER, 2 }, 318 { SRC_VPORT, 12 }, 319 { DST_VPORT, 12 }, 320 { TUNNEL_PACKET, 1 }, 321 }; 322 323 static const struct key_info tuple_key_info[] = { 324 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 }, 325 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 }, 326 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 }, 327 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, 328 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 }, 329 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 }, 330 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 }, 331 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 }, 332 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 }, 333 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 }, 334 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 }, 335 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 }, 336 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 }, 337 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 }, 338 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 }, 339 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 }, 340 { INNER_DST_MAC, 48, KEY_OPT_MAC, 341 offsetof(struct hclge_fd_rule, tuples.dst_mac), 342 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) }, 343 { INNER_SRC_MAC, 48, KEY_OPT_MAC, 344 offsetof(struct hclge_fd_rule, tuples.src_mac), 345 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) }, 346 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16, 347 offsetof(struct hclge_fd_rule, tuples.vlan_tag1), 348 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) }, 349 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, 350 { INNER_ETH_TYPE, 16, KEY_OPT_LE16, 351 offsetof(struct hclge_fd_rule, tuples.ether_proto), 352 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) }, 353 { INNER_L2_RSV, 16, KEY_OPT_LE16, 354 offsetof(struct hclge_fd_rule, tuples.l2_user_def), 355 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) }, 356 { INNER_IP_TOS, 8, KEY_OPT_U8, 357 offsetof(struct hclge_fd_rule, tuples.ip_tos), 358 offsetof(struct 
	  hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it posts the
 * descriptors to the queue and cleans the queue afterwards.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				 int num, bool is_special)
{
	int i;

	trace_hclge_pf_cmd_send(hw, desc, 0, num);

	if (!is_special) {
		for (i = 1; i < num; i++)
			trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
	} else {
		for (i = 1; i < num; i++)
			trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
							i, num);
	}
}

static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				int num, bool is_special)
{
	int i;

	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		return;

	trace_hclge_pf_cmd_get(hw, desc, 0, num);

	if (!is_special) {
		for (i = 1; i < num; i++)
			trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
	} else {
		for (i = 1; i < num; i++)
			trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
						       i, num);
	}
}

static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
	.trace_cmd_send = hclge_trace_cmd_send,
	.trace_cmd_get = hclge_trace_cmd_get,
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	/* The first desc has a 64-bit header, so the data size needs to be
	 * reduced by 1
	 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC 4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so need to consider it */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs total register number of both valid registers and
	 * reserved registers, but the old firmware only returns number
	 * of valid registers in device V2. To be compatible with these
	 * devices, driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}

int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}

static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count++;

	return count;
}

static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}

static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i <
size; i++) { 607 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) 608 continue; 609 610 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); 611 buff = buff + ETH_GSTRING_LEN; 612 } 613 614 return (u8 *)buff; 615 } 616 617 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 618 { 619 struct hnae3_handle *handle; 620 int status; 621 622 handle = &hdev->vport[0].nic; 623 if (handle->client) { 624 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 625 if (status) { 626 dev_err(&hdev->pdev->dev, 627 "Update TQPS stats fail, status = %d.\n", 628 status); 629 } 630 } 631 632 hclge_update_fec_stats(hdev); 633 634 status = hclge_mac_update_stats(hdev); 635 if (status) 636 dev_err(&hdev->pdev->dev, 637 "Update MAC stats fail, status = %d.\n", status); 638 } 639 640 static void hclge_update_stats(struct hnae3_handle *handle) 641 { 642 struct hclge_vport *vport = hclge_get_vport(handle); 643 struct hclge_dev *hdev = vport->back; 644 int status; 645 646 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 647 return; 648 649 status = hclge_mac_update_stats(hdev); 650 if (status) 651 dev_err(&hdev->pdev->dev, 652 "Update MAC stats fail, status = %d.\n", 653 status); 654 655 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 656 if (status) 657 dev_err(&hdev->pdev->dev, 658 "Update TQPS stats fail, status = %d.\n", 659 status); 660 661 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); 662 } 663 664 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 665 { 666 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \ 667 HNAE3_SUPPORT_PHY_LOOPBACK | \ 668 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \ 669 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \ 670 HNAE3_SUPPORT_EXTERNAL_LOOPBACK) 671 672 struct hclge_vport *vport = hclge_get_vport(handle); 673 struct hclge_dev *hdev = vport->back; 674 int count = 0; 675 676 /* Loopback test support rules: 677 * mac: only GE mode support 678 * serdes: all mac mode will support include GE/XGE/LGE/CGE 679 * phy: only support when phy device exist on board 680 */ 681 if (stringset == ETH_SS_TEST) { 682 /* clear loopback bit flags at first */ 683 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 684 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || 685 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 686 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 687 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 688 count += 1; 689 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; 690 } 691 692 if (hdev->ae_dev->dev_specs.hilink_version != 693 HCLGE_HILINK_H60) { 694 count += 1; 695 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; 696 } 697 698 count += 1; 699 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; 700 count += 1; 701 handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK; 702 703 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && 704 hdev->hw.mac.phydev->drv->set_loopback) || 705 hnae3_dev_phy_imp_supported(hdev)) { 706 count += 1; 707 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; 708 } 709 } else if (stringset == ETH_SS_STATS) { 710 count = hclge_comm_get_count(hdev, g_mac_stats_string, 711 ARRAY_SIZE(g_mac_stats_string)) + 712 hclge_comm_tqps_get_sset_count(handle); 713 } 714 715 return count; 716 } 717 718 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, 719 u8 *data) 720 { 721 struct hclge_vport *vport = hclge_get_vport(handle); 722 struct hclge_dev *hdev = vport->back; 723 u8 *p = (char *)data; 724 int size; 725 726 if 
(stringset == ETH_SS_STATS) { 727 size = ARRAY_SIZE(g_mac_stats_string); 728 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, 729 size, p); 730 p = hclge_comm_tqps_get_strings(handle, p); 731 } else if (stringset == ETH_SS_TEST) { 732 if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { 733 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL], 734 ETH_GSTRING_LEN); 735 p += ETH_GSTRING_LEN; 736 } 737 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { 738 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], 739 ETH_GSTRING_LEN); 740 p += ETH_GSTRING_LEN; 741 } 742 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { 743 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], 744 ETH_GSTRING_LEN); 745 p += ETH_GSTRING_LEN; 746 } 747 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { 748 memcpy(p, 749 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], 750 ETH_GSTRING_LEN); 751 p += ETH_GSTRING_LEN; 752 } 753 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 754 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY], 755 ETH_GSTRING_LEN); 756 p += ETH_GSTRING_LEN; 757 } 758 } 759 } 760 761 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) 762 { 763 struct hclge_vport *vport = hclge_get_vport(handle); 764 struct hclge_dev *hdev = vport->back; 765 u64 *p; 766 767 p = hclge_comm_get_stats(hdev, g_mac_stats_string, 768 ARRAY_SIZE(g_mac_stats_string), data); 769 p = hclge_comm_tqps_get_stats(handle, p); 770 } 771 772 static void hclge_get_mac_stat(struct hnae3_handle *handle, 773 struct hns3_mac_stats *mac_stats) 774 { 775 struct hclge_vport *vport = hclge_get_vport(handle); 776 struct hclge_dev *hdev = vport->back; 777 778 hclge_update_stats(handle); 779 780 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; 781 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; 782 } 783 784 static int hclge_parse_func_status(struct hclge_dev *hdev, 785 struct hclge_func_status_cmd *status) 786 { 787 #define HCLGE_MAC_ID_MASK 0xF 788 789 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 790 return -EINVAL; 791 792 /* Set the pf to main pf */ 793 if (status->pf_state & HCLGE_PF_STATE_MAIN) 794 hdev->flag |= HCLGE_FLAG_MAIN; 795 else 796 hdev->flag &= ~HCLGE_FLAG_MAIN; 797 798 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; 799 return 0; 800 } 801 802 static int hclge_query_function_status(struct hclge_dev *hdev) 803 { 804 #define HCLGE_QUERY_MAX_CNT 5 805 806 struct hclge_func_status_cmd *req; 807 struct hclge_desc desc; 808 int timeout = 0; 809 int ret; 810 811 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 812 req = (struct hclge_func_status_cmd *)desc.data; 813 814 do { 815 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 816 if (ret) { 817 dev_err(&hdev->pdev->dev, 818 "query function status failed %d.\n", ret); 819 return ret; 820 } 821 822 /* Check pf reset is done */ 823 if (req->pf_state) 824 break; 825 usleep_range(1000, 2000); 826 } while (timeout++ < HCLGE_QUERY_MAX_CNT); 827 828 return hclge_parse_func_status(hdev, req); 829 } 830 831 static int hclge_query_pf_resource(struct hclge_dev *hdev) 832 { 833 struct hclge_pf_res_cmd *req; 834 struct hclge_desc desc; 835 int ret; 836 837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 838 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 839 if (ret) { 840 dev_err(&hdev->pdev->dev, 841 "query pf resource failed %d.\n", ret); 842 return ret; 843 } 844 845 req = (struct hclge_pf_res_cmd *)desc.data; 846 hdev->num_tqps = le16_to_cpu(req->tqp_num) + 847 
le16_to_cpu(req->ext_tqp_num); 848 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 849 850 if (req->tx_buf_size) 851 hdev->tx_buf_size = 852 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; 853 else 854 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; 855 856 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); 857 858 if (req->dv_buf_size) 859 hdev->dv_buf_size = 860 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; 861 else 862 hdev->dv_buf_size = HCLGE_DEFAULT_DV; 863 864 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); 865 866 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); 867 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { 868 dev_err(&hdev->pdev->dev, 869 "only %u msi resources available, not enough for pf(min:2).\n", 870 hdev->num_nic_msi); 871 return -EINVAL; 872 } 873 874 if (hnae3_dev_roce_supported(hdev)) { 875 hdev->num_roce_msi = 876 le16_to_cpu(req->pf_intr_vector_number_roce); 877 878 /* PF should have NIC vectors and Roce vectors, 879 * NIC vectors are queued before Roce vectors. 880 */ 881 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; 882 } else { 883 hdev->num_msi = hdev->num_nic_msi; 884 } 885 886 return 0; 887 } 888 889 static int hclge_parse_speed(u8 speed_cmd, u32 *speed) 890 { 891 switch (speed_cmd) { 892 case HCLGE_FW_MAC_SPEED_10M: 893 *speed = HCLGE_MAC_SPEED_10M; 894 break; 895 case HCLGE_FW_MAC_SPEED_100M: 896 *speed = HCLGE_MAC_SPEED_100M; 897 break; 898 case HCLGE_FW_MAC_SPEED_1G: 899 *speed = HCLGE_MAC_SPEED_1G; 900 break; 901 case HCLGE_FW_MAC_SPEED_10G: 902 *speed = HCLGE_MAC_SPEED_10G; 903 break; 904 case HCLGE_FW_MAC_SPEED_25G: 905 *speed = HCLGE_MAC_SPEED_25G; 906 break; 907 case HCLGE_FW_MAC_SPEED_40G: 908 *speed = HCLGE_MAC_SPEED_40G; 909 break; 910 case HCLGE_FW_MAC_SPEED_50G: 911 *speed = HCLGE_MAC_SPEED_50G; 912 break; 913 case HCLGE_FW_MAC_SPEED_100G: 914 *speed = HCLGE_MAC_SPEED_100G; 915 break; 916 case HCLGE_FW_MAC_SPEED_200G: 917 *speed = HCLGE_MAC_SPEED_200G; 918 break; 919 default: 920 return -EINVAL; 921 } 922 923 return 0; 924 } 925 926 static const struct hclge_speed_bit_map speed_bit_map[] = { 927 {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT}, 928 {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT}, 929 {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT}, 930 {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, 931 {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, 932 {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, 933 {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS}, 934 {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS}, 935 {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS}, 936 }; 937 938 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) 939 { 940 u16 i; 941 942 for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { 943 if (speed == speed_bit_map[i].speed) { 944 *speed_bit = speed_bit_map[i].speed_bit; 945 return 0; 946 } 947 } 948 949 return -EINVAL; 950 } 951 952 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) 953 { 954 struct hclge_vport *vport = hclge_get_vport(handle); 955 struct hclge_dev *hdev = vport->back; 956 u32 speed_ability = hdev->hw.mac.speed_ability; 957 u32 speed_bit = 0; 958 int ret; 959 960 ret = hclge_get_speed_bit(speed, &speed_bit); 961 if (ret) 962 return ret; 963 964 if (speed_bit & speed_ability) 965 return 0; 966 967 return -EINVAL; 968 } 969 970 static void hclge_update_fec_support(struct hclge_mac *mac) 971 { 972 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); 973 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 
mac->supported); 974 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported); 975 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 976 977 if (mac->fec_ability & BIT(HNAE3_FEC_BASER)) 978 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 979 mac->supported); 980 if (mac->fec_ability & BIT(HNAE3_FEC_RS)) 981 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 982 mac->supported); 983 if (mac->fec_ability & BIT(HNAE3_FEC_LLRS)) 984 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 985 mac->supported); 986 if (mac->fec_ability & BIT(HNAE3_FEC_NONE)) 987 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 988 mac->supported); 989 } 990 991 static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = { 992 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, 993 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, 994 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, 995 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, 996 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT}, 997 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, 998 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT}, 999 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1000 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, 1001 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, 1002 }; 1003 1004 static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = { 1005 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, 1006 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, 1007 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT}, 1008 {HCLGE_SUPPORT_100G_R4_BIT, 1009 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, 1010 {HCLGE_SUPPORT_100G_R2_BIT, 1011 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT}, 1012 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1013 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, 1014 {HCLGE_SUPPORT_200G_R4_BIT, 1015 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, 1016 }; 1017 1018 static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = { 1019 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, 1020 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, 1021 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, 1022 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, 1023 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT}, 1024 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, 1025 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT}, 1026 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1027 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, 1028 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, 1029 }; 1030 1031 static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = { 1032 {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, 1033 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, 1034 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, 1035 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, 1036 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, 1037 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT}, 1038 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, 1039 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT}, 1040 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1041 
ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, 1042 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, 1043 }; 1044 1045 static void hclge_convert_setting_sr(u16 speed_ability, 1046 unsigned long *link_mode) 1047 { 1048 int i; 1049 1050 for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) { 1051 if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit) 1052 linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode, 1053 link_mode); 1054 } 1055 } 1056 1057 static void hclge_convert_setting_lr(u16 speed_ability, 1058 unsigned long *link_mode) 1059 { 1060 int i; 1061 1062 for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) { 1063 if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit) 1064 linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode, 1065 link_mode); 1066 } 1067 } 1068 1069 static void hclge_convert_setting_cr(u16 speed_ability, 1070 unsigned long *link_mode) 1071 { 1072 int i; 1073 1074 for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) { 1075 if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit) 1076 linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode, 1077 link_mode); 1078 } 1079 } 1080 1081 static void hclge_convert_setting_kr(u16 speed_ability, 1082 unsigned long *link_mode) 1083 { 1084 int i; 1085 1086 for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) { 1087 if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit) 1088 linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode, 1089 link_mode); 1090 } 1091 } 1092 1093 static void hclge_convert_setting_fec(struct hclge_mac *mac) 1094 { 1095 /* If firmware has reported fec_ability, don't need to convert by speed */ 1096 if (mac->fec_ability) 1097 goto out; 1098 1099 switch (mac->speed) { 1100 case HCLGE_MAC_SPEED_10G: 1101 case HCLGE_MAC_SPEED_40G: 1102 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) | 1103 BIT(HNAE3_FEC_NONE); 1104 break; 1105 case HCLGE_MAC_SPEED_25G: 1106 case HCLGE_MAC_SPEED_50G: 1107 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | 1108 BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE); 1109 break; 1110 case HCLGE_MAC_SPEED_100G: 1111 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | 1112 BIT(HNAE3_FEC_NONE); 1113 break; 1114 case HCLGE_MAC_SPEED_200G: 1115 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | 1116 BIT(HNAE3_FEC_LLRS); 1117 break; 1118 default: 1119 mac->fec_ability = 0; 1120 break; 1121 } 1122 1123 out: 1124 hclge_update_fec_support(mac); 1125 } 1126 1127 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, 1128 u16 speed_ability) 1129 { 1130 struct hclge_mac *mac = &hdev->hw.mac; 1131 1132 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1133 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 1134 mac->supported); 1135 1136 hclge_convert_setting_sr(speed_ability, mac->supported); 1137 hclge_convert_setting_lr(speed_ability, mac->supported); 1138 hclge_convert_setting_cr(speed_ability, mac->supported); 1139 if (hnae3_dev_fec_supported(hdev)) 1140 hclge_convert_setting_fec(mac); 1141 1142 if (hnae3_dev_pause_supported(hdev)) 1143 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1144 1145 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); 1146 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1147 } 1148 1149 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, 1150 u16 speed_ability) 1151 { 1152 struct hclge_mac *mac = &hdev->hw.mac; 1153 1154 hclge_convert_setting_kr(speed_ability, mac->supported); 1155 if (hnae3_dev_fec_supported(hdev)) 
1156 hclge_convert_setting_fec(mac); 1157 1158 if (hnae3_dev_pause_supported(hdev)) 1159 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1160 1161 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); 1162 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1163 } 1164 1165 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, 1166 u16 speed_ability) 1167 { 1168 unsigned long *supported = hdev->hw.mac.supported; 1169 1170 /* default to support all speed for GE port */ 1171 if (!speed_ability) 1172 speed_ability = HCLGE_SUPPORT_GE; 1173 1174 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1175 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1176 supported); 1177 1178 if (speed_ability & HCLGE_SUPPORT_100M_BIT) { 1179 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 1180 supported); 1181 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 1182 supported); 1183 } 1184 1185 if (speed_ability & HCLGE_SUPPORT_10M_BIT) { 1186 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); 1187 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); 1188 } 1189 1190 if (hnae3_dev_pause_supported(hdev)) { 1191 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); 1192 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); 1193 } 1194 1195 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); 1196 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); 1197 } 1198 1199 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) 1200 { 1201 u8 media_type = hdev->hw.mac.media_type; 1202 1203 if (media_type == HNAE3_MEDIA_TYPE_FIBER) 1204 hclge_parse_fiber_link_mode(hdev, speed_ability); 1205 else if (media_type == HNAE3_MEDIA_TYPE_COPPER) 1206 hclge_parse_copper_link_mode(hdev, speed_ability); 1207 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) 1208 hclge_parse_backplane_link_mode(hdev, speed_ability); 1209 } 1210 1211 static u32 hclge_get_max_speed(u16 speed_ability) 1212 { 1213 if (speed_ability & HCLGE_SUPPORT_200G_BITS) 1214 return HCLGE_MAC_SPEED_200G; 1215 1216 if (speed_ability & HCLGE_SUPPORT_100G_BITS) 1217 return HCLGE_MAC_SPEED_100G; 1218 1219 if (speed_ability & HCLGE_SUPPORT_50G_BITS) 1220 return HCLGE_MAC_SPEED_50G; 1221 1222 if (speed_ability & HCLGE_SUPPORT_40G_BIT) 1223 return HCLGE_MAC_SPEED_40G; 1224 1225 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1226 return HCLGE_MAC_SPEED_25G; 1227 1228 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1229 return HCLGE_MAC_SPEED_10G; 1230 1231 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1232 return HCLGE_MAC_SPEED_1G; 1233 1234 if (speed_ability & HCLGE_SUPPORT_100M_BIT) 1235 return HCLGE_MAC_SPEED_100M; 1236 1237 if (speed_ability & HCLGE_SUPPORT_10M_BIT) 1238 return HCLGE_MAC_SPEED_10M; 1239 1240 return HCLGE_MAC_SPEED_1G; 1241 } 1242 1243 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) 1244 { 1245 #define HCLGE_TX_SPARE_SIZE_UNIT 4096 1246 #define SPEED_ABILITY_EXT_SHIFT 8 1247 1248 struct hclge_cfg_param_cmd *req; 1249 u64 mac_addr_tmp_high; 1250 u16 speed_ability_ext; 1251 u64 mac_addr_tmp; 1252 unsigned int i; 1253 1254 req = (struct hclge_cfg_param_cmd *)desc[0].data; 1255 1256 /* get the configuration */ 1257 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1258 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); 1259 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1260 HCLGE_CFG_TQP_DESC_N_M, 1261 HCLGE_CFG_TQP_DESC_N_S); 1262 1263 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), 
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power of 2
	 * (the exponent) instead of the size itself, which is more flexible
	 * for future changes and expansions. A value of 0 does not make
	 * sense for the PF, so in that case PF and VF share the same max
	 * rss size field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The read length is expressed in units of 4 bytes when sent
		 * to hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM 8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
	ae_dev->dev_specs.tnl_num = req1->tnl_num;
	ae_dev->dev_specs.hilink_version = req1->hilink_version;
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num =
HCLGE_MAX_NON_TSO_BD_NUM; 1418 if (!dev_specs->rss_ind_tbl_size) 1419 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; 1420 if (!dev_specs->rss_key_size) 1421 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 1422 if (!dev_specs->max_tm_rate) 1423 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; 1424 if (!dev_specs->max_qset_num) 1425 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM; 1426 if (!dev_specs->max_int_gl) 1427 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; 1428 if (!dev_specs->max_frm_size) 1429 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; 1430 if (!dev_specs->umv_size) 1431 dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; 1432 } 1433 1434 static int hclge_query_mac_stats_num(struct hclge_dev *hdev) 1435 { 1436 u32 reg_num = 0; 1437 int ret; 1438 1439 ret = hclge_mac_query_reg_num(hdev, ®_num); 1440 if (ret && ret != -EOPNOTSUPP) 1441 return ret; 1442 1443 hdev->ae_dev->dev_specs.mac_stats_num = reg_num; 1444 return 0; 1445 } 1446 1447 static int hclge_query_dev_specs(struct hclge_dev *hdev) 1448 { 1449 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM]; 1450 int ret; 1451 int i; 1452 1453 ret = hclge_query_mac_stats_num(hdev); 1454 if (ret) 1455 return ret; 1456 1457 /* set default specifications as devices lower than version V3 do not 1458 * support querying specifications from firmware. 1459 */ 1460 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 1461 hclge_set_default_dev_specs(hdev); 1462 return 0; 1463 } 1464 1465 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 1466 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, 1467 true); 1468 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 1469 } 1470 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); 1471 1472 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); 1473 if (ret) 1474 return ret; 1475 1476 hclge_parse_dev_specs(hdev, desc); 1477 hclge_check_dev_specs(hdev); 1478 1479 return 0; 1480 } 1481 1482 static int hclge_get_cap(struct hclge_dev *hdev) 1483 { 1484 int ret; 1485 1486 ret = hclge_query_function_status(hdev); 1487 if (ret) { 1488 dev_err(&hdev->pdev->dev, 1489 "query function status error %d.\n", ret); 1490 return ret; 1491 } 1492 1493 /* get pf resource */ 1494 return hclge_query_pf_resource(hdev); 1495 } 1496 1497 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) 1498 { 1499 #define HCLGE_MIN_TX_DESC 64 1500 #define HCLGE_MIN_RX_DESC 64 1501 1502 if (!is_kdump_kernel()) 1503 return; 1504 1505 dev_info(&hdev->pdev->dev, 1506 "Running kdump kernel. 
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}
	hdev->hw.mac.req_speed = hdev->hw.mac.speed;
	hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
	hdev->hw.mac.req_duplex = DUPLEX_FULL;

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
false); 1623 req = (struct hclge_cfg_gro_status_cmd *)desc.data; 1624 1625 req->gro_en = hdev->gro_en ? 1 : 0; 1626 1627 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1628 if (ret) 1629 dev_err(&hdev->pdev->dev, 1630 "GRO hardware config cmd failed, ret = %d\n", ret); 1631 1632 return ret; 1633 } 1634 1635 static int hclge_alloc_tqps(struct hclge_dev *hdev) 1636 { 1637 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1638 struct hclge_comm_tqp *tqp; 1639 int i; 1640 1641 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 1642 sizeof(struct hclge_comm_tqp), GFP_KERNEL); 1643 if (!hdev->htqp) 1644 return -ENOMEM; 1645 1646 tqp = hdev->htqp; 1647 1648 for (i = 0; i < hdev->num_tqps; i++) { 1649 tqp->dev = &hdev->pdev->dev; 1650 tqp->index = i; 1651 1652 tqp->q.ae_algo = &ae_algo; 1653 tqp->q.buf_size = hdev->rx_buf_len; 1654 tqp->q.tx_desc_num = hdev->num_tx_desc; 1655 tqp->q.rx_desc_num = hdev->num_rx_desc; 1656 1657 /* need an extended offset to configure queues >= 1658 * HCLGE_TQP_MAX_SIZE_DEV_V2 1659 */ 1660 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) 1661 tqp->q.io_base = hdev->hw.hw.io_base + 1662 HCLGE_TQP_REG_OFFSET + 1663 i * HCLGE_TQP_REG_SIZE; 1664 else 1665 tqp->q.io_base = hdev->hw.hw.io_base + 1666 HCLGE_TQP_REG_OFFSET + 1667 HCLGE_TQP_EXT_REG_OFFSET + 1668 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * 1669 HCLGE_TQP_REG_SIZE; 1670 1671 /* when device supports tx push and has device memory, 1672 * the queue can execute push mode or doorbell mode on 1673 * device memory. 1674 */ 1675 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) 1676 tqp->q.mem_base = hdev->hw.hw.mem_base + 1677 HCLGE_TQP_MEM_OFFSET(hdev, i); 1678 1679 tqp++; 1680 } 1681 1682 return 0; 1683 } 1684 1685 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, 1686 u16 tqp_pid, u16 tqp_vid, bool is_pf) 1687 { 1688 struct hclge_tqp_map_cmd *req; 1689 struct hclge_desc desc; 1690 int ret; 1691 1692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); 1693 1694 req = (struct hclge_tqp_map_cmd *)desc.data; 1695 req->tqp_id = cpu_to_le16(tqp_pid); 1696 req->tqp_vf = func_id; 1697 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; 1698 if (!is_pf) 1699 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; 1700 req->tqp_vid = cpu_to_le16(tqp_vid); 1701 1702 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1703 if (ret) 1704 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); 1705 1706 return ret; 1707 } 1708 1709 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) 1710 { 1711 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 1712 struct hclge_dev *hdev = vport->back; 1713 int i, alloced; 1714 1715 for (i = 0, alloced = 0; i < hdev->num_tqps && 1716 alloced < num_tqps; i++) { 1717 if (!hdev->htqp[i].alloced) { 1718 hdev->htqp[i].q.handle = &vport->nic; 1719 hdev->htqp[i].q.tqp_index = alloced; 1720 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; 1721 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; 1722 kinfo->tqp[alloced] = &hdev->htqp[i].q; 1723 hdev->htqp[i].alloced = true; 1724 alloced++; 1725 } 1726 } 1727 vport->alloc_tqps = alloced; 1728 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, 1729 vport->alloc_tqps / hdev->tm_info.num_tc); 1730 1731 /* ensure one to one mapping between irq and queue at default */ 1732 kinfo->rss_size = min_t(u16, kinfo->rss_size, 1733 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); 1734 1735 return 0; 1736 } 1737 1738 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, 1739 u16 num_tx_desc, u16 num_rx_desc) 1740 1741 { 1742 
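/* record the ring sizes and buffer lengths in the knic private info, then allocate the TQP pointer array and bind TQPs to this vport */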
struct hnae3_handle *nic = &vport->nic; 1743 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1744 struct hclge_dev *hdev = vport->back; 1745 int ret; 1746 1747 kinfo->num_tx_desc = num_tx_desc; 1748 kinfo->num_rx_desc = num_rx_desc; 1749 1750 kinfo->rx_buf_len = hdev->rx_buf_len; 1751 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; 1752 1753 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, 1754 sizeof(struct hnae3_queue *), GFP_KERNEL); 1755 if (!kinfo->tqp) 1756 return -ENOMEM; 1757 1758 ret = hclge_assign_tqp(vport, num_tqps); 1759 if (ret) 1760 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1761 1762 return ret; 1763 } 1764 1765 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, 1766 struct hclge_vport *vport) 1767 { 1768 struct hnae3_handle *nic = &vport->nic; 1769 struct hnae3_knic_private_info *kinfo; 1770 u16 i; 1771 1772 kinfo = &nic->kinfo; 1773 for (i = 0; i < vport->alloc_tqps; i++) { 1774 struct hclge_comm_tqp *q = 1775 container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); 1776 bool is_pf; 1777 int ret; 1778 1779 is_pf = !(vport->vport_id); 1780 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, 1781 i, is_pf); 1782 if (ret) 1783 return ret; 1784 } 1785 1786 return 0; 1787 } 1788 1789 static int hclge_map_tqp(struct hclge_dev *hdev) 1790 { 1791 struct hclge_vport *vport = hdev->vport; 1792 u16 i, num_vport; 1793 1794 num_vport = hdev->num_req_vfs + 1; 1795 for (i = 0; i < num_vport; i++) { 1796 int ret; 1797 1798 ret = hclge_map_tqp_to_vport(hdev, vport); 1799 if (ret) 1800 return ret; 1801 1802 vport++; 1803 } 1804 1805 return 0; 1806 } 1807 1808 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1809 { 1810 struct hnae3_handle *nic = &vport->nic; 1811 struct hclge_dev *hdev = vport->back; 1812 int ret; 1813 1814 nic->pdev = hdev->pdev; 1815 nic->ae_algo = &ae_algo; 1816 bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, 1817 MAX_NUMNODES); 1818 nic->kinfo.io_base = hdev->hw.hw.io_base; 1819 1820 ret = hclge_knic_setup(vport, num_tqps, 1821 hdev->num_tx_desc, hdev->num_rx_desc); 1822 if (ret) 1823 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); 1824 1825 return ret; 1826 } 1827 1828 static int hclge_alloc_vport(struct hclge_dev *hdev) 1829 { 1830 struct pci_dev *pdev = hdev->pdev; 1831 struct hclge_vport *vport; 1832 u32 tqp_main_vport; 1833 u32 tqp_per_vport; 1834 int num_vport, i; 1835 int ret; 1836 1837 /* We need to alloc a vport for main NIC of PF */ 1838 num_vport = hdev->num_req_vfs + 1; 1839 1840 if (hdev->num_tqps < num_vport) { 1841 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", 1842 hdev->num_tqps, num_vport); 1843 return -EINVAL; 1844 } 1845 1846 /* Alloc the same number of TQPs for every vport */ 1847 tqp_per_vport = hdev->num_tqps / num_vport; 1848 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1849 1850 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1851 GFP_KERNEL); 1852 if (!vport) 1853 return -ENOMEM; 1854 1855 hdev->vport = vport; 1856 hdev->num_alloc_vport = num_vport; 1857 1858 if (IS_ENABLED(CONFIG_PCI_IOV)) 1859 hdev->num_alloc_vfs = hdev->num_req_vfs; 1860 1861 for (i = 0; i < num_vport; i++) { 1862 vport->back = hdev; 1863 vport->vport_id = i; 1864 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; 1865 vport->mps = HCLGE_MAC_DEFAULT_FRAME; 1866 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; 1867 vport->port_base_vlan_cfg.tbl_sta = true; 1868 vport->rxvlan_cfg.rx_vlan_offload_en = true; 1869 
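/* RX VLAN offload and VLAN filtering are requested by default for every vport */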
vport->req_vlan_fltr_en = true; 1870 INIT_LIST_HEAD(&vport->vlan_list); 1871 INIT_LIST_HEAD(&vport->uc_mac_list); 1872 INIT_LIST_HEAD(&vport->mc_mac_list); 1873 spin_lock_init(&vport->mac_list_lock); 1874 1875 if (i == 0) 1876 ret = hclge_vport_setup(vport, tqp_main_vport); 1877 else 1878 ret = hclge_vport_setup(vport, tqp_per_vport); 1879 if (ret) { 1880 dev_err(&pdev->dev, 1881 "vport setup failed for vport %d, %d\n", 1882 i, ret); 1883 return ret; 1884 } 1885 1886 vport++; 1887 } 1888 1889 return 0; 1890 } 1891 1892 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1893 struct hclge_pkt_buf_alloc *buf_alloc) 1894 { 1895 /* TX buffer size is unit by 128 byte */ 1896 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1897 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1898 struct hclge_tx_buff_alloc_cmd *req; 1899 struct hclge_desc desc; 1900 int ret; 1901 u8 i; 1902 1903 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1904 1905 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1906 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1907 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1908 1909 req->tx_pkt_buff[i] = 1910 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1911 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1912 } 1913 1914 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1915 if (ret) 1916 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1917 ret); 1918 1919 return ret; 1920 } 1921 1922 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1923 struct hclge_pkt_buf_alloc *buf_alloc) 1924 { 1925 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1926 1927 if (ret) 1928 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); 1929 1930 return ret; 1931 } 1932 1933 static u32 hclge_get_tc_num(struct hclge_dev *hdev) 1934 { 1935 unsigned int i; 1936 u32 cnt = 0; 1937 1938 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1939 if (hdev->hw_tc_map & BIT(i)) 1940 cnt++; 1941 return cnt; 1942 } 1943 1944 /* Get the number of pfc enabled TCs, which have private buffer */ 1945 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1946 struct hclge_pkt_buf_alloc *buf_alloc) 1947 { 1948 struct hclge_priv_buf *priv; 1949 unsigned int i; 1950 int cnt = 0; 1951 1952 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1953 priv = &buf_alloc->priv_buf[i]; 1954 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1955 priv->enable) 1956 cnt++; 1957 } 1958 1959 return cnt; 1960 } 1961 1962 /* Get the number of pfc disabled TCs, which have private buffer */ 1963 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1964 struct hclge_pkt_buf_alloc *buf_alloc) 1965 { 1966 struct hclge_priv_buf *priv; 1967 unsigned int i; 1968 int cnt = 0; 1969 1970 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1971 priv = &buf_alloc->priv_buf[i]; 1972 if (hdev->hw_tc_map & BIT(i) && 1973 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1974 priv->enable) 1975 cnt++; 1976 } 1977 1978 return cnt; 1979 } 1980 1981 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1982 { 1983 struct hclge_priv_buf *priv; 1984 u32 rx_priv = 0; 1985 int i; 1986 1987 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1988 priv = &buf_alloc->priv_buf[i]; 1989 if (priv->enable) 1990 rx_priv += priv->buf_size; 1991 } 1992 return rx_priv; 1993 } 1994 1995 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1996 { 1997 u32 i, total_tx_size = 0; 1998 1999 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 2000 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 2001 2002 return total_tx_size; 2003 } 2004 2005 static bool 
hclge_is_rx_buf_ok(struct hclge_dev *hdev, 2006 struct hclge_pkt_buf_alloc *buf_alloc, 2007 u32 rx_all) 2008 { 2009 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 2010 u32 tc_num = hclge_get_tc_num(hdev); 2011 u32 shared_buf, aligned_mps; 2012 u32 rx_priv; 2013 int i; 2014 2015 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); 2016 2017 if (hnae3_dev_dcb_supported(hdev)) 2018 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + 2019 hdev->dv_buf_size; 2020 else 2021 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF 2022 + hdev->dv_buf_size; 2023 2024 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 2025 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), 2026 HCLGE_BUF_SIZE_UNIT); 2027 2028 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); 2029 if (rx_all < rx_priv + shared_std) 2030 return false; 2031 2032 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); 2033 buf_alloc->s_buf.buf_size = shared_buf; 2034 if (hnae3_dev_dcb_supported(hdev)) { 2035 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; 2036 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 2037 - roundup(aligned_mps / HCLGE_BUF_DIV_BY, 2038 HCLGE_BUF_SIZE_UNIT); 2039 } else { 2040 buf_alloc->s_buf.self.high = aligned_mps + 2041 HCLGE_NON_DCB_ADDITIONAL_BUF; 2042 buf_alloc->s_buf.self.low = aligned_mps; 2043 } 2044 2045 if (hnae3_dev_dcb_supported(hdev)) { 2046 hi_thrd = shared_buf - hdev->dv_buf_size; 2047 2048 if (tc_num <= NEED_RESERVE_TC_NUM) 2049 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT 2050 / BUF_MAX_PERCENT; 2051 2052 if (tc_num) 2053 hi_thrd = hi_thrd / tc_num; 2054 2055 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps); 2056 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); 2057 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; 2058 } else { 2059 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; 2060 lo_thrd = aligned_mps; 2061 } 2062 2063 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2064 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 2065 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 2066 } 2067 2068 return true; 2069 } 2070 2071 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, 2072 struct hclge_pkt_buf_alloc *buf_alloc) 2073 { 2074 u32 i, total_size; 2075 2076 total_size = hdev->pkt_buf_size; 2077 2078 /* alloc tx buffer for all enabled tc */ 2079 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2080 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2081 2082 if (hdev->hw_tc_map & BIT(i)) { 2083 if (total_size < hdev->tx_buf_size) 2084 return -ENOMEM; 2085 2086 priv->tx_buf_size = hdev->tx_buf_size; 2087 } else { 2088 priv->tx_buf_size = 0; 2089 } 2090 2091 total_size -= priv->tx_buf_size; 2092 } 2093 2094 return 0; 2095 } 2096 2097 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, 2098 struct hclge_pkt_buf_alloc *buf_alloc) 2099 { 2100 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2101 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); 2102 unsigned int i; 2103 2104 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2105 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2106 2107 priv->enable = 0; 2108 priv->wl.low = 0; 2109 priv->wl.high = 0; 2110 priv->buf_size = 0; 2111 2112 if (!(hdev->hw_tc_map & BIT(i))) 2113 continue; 2114 2115 priv->enable = 1; 2116 2117 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 2118 priv->wl.low = max ? 
aligned_mps : HCLGE_BUF_SIZE_UNIT; 2119 priv->wl.high = roundup(priv->wl.low + aligned_mps, 2120 HCLGE_BUF_SIZE_UNIT); 2121 } else { 2122 priv->wl.low = 0; 2123 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) : 2124 aligned_mps; 2125 } 2126 2127 priv->buf_size = priv->wl.high + hdev->dv_buf_size; 2128 } 2129 2130 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2131 } 2132 2133 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, 2134 struct hclge_pkt_buf_alloc *buf_alloc) 2135 { 2136 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2137 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 2138 int i; 2139 2140 /* let the last to be cleared first */ 2141 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 2142 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2143 unsigned int mask = BIT((unsigned int)i); 2144 2145 if (hdev->hw_tc_map & mask && 2146 !(hdev->tm_info.hw_pfc_map & mask)) { 2147 /* Clear the no pfc TC private buffer */ 2148 priv->wl.low = 0; 2149 priv->wl.high = 0; 2150 priv->buf_size = 0; 2151 priv->enable = 0; 2152 no_pfc_priv_num--; 2153 } 2154 2155 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 2156 no_pfc_priv_num == 0) 2157 break; 2158 } 2159 2160 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2161 } 2162 2163 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, 2164 struct hclge_pkt_buf_alloc *buf_alloc) 2165 { 2166 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2167 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 2168 int i; 2169 2170 /* let the last to be cleared first */ 2171 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 2172 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2173 unsigned int mask = BIT((unsigned int)i); 2174 2175 if (hdev->hw_tc_map & mask && 2176 hdev->tm_info.hw_pfc_map & mask) { 2177 /* Reduce the number of pfc TC with private buffer */ 2178 priv->wl.low = 0; 2179 priv->enable = 0; 2180 priv->wl.high = 0; 2181 priv->buf_size = 0; 2182 pfc_priv_num--; 2183 } 2184 2185 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 2186 pfc_priv_num == 0) 2187 break; 2188 } 2189 2190 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2191 } 2192 2193 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, 2194 struct hclge_pkt_buf_alloc *buf_alloc) 2195 { 2196 #define COMPENSATE_BUFFER 0x3C00 2197 #define COMPENSATE_HALF_MPS_NUM 5 2198 #define PRIV_WL_GAP 0x1800 2199 2200 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2201 u32 tc_num = hclge_get_tc_num(hdev); 2202 u32 half_mps = hdev->mps >> 1; 2203 u32 min_rx_priv; 2204 unsigned int i; 2205 2206 if (tc_num) 2207 rx_priv = rx_priv / tc_num; 2208 2209 if (tc_num <= NEED_RESERVE_TC_NUM) 2210 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 2211 2212 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + 2213 COMPENSATE_HALF_MPS_NUM * half_mps; 2214 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); 2215 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); 2216 if (rx_priv < min_rx_priv) 2217 return false; 2218 2219 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2220 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2221 2222 priv->enable = 0; 2223 priv->wl.low = 0; 2224 priv->wl.high = 0; 2225 priv->buf_size = 0; 2226 2227 if (!(hdev->hw_tc_map & BIT(i))) 2228 continue; 2229 2230 priv->enable = 1; 2231 priv->buf_size = rx_priv; 2232 priv->wl.high = rx_priv - hdev->dv_buf_size; 2233 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 2234 } 2235 2236 
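/* in this mode the rx buffer is used exclusively as per-TC private buffer, so no shared buffer is allocated */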
buf_alloc->s_buf.buf_size = 0; 2237 2238 return true; 2239 } 2240 2241 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs 2242 * @hdev: pointer to struct hclge_dev 2243 * @buf_alloc: pointer to buffer calculation data 2244 * @return: 0: calculate successful, negative: fail 2245 */ 2246 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, 2247 struct hclge_pkt_buf_alloc *buf_alloc) 2248 { 2249 /* When DCB is not supported, rx private buffer is not allocated. */ 2250 if (!hnae3_dev_dcb_supported(hdev)) { 2251 u32 rx_all = hdev->pkt_buf_size; 2252 2253 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); 2254 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 2255 return -ENOMEM; 2256 2257 return 0; 2258 } 2259 2260 if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) 2261 return 0; 2262 2263 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) 2264 return 0; 2265 2266 /* try to decrease the buffer size */ 2267 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) 2268 return 0; 2269 2270 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) 2271 return 0; 2272 2273 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) 2274 return 0; 2275 2276 return -ENOMEM; 2277 } 2278 2279 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 2280 struct hclge_pkt_buf_alloc *buf_alloc) 2281 { 2282 struct hclge_rx_priv_buff_cmd *req; 2283 struct hclge_desc desc; 2284 int ret; 2285 int i; 2286 2287 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 2288 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 2289 2290 /* Alloc private buffer TCs */ 2291 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2292 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2293 2294 req->buf_num[i] = 2295 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 2296 req->buf_num[i] |= 2297 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 2298 } 2299 2300 req->shared_buf = 2301 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 2302 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 2303 2304 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2305 if (ret) 2306 dev_err(&hdev->pdev->dev, 2307 "rx private buffer alloc cmd failed %d\n", ret); 2308 2309 return ret; 2310 } 2311 2312 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 2313 struct hclge_pkt_buf_alloc *buf_alloc) 2314 { 2315 struct hclge_rx_priv_wl_buf *req; 2316 struct hclge_priv_buf *priv; 2317 struct hclge_desc desc[2]; 2318 int i, j; 2319 int ret; 2320 2321 for (i = 0; i < 2; i++) { 2322 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 2323 false); 2324 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 2325 2326 /* The first descriptor set the NEXT bit to 1 */ 2327 if (i == 0) 2328 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2329 else 2330 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2331 2332 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2333 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 2334 2335 priv = &buf_alloc->priv_buf[idx]; 2336 req->tc_wl[j].high = 2337 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 2338 req->tc_wl[j].high |= 2339 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2340 req->tc_wl[j].low = 2341 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 2342 req->tc_wl[j].low |= 2343 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2344 } 2345 } 2346 2347 /* Send 2 descriptor at one time */ 2348 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2349 if (ret) 2350 dev_err(&hdev->pdev->dev, 2351 "rx private waterline config cmd failed %d\n", 2352 ret); 2353 return ret; 2354 } 2355 2356 static int hclge_common_thrd_config(struct hclge_dev *hdev, 2357 
struct hclge_pkt_buf_alloc *buf_alloc) 2358 { 2359 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 2360 struct hclge_rx_com_thrd *req; 2361 struct hclge_desc desc[2]; 2362 struct hclge_tc_thrd *tc; 2363 int i, j; 2364 int ret; 2365 2366 for (i = 0; i < 2; i++) { 2367 hclge_cmd_setup_basic_desc(&desc[i], 2368 HCLGE_OPC_RX_COM_THRD_ALLOC, false); 2369 req = (struct hclge_rx_com_thrd *)&desc[i].data; 2370 2371 /* The first descriptor set the NEXT bit to 1 */ 2372 if (i == 0) 2373 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2374 else 2375 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2376 2377 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2378 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 2379 2380 req->com_thrd[j].high = 2381 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 2382 req->com_thrd[j].high |= 2383 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2384 req->com_thrd[j].low = 2385 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 2386 req->com_thrd[j].low |= 2387 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2388 } 2389 } 2390 2391 /* Send 2 descriptors at one time */ 2392 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2393 if (ret) 2394 dev_err(&hdev->pdev->dev, 2395 "common threshold config cmd failed %d\n", ret); 2396 return ret; 2397 } 2398 2399 static int hclge_common_wl_config(struct hclge_dev *hdev, 2400 struct hclge_pkt_buf_alloc *buf_alloc) 2401 { 2402 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 2403 struct hclge_rx_com_wl *req; 2404 struct hclge_desc desc; 2405 int ret; 2406 2407 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 2408 2409 req = (struct hclge_rx_com_wl *)desc.data; 2410 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 2411 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2412 2413 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 2414 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2415 2416 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2417 if (ret) 2418 dev_err(&hdev->pdev->dev, 2419 "common waterline config cmd failed %d\n", ret); 2420 2421 return ret; 2422 } 2423 2424 int hclge_buffer_alloc(struct hclge_dev *hdev) 2425 { 2426 struct hclge_pkt_buf_alloc *pkt_buf; 2427 int ret; 2428 2429 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 2430 if (!pkt_buf) 2431 return -ENOMEM; 2432 2433 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 2434 if (ret) { 2435 dev_err(&hdev->pdev->dev, 2436 "could not calc tx buffer size for all TCs %d\n", ret); 2437 goto out; 2438 } 2439 2440 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 2441 if (ret) { 2442 dev_err(&hdev->pdev->dev, 2443 "could not alloc tx buffers %d\n", ret); 2444 goto out; 2445 } 2446 2447 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 2448 if (ret) { 2449 dev_err(&hdev->pdev->dev, 2450 "could not calc rx priv buffer size for all TCs %d\n", 2451 ret); 2452 goto out; 2453 } 2454 2455 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 2456 if (ret) { 2457 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 2458 ret); 2459 goto out; 2460 } 2461 2462 if (hnae3_dev_dcb_supported(hdev)) { 2463 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2464 if (ret) { 2465 dev_err(&hdev->pdev->dev, 2466 "could not configure rx private waterline %d\n", 2467 ret); 2468 goto out; 2469 } 2470 2471 ret = hclge_common_thrd_config(hdev, pkt_buf); 2472 if (ret) { 2473 dev_err(&hdev->pdev->dev, 2474 "could not configure common threshold %d\n", 2475 ret); 2476 goto out; 2477 } 2478 } 2479 2480 ret = hclge_common_wl_config(hdev, pkt_buf); 2481 if (ret) 2482 
dev_err(&hdev->pdev->dev, 2483 "could not configure common waterline %d\n", ret); 2484 2485 out: 2486 kfree(pkt_buf); 2487 return ret; 2488 } 2489 2490 static int hclge_init_roce_base_info(struct hclge_vport *vport) 2491 { 2492 struct hnae3_handle *roce = &vport->roce; 2493 struct hnae3_handle *nic = &vport->nic; 2494 struct hclge_dev *hdev = vport->back; 2495 2496 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2497 2498 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) 2499 return -EINVAL; 2500 2501 roce->rinfo.base_vector = hdev->num_nic_msi; 2502 2503 roce->rinfo.netdev = nic->kinfo.netdev; 2504 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2505 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2506 2507 roce->pdev = nic->pdev; 2508 roce->ae_algo = nic->ae_algo; 2509 bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits, 2510 MAX_NUMNODES); 2511 2512 return 0; 2513 } 2514 2515 static int hclge_init_msi(struct hclge_dev *hdev) 2516 { 2517 struct pci_dev *pdev = hdev->pdev; 2518 int vectors; 2519 int i; 2520 2521 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2522 hdev->num_msi, 2523 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2524 if (vectors < 0) { 2525 dev_err(&pdev->dev, 2526 "failed(%d) to allocate MSI/MSI-X vectors\n", 2527 vectors); 2528 return vectors; 2529 } 2530 if (vectors < hdev->num_msi) 2531 dev_warn(&hdev->pdev->dev, 2532 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2533 hdev->num_msi, vectors); 2534 2535 hdev->num_msi = vectors; 2536 hdev->num_msi_left = vectors; 2537 2538 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2539 sizeof(u16), GFP_KERNEL); 2540 if (!hdev->vector_status) { 2541 pci_free_irq_vectors(pdev); 2542 return -ENOMEM; 2543 } 2544 2545 for (i = 0; i < hdev->num_msi; i++) 2546 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2547 2548 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2549 sizeof(int), GFP_KERNEL); 2550 if (!hdev->vector_irq) { 2551 pci_free_irq_vectors(pdev); 2552 return -ENOMEM; 2553 } 2554 2555 return 0; 2556 } 2557 2558 static u8 hclge_check_speed_dup(u8 duplex, int speed) 2559 { 2560 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) 2561 duplex = HCLGE_MAC_FULL; 2562 2563 return duplex; 2564 } 2565 2566 static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = { 2567 {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M}, 2568 {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M}, 2569 {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G}, 2570 {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G}, 2571 {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G}, 2572 {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G}, 2573 {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G}, 2574 {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G}, 2575 {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G}, 2576 }; 2577 2578 static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw) 2579 { 2580 u16 i; 2581 2582 for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) { 2583 if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) { 2584 *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw; 2585 return 0; 2586 } 2587 } 2588 2589 return -EINVAL; 2590 } 2591 2592 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, 2593 u8 duplex, u8 lane_num) 2594 { 2595 struct hclge_config_mac_speed_dup_cmd *req; 2596 struct hclge_desc desc; 2597 u32 speed_fw; 2598 int ret; 2599 2600 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2601 2602 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2603 
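/* encode duplex, the firmware speed value and the lane number into the descriptor before sending it */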
2604 if (duplex) 2605 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); 2606 2607 ret = hclge_convert_to_fw_speed(speed, &speed_fw); 2608 if (ret) { 2609 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2610 return ret; 2611 } 2612 2613 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, 2614 speed_fw); 2615 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2616 1); 2617 req->lane_num = lane_num; 2618 2619 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2620 if (ret) { 2621 dev_err(&hdev->pdev->dev, 2622 "mac speed/duplex config cmd failed %d.\n", ret); 2623 return ret; 2624 } 2625 2626 return 0; 2627 } 2628 2629 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) 2630 { 2631 struct hclge_mac *mac = &hdev->hw.mac; 2632 int ret; 2633 2634 duplex = hclge_check_speed_dup(duplex, speed); 2635 if (!mac->support_autoneg && mac->speed == speed && 2636 mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0)) 2637 return 0; 2638 2639 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); 2640 if (ret) 2641 return ret; 2642 2643 hdev->hw.mac.speed = speed; 2644 hdev->hw.mac.duplex = duplex; 2645 if (!lane_num) 2646 hdev->hw.mac.lane_num = lane_num; 2647 2648 return 0; 2649 } 2650 2651 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2652 u8 duplex, u8 lane_num) 2653 { 2654 struct hclge_vport *vport = hclge_get_vport(handle); 2655 struct hclge_dev *hdev = vport->back; 2656 2657 return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); 2658 } 2659 2660 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2661 { 2662 struct hclge_config_auto_neg_cmd *req; 2663 struct hclge_desc desc; 2664 u32 flag = 0; 2665 int ret; 2666 2667 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2668 2669 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2670 if (enable) 2671 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U); 2672 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2673 2674 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2675 if (ret) 2676 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2677 ret); 2678 2679 return ret; 2680 } 2681 2682 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2683 { 2684 struct hclge_vport *vport = hclge_get_vport(handle); 2685 struct hclge_dev *hdev = vport->back; 2686 2687 if (!hdev->hw.mac.support_autoneg) { 2688 if (enable) { 2689 dev_err(&hdev->pdev->dev, 2690 "autoneg is not supported by current port\n"); 2691 return -EOPNOTSUPP; 2692 } else { 2693 return 0; 2694 } 2695 } 2696 2697 return hclge_set_autoneg_en(hdev, enable); 2698 } 2699 2700 static int hclge_get_autoneg(struct hnae3_handle *handle) 2701 { 2702 struct hclge_vport *vport = hclge_get_vport(handle); 2703 struct hclge_dev *hdev = vport->back; 2704 struct phy_device *phydev = hdev->hw.mac.phydev; 2705 2706 if (phydev) 2707 return phydev->autoneg; 2708 2709 return hdev->hw.mac.autoneg; 2710 } 2711 2712 static int hclge_restart_autoneg(struct hnae3_handle *handle) 2713 { 2714 struct hclge_vport *vport = hclge_get_vport(handle); 2715 struct hclge_dev *hdev = vport->back; 2716 int ret; 2717 2718 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); 2719 2720 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2721 if (ret) 2722 return ret; 2723 return hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2724 } 2725 2726 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt) 2727 { 2728 struct hclge_vport *vport = 
hclge_get_vport(handle); 2729 struct hclge_dev *hdev = vport->back; 2730 2731 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) 2732 return hclge_set_autoneg_en(hdev, !halt); 2733 2734 return 0; 2735 } 2736 2737 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, 2738 struct hclge_desc *desc, u32 desc_len) 2739 { 2740 u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2; 2741 u32 desc_index = 0; 2742 u32 data_index = 0; 2743 u32 i; 2744 2745 for (i = 0; i < lane_size; i++) { 2746 if (data_index >= HCLGE_DESC_DATA_LEN) { 2747 desc_index++; 2748 data_index = 0; 2749 } 2750 2751 if (desc_index >= desc_len) 2752 return; 2753 2754 hdev->fec_stats.per_lanes[i] += 2755 le32_to_cpu(desc[desc_index].data[data_index]); 2756 data_index++; 2757 } 2758 } 2759 2760 static void hclge_parse_fec_stats(struct hclge_dev *hdev, 2761 struct hclge_desc *desc, u32 desc_len) 2762 { 2763 struct hclge_query_fec_stats_cmd *req; 2764 2765 req = (struct hclge_query_fec_stats_cmd *)desc[0].data; 2766 2767 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; 2768 hdev->fec_stats.rs_corr_blocks += 2769 le32_to_cpu(req->rs_fec_corr_blocks); 2770 hdev->fec_stats.rs_uncorr_blocks += 2771 le32_to_cpu(req->rs_fec_uncorr_blocks); 2772 hdev->fec_stats.rs_error_blocks += 2773 le32_to_cpu(req->rs_fec_error_blocks); 2774 hdev->fec_stats.base_r_corr_blocks += 2775 le32_to_cpu(req->base_r_fec_corr_blocks); 2776 hdev->fec_stats.base_r_uncorr_blocks += 2777 le32_to_cpu(req->base_r_fec_uncorr_blocks); 2778 2779 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); 2780 } 2781 2782 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) 2783 { 2784 struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM]; 2785 int ret; 2786 u32 i; 2787 2788 for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) { 2789 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS, 2790 true); 2791 if (i != (HCLGE_FEC_STATS_CMD_NUM - 1)) 2792 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2793 } 2794 2795 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); 2796 if (ret) 2797 return ret; 2798 2799 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); 2800 2801 return 0; 2802 } 2803 2804 static void hclge_update_fec_stats(struct hclge_dev *hdev) 2805 { 2806 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2807 int ret; 2808 2809 if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || 2810 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) 2811 return; 2812 2813 ret = hclge_update_fec_stats_hw(hdev); 2814 if (ret) 2815 dev_err(&hdev->pdev->dev, 2816 "failed to update fec stats, ret = %d\n", ret); 2817 2818 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); 2819 } 2820 2821 static void hclge_get_fec_stats_total(struct hclge_dev *hdev, 2822 struct ethtool_fec_stats *fec_stats) 2823 { 2824 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; 2825 fec_stats->uncorrectable_blocks.total = 2826 hdev->fec_stats.rs_uncorr_blocks; 2827 } 2828 2829 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, 2830 struct ethtool_fec_stats *fec_stats) 2831 { 2832 u32 i; 2833 2834 if (hdev->fec_stats.base_r_lane_num == 0 || 2835 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { 2836 dev_err(&hdev->pdev->dev, 2837 "fec stats lane number(%llu) is invalid\n", 2838 hdev->fec_stats.base_r_lane_num); 2839 return; 2840 } 2841 2842 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { 2843 fec_stats->corrected_blocks.lanes[i] = 2844 hdev->fec_stats.base_r_corr_per_lanes[i]; 2845 
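/* ethtool per-lane FEC counters are only reported for BASE-R mode */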
fec_stats->uncorrectable_blocks.lanes[i] = 2846 hdev->fec_stats.base_r_uncorr_per_lanes[i]; 2847 } 2848 } 2849 2850 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, 2851 struct ethtool_fec_stats *fec_stats) 2852 { 2853 u32 fec_mode = hdev->hw.mac.fec_mode; 2854 2855 switch (fec_mode) { 2856 case BIT(HNAE3_FEC_RS): 2857 case BIT(HNAE3_FEC_LLRS): 2858 hclge_get_fec_stats_total(hdev, fec_stats); 2859 break; 2860 case BIT(HNAE3_FEC_BASER): 2861 hclge_get_fec_stats_lanes(hdev, fec_stats); 2862 break; 2863 default: 2864 dev_err(&hdev->pdev->dev, 2865 "fec stats is not supported by current fec mode(0x%x)\n", 2866 fec_mode); 2867 break; 2868 } 2869 } 2870 2871 static void hclge_get_fec_stats(struct hnae3_handle *handle, 2872 struct ethtool_fec_stats *fec_stats) 2873 { 2874 struct hclge_vport *vport = hclge_get_vport(handle); 2875 struct hclge_dev *hdev = vport->back; 2876 u32 fec_mode = hdev->hw.mac.fec_mode; 2877 2878 if (fec_mode == BIT(HNAE3_FEC_NONE) || 2879 fec_mode == BIT(HNAE3_FEC_AUTO) || 2880 fec_mode == BIT(HNAE3_FEC_USER_DEF)) 2881 return; 2882 2883 hclge_update_fec_stats(hdev); 2884 2885 hclge_comm_get_fec_stats(hdev, fec_stats); 2886 } 2887 2888 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) 2889 { 2890 struct hclge_config_fec_cmd *req; 2891 struct hclge_desc desc; 2892 int ret; 2893 2894 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); 2895 2896 req = (struct hclge_config_fec_cmd *)desc.data; 2897 if (fec_mode & BIT(HNAE3_FEC_AUTO)) 2898 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); 2899 if (fec_mode & BIT(HNAE3_FEC_RS)) 2900 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2901 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); 2902 if (fec_mode & BIT(HNAE3_FEC_LLRS)) 2903 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2904 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS); 2905 if (fec_mode & BIT(HNAE3_FEC_BASER)) 2906 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2907 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); 2908 2909 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2910 if (ret) 2911 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); 2912 2913 return ret; 2914 } 2915 2916 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) 2917 { 2918 struct hclge_vport *vport = hclge_get_vport(handle); 2919 struct hclge_dev *hdev = vport->back; 2920 struct hclge_mac *mac = &hdev->hw.mac; 2921 int ret; 2922 2923 if (fec_mode && !(mac->fec_ability & fec_mode)) { 2924 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); 2925 return -EINVAL; 2926 } 2927 2928 ret = hclge_set_fec_hw(hdev, fec_mode); 2929 if (ret) 2930 return ret; 2931 2932 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); 2933 return 0; 2934 } 2935 2936 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, 2937 u8 *fec_mode) 2938 { 2939 struct hclge_vport *vport = hclge_get_vport(handle); 2940 struct hclge_dev *hdev = vport->back; 2941 struct hclge_mac *mac = &hdev->hw.mac; 2942 2943 if (fec_ability) 2944 *fec_ability = mac->fec_ability; 2945 if (fec_mode) 2946 *fec_mode = mac->fec_mode; 2947 } 2948 2949 static int hclge_mac_init(struct hclge_dev *hdev) 2950 { 2951 struct hclge_mac *mac = &hdev->hw.mac; 2952 int ret; 2953 2954 hdev->support_sfp_query = true; 2955 2956 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2957 hdev->hw.mac.duplex = HCLGE_MAC_FULL; 2958 2959 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, 2960 hdev->hw.mac.duplex, hdev->hw.mac.lane_num); 2961 if (ret) 2962 
return ret; 2963 2964 if (hdev->hw.mac.support_autoneg) { 2965 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); 2966 if (ret) 2967 return ret; 2968 } 2969 2970 mac->link = 0; 2971 2972 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { 2973 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); 2974 if (ret) 2975 return ret; 2976 } 2977 2978 ret = hclge_set_mac_mtu(hdev, hdev->mps); 2979 if (ret) { 2980 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); 2981 return ret; 2982 } 2983 2984 ret = hclge_set_default_loopback(hdev); 2985 if (ret) 2986 return ret; 2987 2988 ret = hclge_buffer_alloc(hdev); 2989 if (ret) 2990 dev_err(&hdev->pdev->dev, 2991 "allocate buffer fail, ret=%d\n", ret); 2992 2993 return ret; 2994 } 2995 2996 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2997 { 2998 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2999 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { 3000 hdev->last_mbx_scheduled = jiffies; 3001 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 3002 } 3003 } 3004 3005 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 3006 { 3007 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 3008 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && 3009 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { 3010 hdev->last_rst_scheduled = jiffies; 3011 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 3012 } 3013 } 3014 3015 static void hclge_errhand_task_schedule(struct hclge_dev *hdev) 3016 { 3017 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 3018 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) 3019 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 3020 } 3021 3022 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) 3023 { 3024 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 3025 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) 3026 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); 3027 } 3028 3029 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) 3030 { 3031 struct hclge_link_status_cmd *req; 3032 struct hclge_desc desc; 3033 int ret; 3034 3035 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 3036 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3037 if (ret) { 3038 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 3039 ret); 3040 return ret; 3041 } 3042 3043 req = (struct hclge_link_status_cmd *)desc.data; 3044 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? 
3045 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 3046 3047 return 0; 3048 } 3049 3050 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) 3051 { 3052 struct phy_device *phydev = hdev->hw.mac.phydev; 3053 3054 *link_status = HCLGE_LINK_STATUS_DOWN; 3055 3056 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) 3057 return 0; 3058 3059 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) 3060 return 0; 3061 3062 return hclge_get_mac_link_status(hdev, link_status); 3063 } 3064 3065 static void hclge_push_link_status(struct hclge_dev *hdev) 3066 { 3067 struct hclge_vport *vport; 3068 int ret; 3069 u16 i; 3070 3071 for (i = 0; i < pci_num_vf(hdev->pdev); i++) { 3072 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; 3073 3074 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || 3075 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) 3076 continue; 3077 3078 ret = hclge_push_vf_link_status(vport); 3079 if (ret) { 3080 dev_err(&hdev->pdev->dev, 3081 "failed to push link status to vf%u, ret = %d\n", 3082 i, ret); 3083 } 3084 } 3085 } 3086 3087 static void hclge_update_link_status(struct hclge_dev *hdev) 3088 { 3089 struct hnae3_handle *handle = &hdev->vport[0].nic; 3090 struct hnae3_client *client = hdev->nic_client; 3091 int state; 3092 int ret; 3093 3094 if (!client) 3095 return; 3096 3097 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) 3098 return; 3099 3100 ret = hclge_get_mac_phy_link(hdev, &state); 3101 if (ret) { 3102 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 3103 return; 3104 } 3105 3106 if (state != hdev->hw.mac.link) { 3107 hdev->hw.mac.link = state; 3108 if (state == HCLGE_LINK_STATUS_UP) 3109 hclge_update_port_info(hdev); 3110 3111 client->ops->link_status_change(handle, state); 3112 hclge_config_mac_tnl_int(hdev, state); 3113 3114 if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) { 3115 struct hnae3_handle *rhandle = &hdev->vport[0].roce; 3116 struct hnae3_client *rclient = hdev->roce_client; 3117 3118 if (rclient && rclient->ops->link_status_change) 3119 rclient->ops->link_status_change(rhandle, 3120 state); 3121 } 3122 3123 hclge_push_link_status(hdev); 3124 } 3125 3126 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 3127 } 3128 3129 static void hclge_update_speed_advertising(struct hclge_mac *mac) 3130 { 3131 u32 speed_ability; 3132 3133 if (hclge_get_speed_bit(mac->speed, &speed_ability)) 3134 return; 3135 3136 switch (mac->module_type) { 3137 case HNAE3_MODULE_TYPE_FIBRE_LR: 3138 hclge_convert_setting_lr(speed_ability, mac->advertising); 3139 break; 3140 case HNAE3_MODULE_TYPE_FIBRE_SR: 3141 case HNAE3_MODULE_TYPE_AOC: 3142 hclge_convert_setting_sr(speed_ability, mac->advertising); 3143 break; 3144 case HNAE3_MODULE_TYPE_CR: 3145 hclge_convert_setting_cr(speed_ability, mac->advertising); 3146 break; 3147 case HNAE3_MODULE_TYPE_KR: 3148 hclge_convert_setting_kr(speed_ability, mac->advertising); 3149 break; 3150 default: 3151 break; 3152 } 3153 } 3154 3155 static void hclge_update_fec_advertising(struct hclge_mac *mac) 3156 { 3157 if (mac->fec_mode & BIT(HNAE3_FEC_RS)) 3158 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 3159 mac->advertising); 3160 else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS)) 3161 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 3162 mac->advertising); 3163 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) 3164 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 3165 mac->advertising); 3166 else 3167 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 3168 mac->advertising); 3169 } 3170 3171 static 
void hclge_update_pause_advertising(struct hclge_dev *hdev) 3172 { 3173 struct hclge_mac *mac = &hdev->hw.mac; 3174 bool rx_en, tx_en; 3175 3176 switch (hdev->fc_mode_last_time) { 3177 case HCLGE_FC_RX_PAUSE: 3178 rx_en = true; 3179 tx_en = false; 3180 break; 3181 case HCLGE_FC_TX_PAUSE: 3182 rx_en = false; 3183 tx_en = true; 3184 break; 3185 case HCLGE_FC_FULL: 3186 rx_en = true; 3187 tx_en = true; 3188 break; 3189 default: 3190 rx_en = false; 3191 tx_en = false; 3192 break; 3193 } 3194 3195 linkmode_set_pause(mac->advertising, tx_en, rx_en); 3196 } 3197 3198 static void hclge_update_advertising(struct hclge_dev *hdev) 3199 { 3200 struct hclge_mac *mac = &hdev->hw.mac; 3201 3202 linkmode_zero(mac->advertising); 3203 hclge_update_speed_advertising(mac); 3204 hclge_update_fec_advertising(mac); 3205 hclge_update_pause_advertising(hdev); 3206 } 3207 3208 static void hclge_update_port_capability(struct hclge_dev *hdev, 3209 struct hclge_mac *mac) 3210 { 3211 if (hnae3_dev_fec_supported(hdev)) 3212 hclge_convert_setting_fec(mac); 3213 3214 /* firmware can not identify back plane type, the media type 3215 * read from configuration can help deal it 3216 */ 3217 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && 3218 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) 3219 mac->module_type = HNAE3_MODULE_TYPE_KR; 3220 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 3221 mac->module_type = HNAE3_MODULE_TYPE_TP; 3222 3223 if (mac->support_autoneg) { 3224 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); 3225 linkmode_copy(mac->advertising, mac->supported); 3226 } else { 3227 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 3228 mac->supported); 3229 hclge_update_advertising(hdev); 3230 } 3231 } 3232 3233 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) 3234 { 3235 struct hclge_sfp_info_cmd *resp; 3236 struct hclge_desc desc; 3237 int ret; 3238 3239 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 3240 resp = (struct hclge_sfp_info_cmd *)desc.data; 3241 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3242 if (ret == -EOPNOTSUPP) { 3243 dev_warn(&hdev->pdev->dev, 3244 "IMP do not support get SFP speed %d\n", ret); 3245 return ret; 3246 } else if (ret) { 3247 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); 3248 return ret; 3249 } 3250 3251 *speed = le32_to_cpu(resp->speed); 3252 3253 return 0; 3254 } 3255 3256 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) 3257 { 3258 struct hclge_sfp_info_cmd *resp; 3259 struct hclge_desc desc; 3260 int ret; 3261 3262 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 3263 resp = (struct hclge_sfp_info_cmd *)desc.data; 3264 3265 resp->query_type = QUERY_ACTIVE_SPEED; 3266 3267 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3268 if (ret == -EOPNOTSUPP) { 3269 dev_warn(&hdev->pdev->dev, 3270 "IMP does not support get SFP info %d\n", ret); 3271 return ret; 3272 } else if (ret) { 3273 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); 3274 return ret; 3275 } 3276 3277 /* In some case, mac speed get from IMP may be 0, it shouldn't be 3278 * set to mac->speed. 
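* In that case the previously stored speed is kept and the query still succeeds.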
3279 */ 3280 if (!le32_to_cpu(resp->speed)) 3281 return 0; 3282 3283 mac->speed = le32_to_cpu(resp->speed); 3284 /* if resp->speed_ability is 0, it means it's an old version 3285 * firmware, do not update these params 3286 */ 3287 if (resp->speed_ability) { 3288 mac->module_type = le32_to_cpu(resp->module_type); 3289 mac->speed_ability = le32_to_cpu(resp->speed_ability); 3290 mac->autoneg = resp->autoneg; 3291 mac->support_autoneg = resp->autoneg_ability; 3292 mac->speed_type = QUERY_ACTIVE_SPEED; 3293 mac->lane_num = resp->lane_num; 3294 if (!resp->active_fec) 3295 mac->fec_mode = 0; 3296 else 3297 mac->fec_mode = BIT(resp->active_fec); 3298 mac->fec_ability = resp->fec_ability; 3299 } else { 3300 mac->speed_type = QUERY_SFP_SPEED; 3301 } 3302 3303 return 0; 3304 } 3305 3306 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, 3307 struct ethtool_link_ksettings *cmd) 3308 { 3309 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; 3310 struct hclge_vport *vport = hclge_get_vport(handle); 3311 struct hclge_phy_link_ksetting_0_cmd *req0; 3312 struct hclge_phy_link_ksetting_1_cmd *req1; 3313 u32 supported, advertising, lp_advertising; 3314 struct hclge_dev *hdev = vport->back; 3315 int ret; 3316 3317 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, 3318 true); 3319 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3320 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, 3321 true); 3322 3323 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); 3324 if (ret) { 3325 dev_err(&hdev->pdev->dev, 3326 "failed to get phy link ksetting, ret = %d.\n", ret); 3327 return ret; 3328 } 3329 3330 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; 3331 cmd->base.autoneg = req0->autoneg; 3332 cmd->base.speed = le32_to_cpu(req0->speed); 3333 cmd->base.duplex = req0->duplex; 3334 cmd->base.port = req0->port; 3335 cmd->base.transceiver = req0->transceiver; 3336 cmd->base.phy_address = req0->phy_address; 3337 cmd->base.eth_tp_mdix = req0->eth_tp_mdix; 3338 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; 3339 supported = le32_to_cpu(req0->supported); 3340 advertising = le32_to_cpu(req0->advertising); 3341 lp_advertising = le32_to_cpu(req0->lp_advertising); 3342 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 3343 supported); 3344 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 3345 advertising); 3346 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, 3347 lp_advertising); 3348 3349 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; 3350 cmd->base.master_slave_cfg = req1->master_slave_cfg; 3351 cmd->base.master_slave_state = req1->master_slave_state; 3352 3353 return 0; 3354 } 3355 3356 static int 3357 hclge_set_phy_link_ksettings(struct hnae3_handle *handle, 3358 const struct ethtool_link_ksettings *cmd) 3359 { 3360 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; 3361 struct hclge_vport *vport = hclge_get_vport(handle); 3362 struct hclge_phy_link_ksetting_0_cmd *req0; 3363 struct hclge_phy_link_ksetting_1_cmd *req1; 3364 struct hclge_dev *hdev = vport->back; 3365 u32 advertising; 3366 int ret; 3367 3368 if (cmd->base.autoneg == AUTONEG_DISABLE && 3369 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || 3370 (cmd->base.duplex != DUPLEX_HALF && 3371 cmd->base.duplex != DUPLEX_FULL))) 3372 return -EINVAL; 3373 3374 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, 3375 false); 3376 desc[0].flag |= 
cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3377 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, 3378 false); 3379 3380 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; 3381 req0->autoneg = cmd->base.autoneg; 3382 req0->speed = cpu_to_le32(cmd->base.speed); 3383 req0->duplex = cmd->base.duplex; 3384 ethtool_convert_link_mode_to_legacy_u32(&advertising, 3385 cmd->link_modes.advertising); 3386 req0->advertising = cpu_to_le32(advertising); 3387 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; 3388 3389 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; 3390 req1->master_slave_cfg = cmd->base.master_slave_cfg; 3391 3392 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); 3393 if (ret) { 3394 dev_err(&hdev->pdev->dev, 3395 "failed to set phy link ksettings, ret = %d.\n", ret); 3396 return ret; 3397 } 3398 3399 hdev->hw.mac.req_autoneg = cmd->base.autoneg; 3400 hdev->hw.mac.req_speed = cmd->base.speed; 3401 hdev->hw.mac.req_duplex = cmd->base.duplex; 3402 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); 3403 3404 return 0; 3405 } 3406 3407 static int hclge_update_tp_port_info(struct hclge_dev *hdev) 3408 { 3409 struct ethtool_link_ksettings cmd; 3410 int ret; 3411 3412 if (!hnae3_dev_phy_imp_supported(hdev)) 3413 return 0; 3414 3415 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); 3416 if (ret) 3417 return ret; 3418 3419 hdev->hw.mac.autoneg = cmd.base.autoneg; 3420 hdev->hw.mac.speed = cmd.base.speed; 3421 hdev->hw.mac.duplex = cmd.base.duplex; 3422 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); 3423 3424 return 0; 3425 } 3426 3427 static int hclge_tp_port_init(struct hclge_dev *hdev) 3428 { 3429 struct ethtool_link_ksettings cmd; 3430 3431 if (!hnae3_dev_phy_imp_supported(hdev)) 3432 return 0; 3433 3434 cmd.base.autoneg = hdev->hw.mac.req_autoneg; 3435 cmd.base.speed = hdev->hw.mac.req_speed; 3436 cmd.base.duplex = hdev->hw.mac.req_duplex; 3437 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); 3438 3439 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); 3440 } 3441 3442 static int hclge_update_port_info(struct hclge_dev *hdev) 3443 { 3444 struct hclge_mac *mac = &hdev->hw.mac; 3445 int speed; 3446 int ret; 3447 3448 /* get the port info from SFP cmd if not copper port */ 3449 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 3450 return hclge_update_tp_port_info(hdev); 3451 3452 /* if IMP does not support get SFP/qSFP info, return directly */ 3453 if (!hdev->support_sfp_query) 3454 return 0; 3455 3456 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3457 speed = mac->speed; 3458 ret = hclge_get_sfp_info(hdev, mac); 3459 } else { 3460 speed = HCLGE_MAC_SPEED_UNKNOWN; 3461 ret = hclge_get_sfp_speed(hdev, &speed); 3462 } 3463 3464 if (ret == -EOPNOTSUPP) { 3465 hdev->support_sfp_query = false; 3466 return ret; 3467 } else if (ret) { 3468 return ret; 3469 } 3470 3471 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3472 if (mac->speed_type == QUERY_ACTIVE_SPEED) { 3473 hclge_update_port_capability(hdev, mac); 3474 if (mac->speed != speed) 3475 (void)hclge_tm_port_shaper_cfg(hdev); 3476 return 0; 3477 } 3478 return hclge_cfg_mac_speed_dup(hdev, mac->speed, 3479 HCLGE_MAC_FULL, mac->lane_num); 3480 } else { 3481 if (speed == HCLGE_MAC_SPEED_UNKNOWN) 3482 return 0; /* do nothing if no SFP */ 3483 3484 /* must config full duplex for SFP */ 3485 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0); 3486 } 3487 } 3488 3489 static int 
hclge_get_status(struct hnae3_handle *handle) 3490 { 3491 struct hclge_vport *vport = hclge_get_vport(handle); 3492 struct hclge_dev *hdev = vport->back; 3493 3494 hclge_update_link_status(hdev); 3495 3496 return hdev->hw.mac.link; 3497 } 3498 3499 struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) 3500 { 3501 if (!pci_num_vf(hdev->pdev)) { 3502 dev_err(&hdev->pdev->dev, 3503 "SRIOV is disabled, can not get vport(%d) info.\n", vf); 3504 return NULL; 3505 } 3506 3507 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { 3508 dev_err(&hdev->pdev->dev, 3509 "vf id(%d) is out of range(0 <= vfid < %d)\n", 3510 vf, pci_num_vf(hdev->pdev)); 3511 return NULL; 3512 } 3513 3514 /* VF start from 1 in vport */ 3515 vf += HCLGE_VF_VPORT_START_NUM; 3516 return &hdev->vport[vf]; 3517 } 3518 3519 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf, 3520 struct ifla_vf_info *ivf) 3521 { 3522 struct hclge_vport *vport = hclge_get_vport(handle); 3523 struct hclge_dev *hdev = vport->back; 3524 3525 vport = hclge_get_vf_vport(hdev, vf); 3526 if (!vport) 3527 return -EINVAL; 3528 3529 ivf->vf = vf; 3530 ivf->linkstate = vport->vf_info.link_state; 3531 ivf->spoofchk = vport->vf_info.spoofchk; 3532 ivf->trusted = vport->vf_info.trusted; 3533 ivf->min_tx_rate = 0; 3534 ivf->max_tx_rate = vport->vf_info.max_tx_rate; 3535 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; 3536 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); 3537 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; 3538 ether_addr_copy(ivf->mac, vport->vf_info.mac); 3539 3540 return 0; 3541 } 3542 3543 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, 3544 int link_state) 3545 { 3546 struct hclge_vport *vport = hclge_get_vport(handle); 3547 struct hclge_dev *hdev = vport->back; 3548 int link_state_old; 3549 int ret; 3550 3551 vport = hclge_get_vf_vport(hdev, vf); 3552 if (!vport) 3553 return -EINVAL; 3554 3555 link_state_old = vport->vf_info.link_state; 3556 vport->vf_info.link_state = link_state; 3557 3558 /* return success directly if the VF is unalive, VF will 3559 * query link state itself when it starts work. 3560 */ 3561 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 3562 return 0; 3563 3564 ret = hclge_push_vf_link_status(vport); 3565 if (ret) { 3566 vport->vf_info.link_state = link_state_old; 3567 dev_err(&hdev->pdev->dev, 3568 "failed to push vf%d link status, ret = %d\n", vf, ret); 3569 } 3570 3571 return ret; 3572 } 3573 3574 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 3575 { 3576 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg; 3577 3578 /* fetch the events from their corresponding regs */ 3579 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 3580 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); 3581 hw_err_src_reg = hclge_read_dev(&hdev->hw, 3582 HCLGE_RAS_PF_OTHER_INT_STS_REG); 3583 3584 /* Assumption: If by any chance reset and mailbox events are reported 3585 * together then we will only process reset event in this go and will 3586 * defer the processing of the mailbox events. Since, we would have not 3587 * cleared RX CMDQ event this time we would receive again another 3588 * interrupt from H/W just for the mailbox. 
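* The checks below therefore run in priority order: IMP/global reset first, then hardware/MSI-X errors, then PTP and finally mailbox events.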
3589 * 3590 * check for vector0 reset event sources 3591 */ 3592 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { 3593 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); 3594 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 3595 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3596 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 3597 hdev->rst_stats.imp_rst_cnt++; 3598 return HCLGE_VECTOR0_EVENT_RST; 3599 } 3600 3601 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { 3602 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); 3603 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3604 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 3605 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 3606 hdev->rst_stats.global_rst_cnt++; 3607 return HCLGE_VECTOR0_EVENT_RST; 3608 } 3609 3610 /* check for vector0 msix event and hardware error event source */ 3611 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK || 3612 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK) 3613 return HCLGE_VECTOR0_EVENT_ERR; 3614 3615 /* check for vector0 ptp event source */ 3616 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) { 3617 *clearval = msix_src_reg; 3618 return HCLGE_VECTOR0_EVENT_PTP; 3619 } 3620 3621 /* check for vector0 mailbox(=CMDQ RX) event source */ 3622 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 3623 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 3624 *clearval = cmdq_src_reg; 3625 return HCLGE_VECTOR0_EVENT_MBX; 3626 } 3627 3628 /* print other vector0 event source */ 3629 dev_info(&hdev->pdev->dev, 3630 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n", 3631 cmdq_src_reg, hw_err_src_reg, msix_src_reg); 3632 3633 return HCLGE_VECTOR0_EVENT_OTHER; 3634 } 3635 3636 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 3637 u32 regclr) 3638 { 3639 #define HCLGE_IMP_RESET_DELAY 5 3640 3641 switch (event_type) { 3642 case HCLGE_VECTOR0_EVENT_PTP: 3643 case HCLGE_VECTOR0_EVENT_RST: 3644 if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) 3645 mdelay(HCLGE_IMP_RESET_DELAY); 3646 3647 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 3648 break; 3649 case HCLGE_VECTOR0_EVENT_MBX: 3650 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 3651 break; 3652 default: 3653 break; 3654 } 3655 } 3656 3657 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) 3658 { 3659 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, 3660 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | 3661 BIT(HCLGE_VECTOR0_CORERESET_INT_B) | 3662 BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); 3663 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); 3664 } 3665 3666 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 3667 { 3668 writel(enable ? 1 : 0, vector->addr); 3669 } 3670 3671 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 3672 { 3673 struct hclge_dev *hdev = data; 3674 unsigned long flags; 3675 u32 clearval = 0; 3676 u32 event_cause; 3677 3678 hclge_enable_vector(&hdev->misc_vector, false); 3679 event_cause = hclge_check_event_cause(hdev, &clearval); 3680 3681 /* vector 0 interrupt is shared with reset and mailbox source events. 
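 * The vector is disabled at the top of this handler; the reset and error
 * paths re-enable it later once their handling is done, while the PTP,
 * mailbox and "other" paths re-enable it at the bottom of this handler.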
*/ 3682 switch (event_cause) { 3683 case HCLGE_VECTOR0_EVENT_ERR: 3684 hclge_errhand_task_schedule(hdev); 3685 break; 3686 case HCLGE_VECTOR0_EVENT_RST: 3687 hclge_reset_task_schedule(hdev); 3688 break; 3689 case HCLGE_VECTOR0_EVENT_PTP: 3690 spin_lock_irqsave(&hdev->ptp->lock, flags); 3691 hclge_ptp_clean_tx_hwts(hdev); 3692 spin_unlock_irqrestore(&hdev->ptp->lock, flags); 3693 break; 3694 case HCLGE_VECTOR0_EVENT_MBX: 3695 /* If we are here then, 3696 * 1. Either we are not handling any mbx task and we are not 3697 * scheduled as well 3698 * OR 3699 * 2. We could be handling a mbx task but nothing more is 3700 * scheduled. 3701 * In both cases, we should schedule mbx task as there are more 3702 * mbx messages reported by this interrupt. 3703 */ 3704 hclge_mbx_task_schedule(hdev); 3705 break; 3706 default: 3707 dev_warn(&hdev->pdev->dev, 3708 "received unknown or unhandled event of vector0\n"); 3709 break; 3710 } 3711 3712 hclge_clear_event_cause(hdev, event_cause, clearval); 3713 3714 /* Enable interrupt if it is not caused by reset event or error event */ 3715 if (event_cause == HCLGE_VECTOR0_EVENT_PTP || 3716 event_cause == HCLGE_VECTOR0_EVENT_MBX || 3717 event_cause == HCLGE_VECTOR0_EVENT_OTHER) 3718 hclge_enable_vector(&hdev->misc_vector, true); 3719 3720 return IRQ_HANDLED; 3721 } 3722 3723 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 3724 { 3725 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { 3726 dev_warn(&hdev->pdev->dev, 3727 "vector(vector_id %d) has been freed.\n", vector_id); 3728 return; 3729 } 3730 3731 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 3732 hdev->num_msi_left += 1; 3733 hdev->num_msi_used -= 1; 3734 } 3735 3736 static void hclge_get_misc_vector(struct hclge_dev *hdev) 3737 { 3738 struct hclge_misc_vector *vector = &hdev->misc_vector; 3739 3740 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 3741 3742 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 3743 hdev->vector_status[0] = 0; 3744 3745 hdev->num_msi_left -= 1; 3746 hdev->num_msi_used += 1; 3747 } 3748 3749 static int hclge_misc_irq_init(struct hclge_dev *hdev) 3750 { 3751 int ret; 3752 3753 hclge_get_misc_vector(hdev); 3754 3755 /* this would be explicitly freed in the end */ 3756 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 3757 HCLGE_NAME, pci_name(hdev->pdev)); 3758 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 3759 0, hdev->misc_vector.name, hdev); 3760 if (ret) { 3761 hclge_free_vector(hdev, 0); 3762 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 3763 hdev->misc_vector.vector_irq); 3764 } 3765 3766 return ret; 3767 } 3768 3769 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 3770 { 3771 free_irq(hdev->misc_vector.vector_irq, hdev); 3772 hclge_free_vector(hdev, 0); 3773 } 3774 3775 int hclge_notify_client(struct hclge_dev *hdev, 3776 enum hnae3_reset_notify_type type) 3777 { 3778 struct hnae3_handle *handle = &hdev->vport[0].nic; 3779 struct hnae3_client *client = hdev->nic_client; 3780 int ret; 3781 3782 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) 3783 return 0; 3784 3785 if (!client->ops->reset_notify) 3786 return -EOPNOTSUPP; 3787 3788 ret = client->ops->reset_notify(handle, type); 3789 if (ret) 3790 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 3791 type, ret); 3792 3793 return ret; 3794 } 3795 3796 static int hclge_notify_roce_client(struct hclge_dev *hdev, 3797 enum hnae3_reset_notify_type type) 3798 { 3799 struct hnae3_handle 
*handle = &hdev->vport[0].roce; 3800 struct hnae3_client *client = hdev->roce_client; 3801 int ret; 3802 3803 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) 3804 return 0; 3805 3806 if (!client->ops->reset_notify) 3807 return -EOPNOTSUPP; 3808 3809 ret = client->ops->reset_notify(handle, type); 3810 if (ret) 3811 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 3812 type, ret); 3813 3814 return ret; 3815 } 3816 3817 static int hclge_reset_wait(struct hclge_dev *hdev) 3818 { 3819 #define HCLGE_RESET_WATI_MS 100 3820 #define HCLGE_RESET_WAIT_CNT 350 3821 3822 u32 val, reg, reg_bit; 3823 u32 cnt = 0; 3824 3825 switch (hdev->reset_type) { 3826 case HNAE3_IMP_RESET: 3827 reg = HCLGE_GLOBAL_RESET_REG; 3828 reg_bit = HCLGE_IMP_RESET_BIT; 3829 break; 3830 case HNAE3_GLOBAL_RESET: 3831 reg = HCLGE_GLOBAL_RESET_REG; 3832 reg_bit = HCLGE_GLOBAL_RESET_BIT; 3833 break; 3834 case HNAE3_FUNC_RESET: 3835 reg = HCLGE_FUN_RST_ING; 3836 reg_bit = HCLGE_FUN_RST_ING_B; 3837 break; 3838 default: 3839 dev_err(&hdev->pdev->dev, 3840 "Wait for unsupported reset type: %d\n", 3841 hdev->reset_type); 3842 return -EINVAL; 3843 } 3844 3845 val = hclge_read_dev(&hdev->hw, reg); 3846 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 3847 msleep(HCLGE_RESET_WATI_MS); 3848 val = hclge_read_dev(&hdev->hw, reg); 3849 cnt++; 3850 } 3851 3852 if (cnt >= HCLGE_RESET_WAIT_CNT) { 3853 dev_warn(&hdev->pdev->dev, 3854 "Wait for reset timeout: %d\n", hdev->reset_type); 3855 return -EBUSY; 3856 } 3857 3858 return 0; 3859 } 3860 3861 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) 3862 { 3863 struct hclge_vf_rst_cmd *req; 3864 struct hclge_desc desc; 3865 3866 req = (struct hclge_vf_rst_cmd *)desc.data; 3867 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); 3868 req->dest_vfid = func_id; 3869 3870 if (reset) 3871 req->vf_rst = 0x1; 3872 3873 return hclge_cmd_send(&hdev->hw, &desc, 1); 3874 } 3875 3876 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) 3877 { 3878 int i; 3879 3880 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { 3881 struct hclge_vport *vport = &hdev->vport[i]; 3882 int ret; 3883 3884 /* Send cmd to set/clear VF's FUNC_RST_ING */ 3885 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); 3886 if (ret) { 3887 dev_err(&hdev->pdev->dev, 3888 "set vf(%u) rst failed %d!\n", 3889 vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3890 ret); 3891 return ret; 3892 } 3893 3894 if (!reset || 3895 !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) 3896 continue; 3897 3898 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && 3899 hdev->reset_type == HNAE3_FUNC_RESET) { 3900 set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, 3901 &vport->need_notify); 3902 continue; 3903 } 3904 3905 /* Inform VF to process the reset. 3906 * hclge_inform_reset_assert_to_vf may fail if VF 3907 * driver is not loaded. 
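 * A failure is treated as non-fatal here: only a warning is logged and
 * the loop continues with the remaining VFs.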
3908 */ 3909 ret = hclge_inform_reset_assert_to_vf(vport); 3910 if (ret) 3911 dev_warn(&hdev->pdev->dev, 3912 "inform reset to vf(%u) failed %d!\n", 3913 vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3914 ret); 3915 } 3916 3917 return 0; 3918 } 3919 3920 static void hclge_mailbox_service_task(struct hclge_dev *hdev) 3921 { 3922 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || 3923 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || 3924 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 3925 return; 3926 3927 if (time_is_before_jiffies(hdev->last_mbx_scheduled + 3928 HCLGE_MBX_SCHED_TIMEOUT)) 3929 dev_warn(&hdev->pdev->dev, 3930 "mbx service task is scheduled after %ums on cpu%u!\n", 3931 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), 3932 smp_processor_id()); 3933 3934 hclge_mbx_handler(hdev); 3935 3936 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 3937 } 3938 3939 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) 3940 { 3941 struct hclge_pf_rst_sync_cmd *req; 3942 struct hclge_desc desc; 3943 int cnt = 0; 3944 int ret; 3945 3946 req = (struct hclge_pf_rst_sync_cmd *)desc.data; 3947 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); 3948 3949 do { 3950 /* vf need to down netdev by mbx during PF or FLR reset */ 3951 hclge_mailbox_service_task(hdev); 3952 3953 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3954 /* for compatible with old firmware, wait 3955 * 100 ms for VF to stop IO 3956 */ 3957 if (ret == -EOPNOTSUPP) { 3958 msleep(HCLGE_RESET_SYNC_TIME); 3959 return; 3960 } else if (ret) { 3961 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", 3962 ret); 3963 return; 3964 } else if (req->all_vf_ready) { 3965 return; 3966 } 3967 msleep(HCLGE_PF_RESET_SYNC_TIME); 3968 hclge_comm_cmd_reuse_desc(&desc, true); 3969 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); 3970 3971 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); 3972 } 3973 3974 void hclge_report_hw_error(struct hclge_dev *hdev, 3975 enum hnae3_hw_error_type type) 3976 { 3977 struct hnae3_client *client = hdev->nic_client; 3978 3979 if (!client || !client->ops->process_hw_error || 3980 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) 3981 return; 3982 3983 client->ops->process_hw_error(&hdev->vport[0].nic, type); 3984 } 3985 3986 static void hclge_handle_imp_error(struct hclge_dev *hdev) 3987 { 3988 u32 reg_val; 3989 3990 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 3991 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { 3992 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); 3993 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); 3994 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 3995 } 3996 3997 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { 3998 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); 3999 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); 4000 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 4001 } 4002 } 4003 4004 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 4005 { 4006 struct hclge_desc desc; 4007 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 4008 int ret; 4009 4010 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 4011 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 4012 req->fun_reset_vfid = func_id; 4013 4014 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4015 if (ret) 4016 dev_err(&hdev->pdev->dev, 4017 "send function reset cmd fail, status =%d\n", ret); 4018 4019 return ret; 4020 } 4021 4022 static void 
hclge_do_reset(struct hclge_dev *hdev) 4023 { 4024 struct hnae3_handle *handle = &hdev->vport[0].nic; 4025 struct pci_dev *pdev = hdev->pdev; 4026 u32 val; 4027 4028 if (hclge_get_hw_reset_stat(handle)) { 4029 dev_info(&pdev->dev, "hardware reset not finish\n"); 4030 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", 4031 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), 4032 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); 4033 return; 4034 } 4035 4036 switch (hdev->reset_type) { 4037 case HNAE3_IMP_RESET: 4038 dev_info(&pdev->dev, "IMP reset requested\n"); 4039 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 4040 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); 4041 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); 4042 break; 4043 case HNAE3_GLOBAL_RESET: 4044 dev_info(&pdev->dev, "global reset requested\n"); 4045 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 4046 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 4047 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 4048 break; 4049 case HNAE3_FUNC_RESET: 4050 dev_info(&pdev->dev, "PF reset requested\n"); 4051 /* schedule again to check later */ 4052 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 4053 hclge_reset_task_schedule(hdev); 4054 break; 4055 default: 4056 dev_warn(&pdev->dev, 4057 "unsupported reset type: %d\n", hdev->reset_type); 4058 break; 4059 } 4060 } 4061 4062 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, 4063 unsigned long *addr) 4064 { 4065 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 4066 struct hclge_dev *hdev = ae_dev->priv; 4067 4068 /* return the highest priority reset level amongst all */ 4069 if (test_bit(HNAE3_IMP_RESET, addr)) { 4070 rst_level = HNAE3_IMP_RESET; 4071 clear_bit(HNAE3_IMP_RESET, addr); 4072 clear_bit(HNAE3_GLOBAL_RESET, addr); 4073 clear_bit(HNAE3_FUNC_RESET, addr); 4074 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { 4075 rst_level = HNAE3_GLOBAL_RESET; 4076 clear_bit(HNAE3_GLOBAL_RESET, addr); 4077 clear_bit(HNAE3_FUNC_RESET, addr); 4078 } else if (test_bit(HNAE3_FUNC_RESET, addr)) { 4079 rst_level = HNAE3_FUNC_RESET; 4080 clear_bit(HNAE3_FUNC_RESET, addr); 4081 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 4082 rst_level = HNAE3_FLR_RESET; 4083 clear_bit(HNAE3_FLR_RESET, addr); 4084 } 4085 4086 if (hdev->reset_type != HNAE3_NONE_RESET && 4087 rst_level < hdev->reset_type) 4088 return HNAE3_NONE_RESET; 4089 4090 return rst_level; 4091 } 4092 4093 static void hclge_clear_reset_cause(struct hclge_dev *hdev) 4094 { 4095 u32 clearval = 0; 4096 4097 switch (hdev->reset_type) { 4098 case HNAE3_IMP_RESET: 4099 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 4100 break; 4101 case HNAE3_GLOBAL_RESET: 4102 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 4103 break; 4104 default: 4105 break; 4106 } 4107 4108 if (!clearval) 4109 return; 4110 4111 /* For revision 0x20, the reset interrupt source 4112 * can only be cleared after hardware reset done 4113 */ 4114 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 4115 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, 4116 clearval); 4117 4118 hclge_enable_vector(&hdev->misc_vector, true); 4119 } 4120 4121 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) 4122 { 4123 u32 reg_val; 4124 4125 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); 4126 if (enable) 4127 reg_val |= HCLGE_COMM_NIC_SW_RST_RDY; 4128 else 4129 reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY; 4130 4131 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); 
4132 } 4133 4134 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) 4135 { 4136 int ret; 4137 4138 ret = hclge_set_all_vf_rst(hdev, true); 4139 if (ret) 4140 return ret; 4141 4142 hclge_func_reset_sync_vf(hdev); 4143 4144 return 0; 4145 } 4146 4147 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) 4148 { 4149 u32 reg_val; 4150 int ret = 0; 4151 4152 switch (hdev->reset_type) { 4153 case HNAE3_FUNC_RESET: 4154 ret = hclge_func_reset_notify_vf(hdev); 4155 if (ret) 4156 return ret; 4157 4158 ret = hclge_func_reset_cmd(hdev, 0); 4159 if (ret) { 4160 dev_err(&hdev->pdev->dev, 4161 "asserting function reset fail %d!\n", ret); 4162 return ret; 4163 } 4164 4165 /* After performaning pf reset, it is not necessary to do the 4166 * mailbox handling or send any command to firmware, because 4167 * any mailbox handling or command to firmware is only valid 4168 * after hclge_comm_cmd_init is called. 4169 */ 4170 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 4171 hdev->rst_stats.pf_rst_cnt++; 4172 break; 4173 case HNAE3_FLR_RESET: 4174 ret = hclge_func_reset_notify_vf(hdev); 4175 if (ret) 4176 return ret; 4177 break; 4178 case HNAE3_IMP_RESET: 4179 hclge_handle_imp_error(hdev); 4180 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 4181 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, 4182 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); 4183 break; 4184 default: 4185 break; 4186 } 4187 4188 /* inform hardware that preparatory work is done */ 4189 msleep(HCLGE_RESET_SYNC_TIME); 4190 hclge_reset_handshake(hdev, true); 4191 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); 4192 4193 return ret; 4194 } 4195 4196 static void hclge_show_rst_info(struct hclge_dev *hdev) 4197 { 4198 char *buf; 4199 4200 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL); 4201 if (!buf) 4202 return; 4203 4204 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); 4205 4206 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); 4207 4208 kfree(buf); 4209 } 4210 4211 static bool hclge_reset_err_handle(struct hclge_dev *hdev) 4212 { 4213 #define MAX_RESET_FAIL_CNT 5 4214 4215 if (hdev->reset_pending) { 4216 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", 4217 hdev->reset_pending); 4218 return true; 4219 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & 4220 HCLGE_RESET_INT_M) { 4221 dev_info(&hdev->pdev->dev, 4222 "reset failed because new reset interrupt\n"); 4223 hclge_clear_reset_cause(hdev); 4224 return false; 4225 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { 4226 hdev->rst_stats.reset_fail_cnt++; 4227 set_bit(hdev->reset_type, &hdev->reset_pending); 4228 dev_info(&hdev->pdev->dev, 4229 "re-schedule reset task(%u)\n", 4230 hdev->rst_stats.reset_fail_cnt); 4231 return true; 4232 } 4233 4234 hclge_clear_reset_cause(hdev); 4235 4236 /* recover the handshake status when reset fail */ 4237 hclge_reset_handshake(hdev, true); 4238 4239 dev_err(&hdev->pdev->dev, "Reset fail!\n"); 4240 4241 hclge_show_rst_info(hdev); 4242 4243 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 4244 4245 return false; 4246 } 4247 4248 static void hclge_update_reset_level(struct hclge_dev *hdev) 4249 { 4250 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4251 enum hnae3_reset_type reset_level; 4252 4253 /* reset request will not be set during reset, so clear 4254 * pending reset request to avoid unnecessary reset 4255 * caused by the same reason. 
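 * hclge_get_reset_level() clears the request bits it consumes, so the
 * call below is made only for that side effect and its return value is
 * ignored.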
4256 */ 4257 hclge_get_reset_level(ae_dev, &hdev->reset_request); 4258 4259 /* if default_reset_request has a higher level reset request, 4260 * it should be handled as soon as possible. since some errors 4261 * need this kind of reset to fix. 4262 */ 4263 reset_level = hclge_get_reset_level(ae_dev, 4264 &hdev->default_reset_request); 4265 if (reset_level != HNAE3_NONE_RESET) 4266 set_bit(reset_level, &hdev->reset_request); 4267 } 4268 4269 static int hclge_set_rst_done(struct hclge_dev *hdev) 4270 { 4271 struct hclge_pf_rst_done_cmd *req; 4272 struct hclge_desc desc; 4273 int ret; 4274 4275 req = (struct hclge_pf_rst_done_cmd *)desc.data; 4276 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); 4277 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; 4278 4279 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4280 /* To be compatible with the old firmware, which does not support 4281 * command HCLGE_OPC_PF_RST_DONE, just print a warning and 4282 * return success 4283 */ 4284 if (ret == -EOPNOTSUPP) { 4285 dev_warn(&hdev->pdev->dev, 4286 "current firmware does not support command(0x%x)!\n", 4287 HCLGE_OPC_PF_RST_DONE); 4288 return 0; 4289 } else if (ret) { 4290 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", 4291 ret); 4292 } 4293 4294 return ret; 4295 } 4296 4297 static int hclge_reset_prepare_up(struct hclge_dev *hdev) 4298 { 4299 int ret = 0; 4300 4301 switch (hdev->reset_type) { 4302 case HNAE3_FUNC_RESET: 4303 case HNAE3_FLR_RESET: 4304 ret = hclge_set_all_vf_rst(hdev, false); 4305 break; 4306 case HNAE3_GLOBAL_RESET: 4307 case HNAE3_IMP_RESET: 4308 ret = hclge_set_rst_done(hdev); 4309 break; 4310 default: 4311 break; 4312 } 4313 4314 /* clear up the handshake status after re-initialize done */ 4315 hclge_reset_handshake(hdev, false); 4316 4317 return ret; 4318 } 4319 4320 static int hclge_reset_stack(struct hclge_dev *hdev) 4321 { 4322 int ret; 4323 4324 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 4325 if (ret) 4326 return ret; 4327 4328 ret = hclge_reset_ae_dev(hdev->ae_dev); 4329 if (ret) 4330 return ret; 4331 4332 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 4333 } 4334 4335 static int hclge_reset_prepare(struct hclge_dev *hdev) 4336 { 4337 int ret; 4338 4339 hdev->rst_stats.reset_cnt++; 4340 /* perform reset of the stack & ae device for a client */ 4341 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 4342 if (ret) 4343 return ret; 4344 4345 rtnl_lock(); 4346 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 4347 rtnl_unlock(); 4348 if (ret) 4349 return ret; 4350 4351 return hclge_reset_prepare_wait(hdev); 4352 } 4353 4354 static int hclge_reset_rebuild(struct hclge_dev *hdev) 4355 { 4356 int ret; 4357 4358 hdev->rst_stats.hw_reset_done_cnt++; 4359 4360 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 4361 if (ret) 4362 return ret; 4363 4364 rtnl_lock(); 4365 ret = hclge_reset_stack(hdev); 4366 rtnl_unlock(); 4367 if (ret) 4368 return ret; 4369 4370 hclge_clear_reset_cause(hdev); 4371 4372 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 4373 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 4374 * times 4375 */ 4376 if (ret && 4377 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) 4378 return ret; 4379 4380 ret = hclge_reset_prepare_up(hdev); 4381 if (ret) 4382 return ret; 4383 4384 rtnl_lock(); 4385 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); 4386 rtnl_unlock(); 4387 if (ret) 4388 return ret; 4389 4390 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); 4391 if (ret) 4392 return 
ret; 4393 4394 hdev->last_reset_time = jiffies; 4395 hdev->rst_stats.reset_fail_cnt = 0; 4396 hdev->rst_stats.reset_done_cnt++; 4397 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 4398 4399 hclge_update_reset_level(hdev); 4400 4401 return 0; 4402 } 4403 4404 static void hclge_reset(struct hclge_dev *hdev) 4405 { 4406 if (hclge_reset_prepare(hdev)) 4407 goto err_reset; 4408 4409 if (hclge_reset_wait(hdev)) 4410 goto err_reset; 4411 4412 if (hclge_reset_rebuild(hdev)) 4413 goto err_reset; 4414 4415 return; 4416 4417 err_reset: 4418 if (hclge_reset_err_handle(hdev)) 4419 hclge_reset_task_schedule(hdev); 4420 } 4421 4422 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) 4423 { 4424 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 4425 struct hclge_dev *hdev = ae_dev->priv; 4426 4427 /* We might end up getting called broadly because of 2 below cases: 4428 * 1. Recoverable error was conveyed through APEI and only way to bring 4429 * normalcy is to reset. 4430 * 2. A new reset request from the stack due to timeout 4431 * 4432 * check if this is a new reset request and we are not here just because 4433 * last reset attempt did not succeed and watchdog hit us again. We will 4434 * know this if last reset request did not occur very recently (watchdog 4435 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) 4436 * In case of new request we reset the "reset level" to PF reset. 4437 * And if it is a repeat reset request of the most recent one then we 4438 * want to make sure we throttle the reset request. Therefore, we will 4439 * not allow it again before 3*HZ times. 4440 */ 4441 4442 if (time_before(jiffies, (hdev->last_reset_time + 4443 HCLGE_RESET_INTERVAL))) { 4444 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); 4445 return; 4446 } 4447 4448 if (hdev->default_reset_request) { 4449 hdev->reset_level = 4450 hclge_get_reset_level(ae_dev, 4451 &hdev->default_reset_request); 4452 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { 4453 hdev->reset_level = HNAE3_FUNC_RESET; 4454 } 4455 4456 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", 4457 hdev->reset_level); 4458 4459 /* request reset & schedule reset task */ 4460 set_bit(hdev->reset_level, &hdev->reset_request); 4461 hclge_reset_task_schedule(hdev); 4462 4463 if (hdev->reset_level < HNAE3_GLOBAL_RESET) 4464 hdev->reset_level++; 4465 } 4466 4467 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 4468 enum hnae3_reset_type rst_type) 4469 { 4470 struct hclge_dev *hdev = ae_dev->priv; 4471 4472 set_bit(rst_type, &hdev->default_reset_request); 4473 } 4474 4475 static void hclge_reset_timer(struct timer_list *t) 4476 { 4477 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); 4478 4479 /* if default_reset_request has no value, it means that this reset 4480 * request has already be handled, so just return here 4481 */ 4482 if (!hdev->default_reset_request) 4483 return; 4484 4485 dev_info(&hdev->pdev->dev, 4486 "triggering reset in reset timer\n"); 4487 hclge_reset_event(hdev->pdev, NULL); 4488 } 4489 4490 static void hclge_reset_subtask(struct hclge_dev *hdev) 4491 { 4492 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4493 4494 /* check if there is any ongoing reset in the hardware. This status can 4495 * be checked from reset_pending. If there is then, we need to wait for 4496 * hardware to complete reset. 4497 * a. 
If we are able to figure out in reasonable time that hardware 4498 * has fully resetted then, we can proceed with driver, client 4499 * reset. 4500 * b. else, we can come back later to check this status so re-sched 4501 * now. 4502 */ 4503 hdev->last_reset_time = jiffies; 4504 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); 4505 if (hdev->reset_type != HNAE3_NONE_RESET) 4506 hclge_reset(hdev); 4507 4508 /* check if we got any *new* reset requests to be honored */ 4509 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); 4510 if (hdev->reset_type != HNAE3_NONE_RESET) 4511 hclge_do_reset(hdev); 4512 4513 hdev->reset_type = HNAE3_NONE_RESET; 4514 } 4515 4516 static void hclge_handle_err_reset_request(struct hclge_dev *hdev) 4517 { 4518 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4519 enum hnae3_reset_type reset_type; 4520 4521 if (ae_dev->hw_err_reset_req) { 4522 reset_type = hclge_get_reset_level(ae_dev, 4523 &ae_dev->hw_err_reset_req); 4524 hclge_set_def_reset_request(ae_dev, reset_type); 4525 } 4526 4527 if (hdev->default_reset_request && ae_dev->ops->reset_event) 4528 ae_dev->ops->reset_event(hdev->pdev, NULL); 4529 4530 /* enable interrupt after error handling complete */ 4531 hclge_enable_vector(&hdev->misc_vector, true); 4532 } 4533 4534 static void hclge_handle_err_recovery(struct hclge_dev *hdev) 4535 { 4536 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4537 4538 ae_dev->hw_err_reset_req = 0; 4539 4540 if (hclge_find_error_source(hdev)) { 4541 hclge_handle_error_info_log(ae_dev); 4542 hclge_handle_mac_tnl(hdev); 4543 hclge_handle_vf_queue_err_ras(hdev); 4544 } 4545 4546 hclge_handle_err_reset_request(hdev); 4547 } 4548 4549 static void hclge_misc_err_recovery(struct hclge_dev *hdev) 4550 { 4551 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4552 struct device *dev = &hdev->pdev->dev; 4553 u32 msix_sts_reg; 4554 4555 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); 4556 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { 4557 if (hclge_handle_hw_msix_error 4558 (hdev, &hdev->default_reset_request)) 4559 dev_info(dev, "received msix interrupt 0x%x\n", 4560 msix_sts_reg); 4561 } 4562 4563 hclge_handle_hw_ras_error(ae_dev); 4564 4565 hclge_handle_err_reset_request(hdev); 4566 } 4567 4568 static void hclge_errhand_service_task(struct hclge_dev *hdev) 4569 { 4570 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) 4571 return; 4572 4573 if (hnae3_dev_ras_imp_supported(hdev)) 4574 hclge_handle_err_recovery(hdev); 4575 else 4576 hclge_misc_err_recovery(hdev); 4577 } 4578 4579 static void hclge_reset_service_task(struct hclge_dev *hdev) 4580 { 4581 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 4582 return; 4583 4584 if (time_is_before_jiffies(hdev->last_rst_scheduled + 4585 HCLGE_RESET_SCHED_TIMEOUT)) 4586 dev_warn(&hdev->pdev->dev, 4587 "reset service task is scheduled after %ums on cpu%u!\n", 4588 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), 4589 smp_processor_id()); 4590 4591 down(&hdev->reset_sem); 4592 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 4593 4594 hclge_reset_subtask(hdev); 4595 4596 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 4597 up(&hdev->reset_sem); 4598 } 4599 4600 static void hclge_update_vport_alive(struct hclge_dev *hdev) 4601 { 4602 #define HCLGE_ALIVE_SECONDS_NORMAL 8 4603 4604 unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ; 4605 int i; 4606 4607 /* start from vport 1 for PF is always alive */ 
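	/* A VF is treated as no longer alive once its last_active_jiffies has
	 * not been refreshed within HCLGE_ALIVE_SECONDS_NORMAL seconds: its
	 * ALIVE bit is cleared and a heartbeat timeout warning is logged.
	 */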
4608 for (i = 1; i < hdev->num_alloc_vport; i++) { 4609 struct hclge_vport *vport = &hdev->vport[i]; 4610 4611 if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || 4612 !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 4613 continue; 4614 if (time_after(jiffies, vport->last_active_jiffies + 4615 alive_time)) { 4616 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 4617 dev_warn(&hdev->pdev->dev, 4618 "VF %u heartbeat timeout\n", 4619 i - HCLGE_VF_VPORT_START_NUM); 4620 } 4621 } 4622 } 4623 4624 static void hclge_periodic_service_task(struct hclge_dev *hdev) 4625 { 4626 unsigned long delta = round_jiffies_relative(HZ); 4627 4628 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) 4629 return; 4630 4631 /* Always handle the link updating to make sure link state is 4632 * updated when it is triggered by mbx. 4633 */ 4634 hclge_update_link_status(hdev); 4635 hclge_sync_mac_table(hdev); 4636 hclge_sync_promisc_mode(hdev); 4637 hclge_sync_fd_table(hdev); 4638 4639 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 4640 delta = jiffies - hdev->last_serv_processed; 4641 4642 if (delta < round_jiffies_relative(HZ)) { 4643 delta = round_jiffies_relative(HZ) - delta; 4644 goto out; 4645 } 4646 } 4647 4648 hdev->serv_processed_cnt++; 4649 hclge_update_vport_alive(hdev); 4650 4651 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { 4652 hdev->last_serv_processed = jiffies; 4653 goto out; 4654 } 4655 4656 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) 4657 hclge_update_stats_for_all(hdev); 4658 4659 hclge_update_port_info(hdev); 4660 hclge_sync_vlan_filter(hdev); 4661 4662 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) 4663 hclge_rfs_filter_expire(hdev); 4664 4665 hdev->last_serv_processed = jiffies; 4666 4667 out: 4668 hclge_task_schedule(hdev, delta); 4669 } 4670 4671 static void hclge_ptp_service_task(struct hclge_dev *hdev) 4672 { 4673 unsigned long flags; 4674 4675 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || 4676 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || 4677 !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) 4678 return; 4679 4680 /* to prevent concurrence with the irq handler */ 4681 spin_lock_irqsave(&hdev->ptp->lock, flags); 4682 4683 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq 4684 * handler may handle it just before spin_lock_irqsave(). 4685 */ 4686 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) 4687 hclge_ptp_clean_tx_hwts(hdev); 4688 4689 spin_unlock_irqrestore(&hdev->ptp->lock, flags); 4690 } 4691 4692 static void hclge_service_task(struct work_struct *work) 4693 { 4694 struct hclge_dev *hdev = 4695 container_of(work, struct hclge_dev, service_task.work); 4696 4697 hclge_errhand_service_task(hdev); 4698 hclge_reset_service_task(hdev); 4699 hclge_ptp_service_task(hdev); 4700 hclge_mailbox_service_task(hdev); 4701 hclge_periodic_service_task(hdev); 4702 4703 /* Handle error recovery, reset and mbx again in case periodical task 4704 * delays the handling by calling hclge_task_schedule() in 4705 * hclge_periodic_service_task(). 
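 * Each of these helpers returns early when its corresponding
 * SERVICE_SCHED state bit is not set, so the repeated calls are cheap.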
4706 */ 4707 hclge_errhand_service_task(hdev); 4708 hclge_reset_service_task(hdev); 4709 hclge_mailbox_service_task(hdev); 4710 } 4711 4712 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 4713 { 4714 /* VF handle has no client */ 4715 if (!handle->client) 4716 return container_of(handle, struct hclge_vport, nic); 4717 else if (handle->client->type == HNAE3_CLIENT_ROCE) 4718 return container_of(handle, struct hclge_vport, roce); 4719 else 4720 return container_of(handle, struct hclge_vport, nic); 4721 } 4722 4723 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, 4724 struct hnae3_vector_info *vector_info) 4725 { 4726 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64 4727 4728 vector_info->vector = pci_irq_vector(hdev->pdev, idx); 4729 4730 /* need an extend offset to config vector >= 64 */ 4731 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) 4732 vector_info->io_addr = hdev->hw.hw.io_base + 4733 HCLGE_VECTOR_REG_BASE + 4734 (idx - 1) * HCLGE_VECTOR_REG_OFFSET; 4735 else 4736 vector_info->io_addr = hdev->hw.hw.io_base + 4737 HCLGE_VECTOR_EXT_REG_BASE + 4738 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * 4739 HCLGE_VECTOR_REG_OFFSET_H + 4740 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * 4741 HCLGE_VECTOR_REG_OFFSET; 4742 4743 hdev->vector_status[idx] = hdev->vport[0].vport_id; 4744 hdev->vector_irq[idx] = vector_info->vector; 4745 } 4746 4747 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 4748 struct hnae3_vector_info *vector_info) 4749 { 4750 struct hclge_vport *vport = hclge_get_vport(handle); 4751 struct hnae3_vector_info *vector = vector_info; 4752 struct hclge_dev *hdev = vport->back; 4753 int alloc = 0; 4754 u16 i = 0; 4755 u16 j; 4756 4757 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); 4758 vector_num = min(hdev->num_msi_left, vector_num); 4759 4760 for (j = 0; j < vector_num; j++) { 4761 while (++i < hdev->num_nic_msi) { 4762 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 4763 hclge_get_vector_info(hdev, i, vector); 4764 vector++; 4765 alloc++; 4766 4767 break; 4768 } 4769 } 4770 } 4771 hdev->num_msi_left -= alloc; 4772 hdev->num_msi_used += alloc; 4773 4774 return alloc; 4775 } 4776 4777 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 4778 { 4779 int i; 4780 4781 for (i = 0; i < hdev->num_msi; i++) 4782 if (vector == hdev->vector_irq[i]) 4783 return i; 4784 4785 return -EINVAL; 4786 } 4787 4788 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 4789 { 4790 struct hclge_vport *vport = hclge_get_vport(handle); 4791 struct hclge_dev *hdev = vport->back; 4792 int vector_id; 4793 4794 vector_id = hclge_get_vector_index(hdev, vector); 4795 if (vector_id < 0) { 4796 dev_err(&hdev->pdev->dev, 4797 "Get vector index fail. 
vector = %d\n", vector); 4798 return vector_id; 4799 } 4800 4801 hclge_free_vector(hdev, vector_id); 4802 4803 return 0; 4804 } 4805 4806 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 4807 u8 *key, u8 *hfunc) 4808 { 4809 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 4810 struct hclge_vport *vport = hclge_get_vport(handle); 4811 struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg; 4812 4813 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); 4814 4815 hclge_comm_get_rss_indir_tbl(rss_cfg, indir, 4816 ae_dev->dev_specs.rss_ind_tbl_size); 4817 4818 return 0; 4819 } 4820 4821 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 4822 const u8 *key, const u8 hfunc) 4823 { 4824 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 4825 struct hclge_vport *vport = hclge_get_vport(handle); 4826 struct hclge_dev *hdev = vport->back; 4827 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 4828 int ret, i; 4829 4830 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); 4831 if (ret) { 4832 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); 4833 return ret; 4834 } 4835 4836 /* Update the shadow RSS table with user specified qids */ 4837 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) 4838 rss_cfg->rss_indirection_tbl[i] = indir[i]; 4839 4840 /* Update the hardware */ 4841 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, 4842 rss_cfg->rss_indirection_tbl); 4843 } 4844 4845 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 4846 struct ethtool_rxnfc *nfc) 4847 { 4848 struct hclge_vport *vport = hclge_get_vport(handle); 4849 struct hclge_dev *hdev = vport->back; 4850 int ret; 4851 4852 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, 4853 &hdev->rss_cfg, nfc); 4854 if (ret) { 4855 dev_err(&hdev->pdev->dev, 4856 "failed to set rss tuple, ret = %d.\n", ret); 4857 return ret; 4858 } 4859 4860 return 0; 4861 } 4862 4863 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 4864 struct ethtool_rxnfc *nfc) 4865 { 4866 struct hclge_vport *vport = hclge_get_vport(handle); 4867 u8 tuple_sets; 4868 int ret; 4869 4870 nfc->data = 0; 4871 4872 ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type, 4873 &tuple_sets); 4874 if (ret || !tuple_sets) 4875 return ret; 4876 4877 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); 4878 4879 return 0; 4880 } 4881 4882 static int hclge_get_tc_size(struct hnae3_handle *handle) 4883 { 4884 struct hclge_vport *vport = hclge_get_vport(handle); 4885 struct hclge_dev *hdev = vport->back; 4886 4887 return hdev->pf_rss_size_max; 4888 } 4889 4890 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) 4891 { 4892 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 4893 struct hclge_vport *vport = hdev->vport; 4894 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 4895 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; 4896 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 4897 struct hnae3_tc_info *tc_info; 4898 u16 roundup_size; 4899 u16 rss_size; 4900 int i; 4901 4902 tc_info = &vport->nic.kinfo.tc_info; 4903 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 4904 rss_size = tc_info->tqp_count[i]; 4905 tc_valid[i] = 0; 4906 4907 if (!(hdev->hw_tc_map & BIT(i))) 4908 continue; 4909 4910 /* tc_size set to hardware is the log2 of roundup power of two 4911 * of rss_size, the acutal queue size is limited by indirection 4912 * table. 
4913 */ 4914 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || 4915 rss_size == 0) { 4916 dev_err(&hdev->pdev->dev, 4917 "Configure rss tc size failed, invalid TC_SIZE = %u\n", 4918 rss_size); 4919 return -EINVAL; 4920 } 4921 4922 roundup_size = roundup_pow_of_two(rss_size); 4923 roundup_size = ilog2(roundup_size); 4924 4925 tc_valid[i] = 1; 4926 tc_size[i] = roundup_size; 4927 tc_offset[i] = tc_info->tqp_offset[i]; 4928 } 4929 4930 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, 4931 tc_size); 4932 } 4933 4934 int hclge_rss_init_hw(struct hclge_dev *hdev) 4935 { 4936 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; 4937 u8 *key = hdev->rss_cfg.rss_hash_key; 4938 u8 hfunc = hdev->rss_cfg.rss_algo; 4939 int ret; 4940 4941 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 4942 rss_indir); 4943 if (ret) 4944 return ret; 4945 4946 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); 4947 if (ret) 4948 return ret; 4949 4950 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg); 4951 if (ret) 4952 return ret; 4953 4954 return hclge_init_rss_tc_mode(hdev); 4955 } 4956 4957 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 4958 int vector_id, bool en, 4959 struct hnae3_ring_chain_node *ring_chain) 4960 { 4961 struct hclge_dev *hdev = vport->back; 4962 struct hnae3_ring_chain_node *node; 4963 struct hclge_desc desc; 4964 struct hclge_ctrl_vector_chain_cmd *req = 4965 (struct hclge_ctrl_vector_chain_cmd *)desc.data; 4966 enum hclge_comm_cmd_status status; 4967 enum hclge_opcode_type op; 4968 u16 tqp_type_and_id; 4969 int i; 4970 4971 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 4972 hclge_cmd_setup_basic_desc(&desc, op, false); 4973 req->int_vector_id_l = hnae3_get_field(vector_id, 4974 HCLGE_VECTOR_ID_L_M, 4975 HCLGE_VECTOR_ID_L_S); 4976 req->int_vector_id_h = hnae3_get_field(vector_id, 4977 HCLGE_VECTOR_ID_H_M, 4978 HCLGE_VECTOR_ID_H_S); 4979 4980 i = 0; 4981 for (node = ring_chain; node; node = node->next) { 4982 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 4983 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 4984 HCLGE_INT_TYPE_S, 4985 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); 4986 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 4987 HCLGE_TQP_ID_S, node->tqp_index); 4988 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 4989 HCLGE_INT_GL_IDX_S, 4990 hnae3_get_field(node->int_gl_idx, 4991 HNAE3_RING_GL_IDX_M, 4992 HNAE3_RING_GL_IDX_S)); 4993 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 4994 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 4995 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 4996 req->vfid = vport->vport_id; 4997 4998 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4999 if (status) { 5000 dev_err(&hdev->pdev->dev, 5001 "Map TQP fail, status is %d.\n", 5002 status); 5003 return -EIO; 5004 } 5005 i = 0; 5006 5007 hclge_cmd_setup_basic_desc(&desc, 5008 op, 5009 false); 5010 req->int_vector_id_l = 5011 hnae3_get_field(vector_id, 5012 HCLGE_VECTOR_ID_L_M, 5013 HCLGE_VECTOR_ID_L_S); 5014 req->int_vector_id_h = 5015 hnae3_get_field(vector_id, 5016 HCLGE_VECTOR_ID_H_M, 5017 HCLGE_VECTOR_ID_H_S); 5018 } 5019 } 5020 5021 if (i > 0) { 5022 req->int_cause_num = i; 5023 req->vfid = vport->vport_id; 5024 status = hclge_cmd_send(&hdev->hw, &desc, 1); 5025 if (status) { 5026 dev_err(&hdev->pdev->dev, 5027 "Map TQP fail, status is %d.\n", status); 5028 return -EIO; 5029 } 5030 } 5031 5032 return 0; 5033 } 5034 5035 static int hclge_map_ring_to_vector(struct hnae3_handle 
*handle, int vector, 5036 struct hnae3_ring_chain_node *ring_chain) 5037 { 5038 struct hclge_vport *vport = hclge_get_vport(handle); 5039 struct hclge_dev *hdev = vport->back; 5040 int vector_id; 5041 5042 vector_id = hclge_get_vector_index(hdev, vector); 5043 if (vector_id < 0) { 5044 dev_err(&hdev->pdev->dev, 5045 "failed to get vector index. vector=%d\n", vector); 5046 return vector_id; 5047 } 5048 5049 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 5050 } 5051 5052 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, 5053 struct hnae3_ring_chain_node *ring_chain) 5054 { 5055 struct hclge_vport *vport = hclge_get_vport(handle); 5056 struct hclge_dev *hdev = vport->back; 5057 int vector_id, ret; 5058 5059 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 5060 return 0; 5061 5062 vector_id = hclge_get_vector_index(hdev, vector); 5063 if (vector_id < 0) { 5064 dev_err(&handle->pdev->dev, 5065 "Get vector index fail. ret =%d\n", vector_id); 5066 return vector_id; 5067 } 5068 5069 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 5070 if (ret) 5071 dev_err(&handle->pdev->dev, 5072 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 5073 vector_id, ret); 5074 5075 return ret; 5076 } 5077 5078 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, 5079 bool en_uc, bool en_mc, bool en_bc) 5080 { 5081 struct hclge_vport *vport = &hdev->vport[vf_id]; 5082 struct hnae3_handle *handle = &vport->nic; 5083 struct hclge_promisc_cfg_cmd *req; 5084 struct hclge_desc desc; 5085 bool uc_tx_en = en_uc; 5086 u8 promisc_cfg = 0; 5087 int ret; 5088 5089 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 5090 5091 req = (struct hclge_promisc_cfg_cmd *)desc.data; 5092 req->vf_id = vf_id; 5093 5094 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) 5095 uc_tx_en = false; 5096 5097 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0); 5098 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0); 5099 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0); 5100 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0); 5101 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0); 5102 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0); 5103 req->extend_promisc = promisc_cfg; 5104 5105 /* to be compatible with DEVICE_VERSION_V1/2 */ 5106 promisc_cfg = 0; 5107 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0); 5108 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0); 5109 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 
1 : 0); 5110 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1); 5111 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1); 5112 req->promisc = promisc_cfg; 5113 5114 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5115 if (ret) 5116 dev_err(&hdev->pdev->dev, 5117 "failed to set vport %u promisc mode, ret = %d.\n", 5118 vf_id, ret); 5119 5120 return ret; 5121 } 5122 5123 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, 5124 bool en_mc_pmc, bool en_bc_pmc) 5125 { 5126 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id, 5127 en_uc_pmc, en_mc_pmc, en_bc_pmc); 5128 } 5129 5130 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 5131 bool en_mc_pmc) 5132 { 5133 struct hclge_vport *vport = hclge_get_vport(handle); 5134 struct hclge_dev *hdev = vport->back; 5135 bool en_bc_pmc = true; 5136 5137 /* For device whose version below V2, if broadcast promisc enabled, 5138 * vlan filter is always bypassed. So broadcast promisc should be 5139 * disabled until user enable promisc mode 5140 */ 5141 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 5142 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false; 5143 5144 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, 5145 en_bc_pmc); 5146 } 5147 5148 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) 5149 { 5150 struct hclge_vport *vport = hclge_get_vport(handle); 5151 5152 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 5153 } 5154 5155 static void hclge_sync_fd_state(struct hclge_dev *hdev) 5156 { 5157 if (hlist_empty(&hdev->fd_rule_list)) 5158 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 5159 } 5160 5161 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) 5162 { 5163 if (!test_bit(location, hdev->fd_bmap)) { 5164 set_bit(location, hdev->fd_bmap); 5165 hdev->hclge_fd_rule_num++; 5166 } 5167 } 5168 5169 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) 5170 { 5171 if (test_bit(location, hdev->fd_bmap)) { 5172 clear_bit(location, hdev->fd_bmap); 5173 hdev->hclge_fd_rule_num--; 5174 } 5175 } 5176 5177 static void hclge_fd_free_node(struct hclge_dev *hdev, 5178 struct hclge_fd_rule *rule) 5179 { 5180 hlist_del(&rule->rule_node); 5181 kfree(rule); 5182 hclge_sync_fd_state(hdev); 5183 } 5184 5185 static void hclge_update_fd_rule_node(struct hclge_dev *hdev, 5186 struct hclge_fd_rule *old_rule, 5187 struct hclge_fd_rule *new_rule, 5188 enum HCLGE_FD_NODE_STATE state) 5189 { 5190 switch (state) { 5191 case HCLGE_FD_TO_ADD: 5192 case HCLGE_FD_ACTIVE: 5193 /* 1) if the new state is TO_ADD, just replace the old rule 5194 * with the same location, no matter its state, because the 5195 * new rule will be configured to the hardware. 5196 * 2) if the new state is ACTIVE, it means the new rule 5197 * has been configured to the hardware, so just replace 5198 * the old rule node with the same location. 5199 * 3) for it doesn't add a new node to the list, so it's 5200 * unnecessary to update the rule number and fd_bmap. 
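 * The hlist pointers of the old node are copied into the new rule first,
 * so the memcpy() below can overwrite the old rule's content in place
 * without breaking the list linkage; the new rule is then freed.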
5201 */ 5202 new_rule->rule_node.next = old_rule->rule_node.next; 5203 new_rule->rule_node.pprev = old_rule->rule_node.pprev; 5204 memcpy(old_rule, new_rule, sizeof(*old_rule)); 5205 kfree(new_rule); 5206 break; 5207 case HCLGE_FD_DELETED: 5208 hclge_fd_dec_rule_cnt(hdev, old_rule->location); 5209 hclge_fd_free_node(hdev, old_rule); 5210 break; 5211 case HCLGE_FD_TO_DEL: 5212 /* if new request is TO_DEL, and old rule is existent 5213 * 1) the state of old rule is TO_DEL, we need do nothing, 5214 * because we delete rule by location, other rule content 5215 * is unncessary. 5216 * 2) the state of old rule is ACTIVE, we need to change its 5217 * state to TO_DEL, so the rule will be deleted when periodic 5218 * task being scheduled. 5219 * 3) the state of old rule is TO_ADD, it means the rule hasn't 5220 * been added to hardware, so we just delete the rule node from 5221 * fd_rule_list directly. 5222 */ 5223 if (old_rule->state == HCLGE_FD_TO_ADD) { 5224 hclge_fd_dec_rule_cnt(hdev, old_rule->location); 5225 hclge_fd_free_node(hdev, old_rule); 5226 return; 5227 } 5228 old_rule->state = HCLGE_FD_TO_DEL; 5229 break; 5230 } 5231 } 5232 5233 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist, 5234 u16 location, 5235 struct hclge_fd_rule **parent) 5236 { 5237 struct hclge_fd_rule *rule; 5238 struct hlist_node *node; 5239 5240 hlist_for_each_entry_safe(rule, node, hlist, rule_node) { 5241 if (rule->location == location) 5242 return rule; 5243 else if (rule->location > location) 5244 return NULL; 5245 /* record the parent node, use to keep the nodes in fd_rule_list 5246 * in ascend order. 5247 */ 5248 *parent = rule; 5249 } 5250 5251 return NULL; 5252 } 5253 5254 /* insert fd rule node in ascend order according to rule->location */ 5255 static void hclge_fd_insert_rule_node(struct hlist_head *hlist, 5256 struct hclge_fd_rule *rule, 5257 struct hclge_fd_rule *parent) 5258 { 5259 INIT_HLIST_NODE(&rule->rule_node); 5260 5261 if (parent) 5262 hlist_add_behind(&rule->rule_node, &parent->rule_node); 5263 else 5264 hlist_add_head(&rule->rule_node, hlist); 5265 } 5266 5267 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, 5268 struct hclge_fd_user_def_cfg *cfg) 5269 { 5270 struct hclge_fd_user_def_cfg_cmd *req; 5271 struct hclge_desc desc; 5272 u16 data = 0; 5273 int ret; 5274 5275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false); 5276 5277 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data; 5278 5279 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0); 5280 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, 5281 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset); 5282 req->ol2_cfg = cpu_to_le16(data); 5283 5284 data = 0; 5285 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0); 5286 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, 5287 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset); 5288 req->ol3_cfg = cpu_to_le16(data); 5289 5290 data = 0; 5291 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0); 5292 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, 5293 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset); 5294 req->ol4_cfg = cpu_to_le16(data); 5295 5296 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5297 if (ret) 5298 dev_err(&hdev->pdev->dev, 5299 "failed to set fd user def data, ret= %d\n", ret); 5300 return ret; 5301 } 5302 5303 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) 5304 { 5305 int ret; 5306 5307 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) 5308 return; 5309 5310 if (!locked) 5311 
spin_lock_bh(&hdev->fd_rule_lock); 5312 5313 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); 5314 if (ret) 5315 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5316 5317 if (!locked) 5318 spin_unlock_bh(&hdev->fd_rule_lock); 5319 } 5320 5321 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, 5322 struct hclge_fd_rule *rule) 5323 { 5324 struct hlist_head *hlist = &hdev->fd_rule_list; 5325 struct hclge_fd_rule *fd_rule, *parent = NULL; 5326 struct hclge_fd_user_def_info *info, *old_info; 5327 struct hclge_fd_user_def_cfg *cfg; 5328 5329 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5330 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5331 return 0; 5332 5333 /* for valid layer is start from 1, so need minus 1 to get the cfg */ 5334 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5335 info = &rule->ep.user_def; 5336 5337 if (!cfg->ref_cnt || cfg->offset == info->offset) 5338 return 0; 5339 5340 if (cfg->ref_cnt > 1) 5341 goto error; 5342 5343 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); 5344 if (fd_rule) { 5345 old_info = &fd_rule->ep.user_def; 5346 if (info->layer == old_info->layer) 5347 return 0; 5348 } 5349 5350 error: 5351 dev_err(&hdev->pdev->dev, 5352 "No available offset for layer%d fd rule, each layer only support one user def offset.\n", 5353 info->layer + 1); 5354 return -ENOSPC; 5355 } 5356 5357 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, 5358 struct hclge_fd_rule *rule) 5359 { 5360 struct hclge_fd_user_def_cfg *cfg; 5361 5362 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5363 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5364 return; 5365 5366 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5367 if (!cfg->ref_cnt) { 5368 cfg->offset = rule->ep.user_def.offset; 5369 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5370 } 5371 cfg->ref_cnt++; 5372 } 5373 5374 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, 5375 struct hclge_fd_rule *rule) 5376 { 5377 struct hclge_fd_user_def_cfg *cfg; 5378 5379 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5380 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5381 return; 5382 5383 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5384 if (!cfg->ref_cnt) 5385 return; 5386 5387 cfg->ref_cnt--; 5388 if (!cfg->ref_cnt) { 5389 cfg->offset = 0; 5390 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5391 } 5392 } 5393 5394 static void hclge_update_fd_list(struct hclge_dev *hdev, 5395 enum HCLGE_FD_NODE_STATE state, u16 location, 5396 struct hclge_fd_rule *new_rule) 5397 { 5398 struct hlist_head *hlist = &hdev->fd_rule_list; 5399 struct hclge_fd_rule *fd_rule, *parent = NULL; 5400 5401 fd_rule = hclge_find_fd_rule(hlist, location, &parent); 5402 if (fd_rule) { 5403 hclge_fd_dec_user_def_refcnt(hdev, fd_rule); 5404 if (state == HCLGE_FD_ACTIVE) 5405 hclge_fd_inc_user_def_refcnt(hdev, new_rule); 5406 hclge_sync_fd_user_def_cfg(hdev, true); 5407 5408 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); 5409 return; 5410 } 5411 5412 /* it's unlikely to fail here, because we have checked the rule 5413 * exist before. 
5414 */ 5415 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) { 5416 dev_warn(&hdev->pdev->dev, 5417 "failed to delete fd rule %u, it's inexistent\n", 5418 location); 5419 return; 5420 } 5421 5422 hclge_fd_inc_user_def_refcnt(hdev, new_rule); 5423 hclge_sync_fd_user_def_cfg(hdev, true); 5424 5425 hclge_fd_insert_rule_node(hlist, new_rule, parent); 5426 hclge_fd_inc_rule_cnt(hdev, new_rule->location); 5427 5428 if (state == HCLGE_FD_TO_ADD) { 5429 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 5430 hclge_task_schedule(hdev, 0); 5431 } 5432 } 5433 5434 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) 5435 { 5436 struct hclge_get_fd_mode_cmd *req; 5437 struct hclge_desc desc; 5438 int ret; 5439 5440 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); 5441 5442 req = (struct hclge_get_fd_mode_cmd *)desc.data; 5443 5444 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5445 if (ret) { 5446 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); 5447 return ret; 5448 } 5449 5450 *fd_mode = req->mode; 5451 5452 return ret; 5453 } 5454 5455 static int hclge_get_fd_allocation(struct hclge_dev *hdev, 5456 u32 *stage1_entry_num, 5457 u32 *stage2_entry_num, 5458 u16 *stage1_counter_num, 5459 u16 *stage2_counter_num) 5460 { 5461 struct hclge_get_fd_allocation_cmd *req; 5462 struct hclge_desc desc; 5463 int ret; 5464 5465 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); 5466 5467 req = (struct hclge_get_fd_allocation_cmd *)desc.data; 5468 5469 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5470 if (ret) { 5471 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", 5472 ret); 5473 return ret; 5474 } 5475 5476 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); 5477 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); 5478 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); 5479 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); 5480 5481 return ret; 5482 } 5483 5484 static int hclge_set_fd_key_config(struct hclge_dev *hdev, 5485 enum HCLGE_FD_STAGE stage_num) 5486 { 5487 struct hclge_set_fd_key_config_cmd *req; 5488 struct hclge_fd_key_cfg *stage; 5489 struct hclge_desc desc; 5490 int ret; 5491 5492 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); 5493 5494 req = (struct hclge_set_fd_key_config_cmd *)desc.data; 5495 stage = &hdev->fd_cfg.key_cfg[stage_num]; 5496 req->stage = stage_num; 5497 req->key_select = stage->key_sel; 5498 req->inner_sipv6_word_en = stage->inner_sipv6_word_en; 5499 req->inner_dipv6_word_en = stage->inner_dipv6_word_en; 5500 req->outer_sipv6_word_en = stage->outer_sipv6_word_en; 5501 req->outer_dipv6_word_en = stage->outer_dipv6_word_en; 5502 req->tuple_mask = cpu_to_le32(~stage->tuple_active); 5503 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); 5504 5505 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5506 if (ret) 5507 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); 5508 5509 return ret; 5510 } 5511 5512 static void hclge_fd_disable_user_def(struct hclge_dev *hdev) 5513 { 5514 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; 5515 5516 spin_lock_bh(&hdev->fd_rule_lock); 5517 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); 5518 spin_unlock_bh(&hdev->fd_rule_lock); 5519 5520 hclge_fd_set_user_def_cmd(hdev, cfg); 5521 } 5522 5523 static int hclge_init_fd_config(struct hclge_dev *hdev) 5524 { 5525 #define LOW_2_WORDS 0x03 5526 struct hclge_fd_key_cfg *key_cfg; 5527 int ret; 5528 5529 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 
5530 return 0; 5531 5532 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); 5533 if (ret) 5534 return ret; 5535 5536 switch (hdev->fd_cfg.fd_mode) { 5537 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: 5538 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; 5539 break; 5540 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: 5541 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; 5542 break; 5543 default: 5544 dev_err(&hdev->pdev->dev, 5545 "Unsupported flow director mode %u\n", 5546 hdev->fd_cfg.fd_mode); 5547 return -EOPNOTSUPP; 5548 } 5549 5550 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; 5551 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; 5552 key_cfg->inner_sipv6_word_en = LOW_2_WORDS; 5553 key_cfg->inner_dipv6_word_en = LOW_2_WORDS; 5554 key_cfg->outer_sipv6_word_en = 0; 5555 key_cfg->outer_dipv6_word_en = 0; 5556 5557 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | 5558 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | 5559 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 5560 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5561 5562 /* If use max 400bit key, we can support tuples for ether type */ 5563 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 5564 key_cfg->tuple_active |= 5565 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); 5566 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 5567 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; 5568 } 5569 5570 /* roce_type is used to filter roce frames 5571 * dst_vport is used to specify the rule 5572 */ 5573 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); 5574 5575 ret = hclge_get_fd_allocation(hdev, 5576 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 5577 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], 5578 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], 5579 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); 5580 if (ret) 5581 return ret; 5582 5583 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); 5584 } 5585 5586 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, 5587 int loc, u8 *key, bool is_add) 5588 { 5589 struct hclge_fd_tcam_config_1_cmd *req1; 5590 struct hclge_fd_tcam_config_2_cmd *req2; 5591 struct hclge_fd_tcam_config_3_cmd *req3; 5592 struct hclge_desc desc[3]; 5593 int ret; 5594 5595 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); 5596 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 5597 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); 5598 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 5599 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); 5600 5601 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 5602 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 5603 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 5604 5605 req1->stage = stage; 5606 req1->xy_sel = sel_x ? 1 : 0; 5607 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); 5608 req1->index = cpu_to_le32(loc); 5609 req1->entry_vld = sel_x ? 
is_add : 0; 5610 5611 if (key) { 5612 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); 5613 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], 5614 sizeof(req2->tcam_data)); 5615 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + 5616 sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); 5617 } 5618 5619 ret = hclge_cmd_send(&hdev->hw, desc, 3); 5620 if (ret) 5621 dev_err(&hdev->pdev->dev, 5622 "config tcam key fail, ret=%d\n", 5623 ret); 5624 5625 return ret; 5626 } 5627 5628 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, 5629 struct hclge_fd_ad_data *action) 5630 { 5631 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 5632 struct hclge_fd_ad_config_cmd *req; 5633 struct hclge_desc desc; 5634 u64 ad_data = 0; 5635 int ret; 5636 5637 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); 5638 5639 req = (struct hclge_fd_ad_config_cmd *)desc.data; 5640 req->index = cpu_to_le32(loc); 5641 req->stage = stage; 5642 5643 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, 5644 action->write_rule_id_to_bd); 5645 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, 5646 action->rule_id); 5647 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { 5648 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, 5649 action->override_tc); 5650 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, 5651 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); 5652 } 5653 ad_data <<= 32; 5654 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); 5655 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, 5656 action->forward_to_direct_queue); 5657 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, 5658 action->queue_id); 5659 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); 5660 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, 5661 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); 5662 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); 5663 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, 5664 action->counter_id); 5665 5666 req->ad_data = cpu_to_le64(ad_data); 5667 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5668 if (ret) 5669 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); 5670 5671 return ret; 5672 } 5673 5674 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, 5675 struct hclge_fd_rule *rule) 5676 { 5677 int offset, moffset, ip_offset; 5678 enum HCLGE_FD_KEY_OPT key_opt; 5679 u16 tmp_x_s, tmp_y_s; 5680 u32 tmp_x_l, tmp_y_l; 5681 u8 *p = (u8 *)rule; 5682 int i; 5683 5684 if (rule->unused_tuple & BIT(tuple_bit)) 5685 return true; 5686 5687 key_opt = tuple_key_info[tuple_bit].key_opt; 5688 offset = tuple_key_info[tuple_bit].offset; 5689 moffset = tuple_key_info[tuple_bit].moffset; 5690 5691 switch (key_opt) { 5692 case KEY_OPT_U8: 5693 calc_x(*key_x, p[offset], p[moffset]); 5694 calc_y(*key_y, p[offset], p[moffset]); 5695 5696 return true; 5697 case KEY_OPT_LE16: 5698 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); 5699 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); 5700 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 5701 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 5702 5703 return true; 5704 case KEY_OPT_LE32: 5705 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); 5706 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); 5707 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5708 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5709 5710 return true; 5711 case KEY_OPT_MAC: 5712 for (i = 0; i < ETH_ALEN; i++) 
{ 5713 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], 5714 p[moffset + i]); 5715 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], 5716 p[moffset + i]); 5717 } 5718 5719 return true; 5720 case KEY_OPT_IP: 5721 ip_offset = IPV4_INDEX * sizeof(u32); 5722 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), 5723 *(u32 *)(&p[moffset + ip_offset])); 5724 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), 5725 *(u32 *)(&p[moffset + ip_offset])); 5726 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5727 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5728 5729 return true; 5730 default: 5731 return false; 5732 } 5733 } 5734 5735 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, 5736 u8 vf_id, u8 network_port_id) 5737 { 5738 u32 port_number = 0; 5739 5740 if (port_type == HOST_PORT) { 5741 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, 5742 pf_id); 5743 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, 5744 vf_id); 5745 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); 5746 } else { 5747 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, 5748 HCLGE_NETWORK_PORT_ID_S, network_port_id); 5749 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); 5750 } 5751 5752 return port_number; 5753 } 5754 5755 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, 5756 __le32 *key_x, __le32 *key_y, 5757 struct hclge_fd_rule *rule) 5758 { 5759 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; 5760 u8 cur_pos = 0, tuple_size, shift_bits; 5761 unsigned int i; 5762 5763 for (i = 0; i < MAX_META_DATA; i++) { 5764 tuple_size = meta_data_key_info[i].key_length; 5765 tuple_bit = key_cfg->meta_data_active & BIT(i); 5766 5767 switch (tuple_bit) { 5768 case BIT(ROCE_TYPE): 5769 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); 5770 cur_pos += tuple_size; 5771 break; 5772 case BIT(DST_VPORT): 5773 port_number = hclge_get_port_number(HOST_PORT, 0, 5774 rule->vf_id, 0); 5775 hnae3_set_field(meta_data, 5776 GENMASK(cur_pos + tuple_size, cur_pos), 5777 cur_pos, port_number); 5778 cur_pos += tuple_size; 5779 break; 5780 default: 5781 break; 5782 } 5783 } 5784 5785 calc_x(tmp_x, meta_data, 0xFFFFFFFF); 5786 calc_y(tmp_y, meta_data, 0xFFFFFFFF); 5787 shift_bits = sizeof(meta_data) * 8 - cur_pos; 5788 5789 *key_x = cpu_to_le32(tmp_x << shift_bits); 5790 *key_y = cpu_to_le32(tmp_y << shift_bits); 5791 } 5792 5793 /* A complete key is combined with meta data key and tuple key. 5794 * Meta data key is stored at the MSB region, and tuple key is stored at 5795 * the LSB region, unused bits will be filled 0. 
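 * The meta data region therefore begins max_key_length / 8 - MAX_META_DATA_LENGTH / 8 bytes into the key buffer; see the offset calculation in hclge_config_key() below.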
5796 */ 5797 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, 5798 struct hclge_fd_rule *rule) 5799 { 5800 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; 5801 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; 5802 u8 *cur_key_x, *cur_key_y; 5803 u8 meta_data_region; 5804 u8 tuple_size; 5805 int ret; 5806 u32 i; 5807 5808 memset(key_x, 0, sizeof(key_x)); 5809 memset(key_y, 0, sizeof(key_y)); 5810 cur_key_x = key_x; 5811 cur_key_y = key_y; 5812 5813 for (i = 0; i < MAX_TUPLE; i++) { 5814 bool tuple_valid; 5815 5816 tuple_size = tuple_key_info[i].key_length / 8; 5817 if (!(key_cfg->tuple_active & BIT(i))) 5818 continue; 5819 5820 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x, 5821 cur_key_y, rule); 5822 if (tuple_valid) { 5823 cur_key_x += tuple_size; 5824 cur_key_y += tuple_size; 5825 } 5826 } 5827 5828 meta_data_region = hdev->fd_cfg.max_key_length / 8 - 5829 MAX_META_DATA_LENGTH / 8; 5830 5831 hclge_fd_convert_meta_data(key_cfg, 5832 (__le32 *)(key_x + meta_data_region), 5833 (__le32 *)(key_y + meta_data_region), 5834 rule); 5835 5836 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, 5837 true); 5838 if (ret) { 5839 dev_err(&hdev->pdev->dev, 5840 "fd key_y config fail, loc=%u, ret=%d\n", 5841 rule->queue_id, ret); 5842 return ret; 5843 } 5844 5845 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, 5846 true); 5847 if (ret) 5848 dev_err(&hdev->pdev->dev, 5849 "fd key_x config fail, loc=%u, ret=%d\n", 5850 rule->queue_id, ret); 5851 return ret; 5852 } 5853 5854 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, 5855 struct hclge_fd_rule *rule) 5856 { 5857 struct hclge_vport *vport = hdev->vport; 5858 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5859 struct hclge_fd_ad_data ad_data; 5860 5861 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); 5862 ad_data.ad_id = rule->location; 5863 5864 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 5865 ad_data.drop_packet = true; 5866 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { 5867 ad_data.override_tc = true; 5868 ad_data.queue_id = 5869 kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; 5870 ad_data.tc_size = 5871 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); 5872 } else { 5873 ad_data.forward_to_direct_queue = true; 5874 ad_data.queue_id = rule->queue_id; 5875 } 5876 5877 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { 5878 ad_data.use_counter = true; 5879 ad_data.counter_id = rule->vf_id % 5880 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; 5881 } else { 5882 ad_data.use_counter = false; 5883 ad_data.counter_id = 0; 5884 } 5885 5886 ad_data.use_next_stage = false; 5887 ad_data.next_input_key = 0; 5888 5889 ad_data.write_rule_id_to_bd = true; 5890 ad_data.rule_id = rule->location; 5891 5892 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); 5893 } 5894 5895 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec, 5896 u32 *unused_tuple) 5897 { 5898 if (!spec || !unused_tuple) 5899 return -EINVAL; 5900 5901 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 5902 5903 if (!spec->ip4src) 5904 *unused_tuple |= BIT(INNER_SRC_IP); 5905 5906 if (!spec->ip4dst) 5907 *unused_tuple |= BIT(INNER_DST_IP); 5908 5909 if (!spec->psrc) 5910 *unused_tuple |= BIT(INNER_SRC_PORT); 5911 5912 if (!spec->pdst) 5913 *unused_tuple |= BIT(INNER_DST_PORT); 5914 5915 if (!spec->tos) 5916 *unused_tuple |= BIT(INNER_IP_TOS); 5917 5918 return 0; 5919 } 5920 5921 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, 
5922 u32 *unused_tuple) 5923 { 5924 if (!spec || !unused_tuple) 5925 return -EINVAL; 5926 5927 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5928 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5929 5930 if (!spec->ip4src) 5931 *unused_tuple |= BIT(INNER_SRC_IP); 5932 5933 if (!spec->ip4dst) 5934 *unused_tuple |= BIT(INNER_DST_IP); 5935 5936 if (!spec->tos) 5937 *unused_tuple |= BIT(INNER_IP_TOS); 5938 5939 if (!spec->proto) 5940 *unused_tuple |= BIT(INNER_IP_PROTO); 5941 5942 if (spec->l4_4_bytes) 5943 return -EOPNOTSUPP; 5944 5945 if (spec->ip_ver != ETH_RX_NFC_IP4) 5946 return -EOPNOTSUPP; 5947 5948 return 0; 5949 } 5950 5951 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, 5952 u32 *unused_tuple) 5953 { 5954 if (!spec || !unused_tuple) 5955 return -EINVAL; 5956 5957 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 5958 5959 /* check whether src/dst ip address used */ 5960 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) 5961 *unused_tuple |= BIT(INNER_SRC_IP); 5962 5963 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) 5964 *unused_tuple |= BIT(INNER_DST_IP); 5965 5966 if (!spec->psrc) 5967 *unused_tuple |= BIT(INNER_SRC_PORT); 5968 5969 if (!spec->pdst) 5970 *unused_tuple |= BIT(INNER_DST_PORT); 5971 5972 if (!spec->tclass) 5973 *unused_tuple |= BIT(INNER_IP_TOS); 5974 5975 return 0; 5976 } 5977 5978 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, 5979 u32 *unused_tuple) 5980 { 5981 if (!spec || !unused_tuple) 5982 return -EINVAL; 5983 5984 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5985 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5986 5987 /* check whether src/dst ip address used */ 5988 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) 5989 *unused_tuple |= BIT(INNER_SRC_IP); 5990 5991 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) 5992 *unused_tuple |= BIT(INNER_DST_IP); 5993 5994 if (!spec->l4_proto) 5995 *unused_tuple |= BIT(INNER_IP_PROTO); 5996 5997 if (!spec->tclass) 5998 *unused_tuple |= BIT(INNER_IP_TOS); 5999 6000 if (spec->l4_4_bytes) 6001 return -EOPNOTSUPP; 6002 6003 return 0; 6004 } 6005 6006 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) 6007 { 6008 if (!spec || !unused_tuple) 6009 return -EINVAL; 6010 6011 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 6012 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | 6013 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); 6014 6015 if (is_zero_ether_addr(spec->h_source)) 6016 *unused_tuple |= BIT(INNER_SRC_MAC); 6017 6018 if (is_zero_ether_addr(spec->h_dest)) 6019 *unused_tuple |= BIT(INNER_DST_MAC); 6020 6021 if (!spec->h_proto) 6022 *unused_tuple |= BIT(INNER_ETH_TYPE); 6023 6024 return 0; 6025 } 6026 6027 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, 6028 struct ethtool_rx_flow_spec *fs, 6029 u32 *unused_tuple) 6030 { 6031 if (fs->flow_type & FLOW_EXT) { 6032 if (fs->h_ext.vlan_etype) { 6033 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); 6034 return -EOPNOTSUPP; 6035 } 6036 6037 if (!fs->h_ext.vlan_tci) 6038 *unused_tuple |= BIT(INNER_VLAN_TAG_FST); 6039 6040 if (fs->m_ext.vlan_tci && 6041 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { 6042 dev_err(&hdev->pdev->dev, 6043 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n", 6044 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); 6045 return -EINVAL; 6046 } 6047 } else { 6048 *unused_tuple |= BIT(INNER_VLAN_TAG_FST); 6049 } 6050 6051 if (fs->flow_type & FLOW_MAC_EXT) { 6052 if (hdev->fd_cfg.fd_mode != 6053 
HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 6054 dev_err(&hdev->pdev->dev, 6055 "FLOW_MAC_EXT is not supported in current fd mode!\n"); 6056 return -EOPNOTSUPP; 6057 } 6058 6059 if (is_zero_ether_addr(fs->h_ext.h_dest)) 6060 *unused_tuple |= BIT(INNER_DST_MAC); 6061 else 6062 *unused_tuple &= ~BIT(INNER_DST_MAC); 6063 } 6064 6065 return 0; 6066 } 6067 6068 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, 6069 struct hclge_fd_user_def_info *info) 6070 { 6071 switch (flow_type) { 6072 case ETHER_FLOW: 6073 info->layer = HCLGE_FD_USER_DEF_L2; 6074 *unused_tuple &= ~BIT(INNER_L2_RSV); 6075 break; 6076 case IP_USER_FLOW: 6077 case IPV6_USER_FLOW: 6078 info->layer = HCLGE_FD_USER_DEF_L3; 6079 *unused_tuple &= ~BIT(INNER_L3_RSV); 6080 break; 6081 case TCP_V4_FLOW: 6082 case UDP_V4_FLOW: 6083 case TCP_V6_FLOW: 6084 case UDP_V6_FLOW: 6085 info->layer = HCLGE_FD_USER_DEF_L4; 6086 *unused_tuple &= ~BIT(INNER_L4_RSV); 6087 break; 6088 default: 6089 return -EOPNOTSUPP; 6090 } 6091 6092 return 0; 6093 } 6094 6095 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) 6096 { 6097 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; 6098 } 6099 6100 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, 6101 struct ethtool_rx_flow_spec *fs, 6102 u32 *unused_tuple, 6103 struct hclge_fd_user_def_info *info) 6104 { 6105 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; 6106 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6107 u16 data, offset, data_mask, offset_mask; 6108 int ret; 6109 6110 info->layer = HCLGE_FD_USER_DEF_NONE; 6111 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; 6112 6113 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) 6114 return 0; 6115 6116 /* user-def data from ethtool is 64 bit value, the bit0~15 is used 6117 * for data, and bit32~47 is used for offset. 
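 * In the ethtool layout this means data[1] carries the data halfword and data[0] carries the offset halfword once converted from big endian, which is what the masking below extracts.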
6118 */ 6119 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; 6120 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; 6121 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; 6122 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; 6123 6124 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) { 6125 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); 6126 return -EOPNOTSUPP; 6127 } 6128 6129 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) { 6130 dev_err(&hdev->pdev->dev, 6131 "user-def offset[%u] should be no more than %u\n", 6132 offset, HCLGE_FD_MAX_USER_DEF_OFFSET); 6133 return -EINVAL; 6134 } 6135 6136 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) { 6137 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); 6138 return -EINVAL; 6139 } 6140 6141 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); 6142 if (ret) { 6143 dev_err(&hdev->pdev->dev, 6144 "unsupported flow type for user-def bytes, ret = %d\n", 6145 ret); 6146 return ret; 6147 } 6148 6149 info->data = data; 6150 info->data_mask = data_mask; 6151 info->offset = offset; 6152 6153 return 0; 6154 } 6155 6156 static int hclge_fd_check_spec(struct hclge_dev *hdev, 6157 struct ethtool_rx_flow_spec *fs, 6158 u32 *unused_tuple, 6159 struct hclge_fd_user_def_info *info) 6160 { 6161 u32 flow_type; 6162 int ret; 6163 6164 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 6165 dev_err(&hdev->pdev->dev, 6166 "failed to config fd rules, invalid rule location: %u, max is %u\n.", 6167 fs->location, 6168 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); 6169 return -EINVAL; 6170 } 6171 6172 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); 6173 if (ret) 6174 return ret; 6175 6176 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6177 switch (flow_type) { 6178 case SCTP_V4_FLOW: 6179 case TCP_V4_FLOW: 6180 case UDP_V4_FLOW: 6181 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, 6182 unused_tuple); 6183 break; 6184 case IP_USER_FLOW: 6185 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, 6186 unused_tuple); 6187 break; 6188 case SCTP_V6_FLOW: 6189 case TCP_V6_FLOW: 6190 case UDP_V6_FLOW: 6191 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, 6192 unused_tuple); 6193 break; 6194 case IPV6_USER_FLOW: 6195 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, 6196 unused_tuple); 6197 break; 6198 case ETHER_FLOW: 6199 if (hdev->fd_cfg.fd_mode != 6200 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 6201 dev_err(&hdev->pdev->dev, 6202 "ETHER_FLOW is not supported in current fd mode!\n"); 6203 return -EOPNOTSUPP; 6204 } 6205 6206 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, 6207 unused_tuple); 6208 break; 6209 default: 6210 dev_err(&hdev->pdev->dev, 6211 "unsupported protocol type, protocol type = %#x\n", 6212 flow_type); 6213 return -EOPNOTSUPP; 6214 } 6215 6216 if (ret) { 6217 dev_err(&hdev->pdev->dev, 6218 "failed to check flow union tuple, ret = %d\n", 6219 ret); 6220 return ret; 6221 } 6222 6223 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); 6224 } 6225 6226 static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs, 6227 struct hclge_fd_rule *rule, u8 ip_proto) 6228 { 6229 rule->tuples.src_ip[IPV4_INDEX] = 6230 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); 6231 rule->tuples_mask.src_ip[IPV4_INDEX] = 6232 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); 6233 6234 rule->tuples.dst_ip[IPV4_INDEX] = 6235 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); 6236 
rule->tuples_mask.dst_ip[IPV4_INDEX] = 6237 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); 6238 6239 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); 6240 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); 6241 6242 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); 6243 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); 6244 6245 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; 6246 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; 6247 6248 rule->tuples.ether_proto = ETH_P_IP; 6249 rule->tuples_mask.ether_proto = 0xFFFF; 6250 6251 rule->tuples.ip_proto = ip_proto; 6252 rule->tuples_mask.ip_proto = 0xFF; 6253 } 6254 6255 static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs, 6256 struct hclge_fd_rule *rule) 6257 { 6258 rule->tuples.src_ip[IPV4_INDEX] = 6259 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); 6260 rule->tuples_mask.src_ip[IPV4_INDEX] = 6261 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); 6262 6263 rule->tuples.dst_ip[IPV4_INDEX] = 6264 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); 6265 rule->tuples_mask.dst_ip[IPV4_INDEX] = 6266 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); 6267 6268 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; 6269 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; 6270 6271 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; 6272 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; 6273 6274 rule->tuples.ether_proto = ETH_P_IP; 6275 rule->tuples_mask.ether_proto = 0xFFFF; 6276 } 6277 6278 static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs, 6279 struct hclge_fd_rule *rule, u8 ip_proto) 6280 { 6281 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, 6282 IPV6_SIZE); 6283 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, 6284 IPV6_SIZE); 6285 6286 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, 6287 IPV6_SIZE); 6288 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, 6289 IPV6_SIZE); 6290 6291 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); 6292 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); 6293 6294 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); 6295 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); 6296 6297 rule->tuples.ether_proto = ETH_P_IPV6; 6298 rule->tuples_mask.ether_proto = 0xFFFF; 6299 6300 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; 6301 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; 6302 6303 rule->tuples.ip_proto = ip_proto; 6304 rule->tuples_mask.ip_proto = 0xFF; 6305 } 6306 6307 static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs, 6308 struct hclge_fd_rule *rule) 6309 { 6310 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, 6311 IPV6_SIZE); 6312 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, 6313 IPV6_SIZE); 6314 6315 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, 6316 IPV6_SIZE); 6317 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, 6318 IPV6_SIZE); 6319 6320 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; 6321 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; 6322 6323 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; 6324 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; 6325 6326 rule->tuples.ether_proto = ETH_P_IPV6; 6327 rule->tuples_mask.ether_proto = 0xFFFF; 6328 } 6329 6330 static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs, 6331 struct 
hclge_fd_rule *rule) 6332 { 6333 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); 6334 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); 6335 6336 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); 6337 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); 6338 6339 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); 6340 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); 6341 } 6342 6343 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, 6344 struct hclge_fd_rule *rule) 6345 { 6346 switch (info->layer) { 6347 case HCLGE_FD_USER_DEF_L2: 6348 rule->tuples.l2_user_def = info->data; 6349 rule->tuples_mask.l2_user_def = info->data_mask; 6350 break; 6351 case HCLGE_FD_USER_DEF_L3: 6352 rule->tuples.l3_user_def = info->data; 6353 rule->tuples_mask.l3_user_def = info->data_mask; 6354 break; 6355 case HCLGE_FD_USER_DEF_L4: 6356 rule->tuples.l4_user_def = (u32)info->data << 16; 6357 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; 6358 break; 6359 default: 6360 break; 6361 } 6362 6363 rule->ep.user_def = *info; 6364 } 6365 6366 static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs, 6367 struct hclge_fd_rule *rule, 6368 struct hclge_fd_user_def_info *info) 6369 { 6370 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6371 6372 switch (flow_type) { 6373 case SCTP_V4_FLOW: 6374 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP); 6375 break; 6376 case TCP_V4_FLOW: 6377 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP); 6378 break; 6379 case UDP_V4_FLOW: 6380 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP); 6381 break; 6382 case IP_USER_FLOW: 6383 hclge_fd_get_ip4_tuple(fs, rule); 6384 break; 6385 case SCTP_V6_FLOW: 6386 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP); 6387 break; 6388 case TCP_V6_FLOW: 6389 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP); 6390 break; 6391 case UDP_V6_FLOW: 6392 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP); 6393 break; 6394 case IPV6_USER_FLOW: 6395 hclge_fd_get_ip6_tuple(fs, rule); 6396 break; 6397 case ETHER_FLOW: 6398 hclge_fd_get_ether_tuple(fs, rule); 6399 break; 6400 default: 6401 return -EOPNOTSUPP; 6402 } 6403 6404 if (fs->flow_type & FLOW_EXT) { 6405 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); 6406 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); 6407 hclge_fd_get_user_def_tuple(info, rule); 6408 } 6409 6410 if (fs->flow_type & FLOW_MAC_EXT) { 6411 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); 6412 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); 6413 } 6414 6415 return 0; 6416 } 6417 6418 static int hclge_fd_config_rule(struct hclge_dev *hdev, 6419 struct hclge_fd_rule *rule) 6420 { 6421 int ret; 6422 6423 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 6424 if (ret) 6425 return ret; 6426 6427 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 6428 } 6429 6430 static int hclge_add_fd_entry_common(struct hclge_dev *hdev, 6431 struct hclge_fd_rule *rule) 6432 { 6433 int ret; 6434 6435 spin_lock_bh(&hdev->fd_rule_lock); 6436 6437 if (hdev->fd_active_type != rule->rule_type && 6438 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || 6439 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { 6440 dev_err(&hdev->pdev->dev, 6441 "mode conflict(new type %d, active type %d), please delete existent rules first\n", 6442 rule->rule_type, hdev->fd_active_type); 6443 spin_unlock_bh(&hdev->fd_rule_lock); 6444 return -EINVAL; 6445 } 6446 6447 ret 
= hclge_fd_check_user_def_refcnt(hdev, rule); 6448 if (ret) 6449 goto out; 6450 6451 ret = hclge_clear_arfs_rules(hdev); 6452 if (ret) 6453 goto out; 6454 6455 ret = hclge_fd_config_rule(hdev, rule); 6456 if (ret) 6457 goto out; 6458 6459 rule->state = HCLGE_FD_ACTIVE; 6460 hdev->fd_active_type = rule->rule_type; 6461 hclge_update_fd_list(hdev, rule->state, rule->location, rule); 6462 6463 out: 6464 spin_unlock_bh(&hdev->fd_rule_lock); 6465 return ret; 6466 } 6467 6468 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) 6469 { 6470 struct hclge_vport *vport = hclge_get_vport(handle); 6471 struct hclge_dev *hdev = vport->back; 6472 6473 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; 6474 } 6475 6476 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, 6477 u16 *vport_id, u8 *action, u16 *queue_id) 6478 { 6479 struct hclge_vport *vport = hdev->vport; 6480 6481 if (ring_cookie == RX_CLS_FLOW_DISC) { 6482 *action = HCLGE_FD_ACTION_DROP_PACKET; 6483 } else { 6484 u32 ring = ethtool_get_flow_spec_ring(ring_cookie); 6485 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); 6486 u16 tqps; 6487 6488 /* To keep consistent with user's configuration, minus 1 when 6489 * printing 'vf', because vf id from ethtool is added 1 for vf. 6490 */ 6491 if (vf > hdev->num_req_vfs) { 6492 dev_err(&hdev->pdev->dev, 6493 "Error: vf id (%u) should be less than %u\n", 6494 vf - 1U, hdev->num_req_vfs); 6495 return -EINVAL; 6496 } 6497 6498 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; 6499 tqps = hdev->vport[vf].nic.kinfo.num_tqps; 6500 6501 if (ring >= tqps) { 6502 dev_err(&hdev->pdev->dev, 6503 "Error: queue id (%u) > max tqp num (%u)\n", 6504 ring, tqps - 1U); 6505 return -EINVAL; 6506 } 6507 6508 *action = HCLGE_FD_ACTION_SELECT_QUEUE; 6509 *queue_id = ring; 6510 } 6511 6512 return 0; 6513 } 6514 6515 static int hclge_add_fd_entry(struct hnae3_handle *handle, 6516 struct ethtool_rxnfc *cmd) 6517 { 6518 struct hclge_vport *vport = hclge_get_vport(handle); 6519 struct hclge_dev *hdev = vport->back; 6520 struct hclge_fd_user_def_info info; 6521 u16 dst_vport_id = 0, q_index = 0; 6522 struct ethtool_rx_flow_spec *fs; 6523 struct hclge_fd_rule *rule; 6524 u32 unused = 0; 6525 u8 action; 6526 int ret; 6527 6528 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { 6529 dev_err(&hdev->pdev->dev, 6530 "flow table director is not supported\n"); 6531 return -EOPNOTSUPP; 6532 } 6533 6534 if (!hdev->fd_en) { 6535 dev_err(&hdev->pdev->dev, 6536 "please enable flow director first\n"); 6537 return -EOPNOTSUPP; 6538 } 6539 6540 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6541 6542 ret = hclge_fd_check_spec(hdev, fs, &unused, &info); 6543 if (ret) 6544 return ret; 6545 6546 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, 6547 &action, &q_index); 6548 if (ret) 6549 return ret; 6550 6551 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 6552 if (!rule) 6553 return -ENOMEM; 6554 6555 ret = hclge_fd_get_tuple(fs, rule, &info); 6556 if (ret) { 6557 kfree(rule); 6558 return ret; 6559 } 6560 6561 rule->flow_type = fs->flow_type; 6562 rule->location = fs->location; 6563 rule->unused_tuple = unused; 6564 rule->vf_id = dst_vport_id; 6565 rule->queue_id = q_index; 6566 rule->action = action; 6567 rule->rule_type = HCLGE_FD_EP_ACTIVE; 6568 6569 ret = hclge_add_fd_entry_common(hdev, rule); 6570 if (ret) 6571 kfree(rule); 6572 6573 return ret; 6574 } 6575 6576 static int hclge_del_fd_entry(struct hnae3_handle *handle, 6577 struct ethtool_rxnfc *cmd) 6578 { 6579 
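/* Note: when the flow director table is occupied by tc flower rules, or no rule is installed at the requested location, the delete request is rejected with -ENOENT below. */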
struct hclge_vport *vport = hclge_get_vport(handle); 6580 struct hclge_dev *hdev = vport->back; 6581 struct ethtool_rx_flow_spec *fs; 6582 int ret; 6583 6584 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6585 return -EOPNOTSUPP; 6586 6587 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6588 6589 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 6590 return -EINVAL; 6591 6592 spin_lock_bh(&hdev->fd_rule_lock); 6593 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || 6594 !test_bit(fs->location, hdev->fd_bmap)) { 6595 dev_err(&hdev->pdev->dev, 6596 "Delete fail, rule %u is inexistent\n", fs->location); 6597 spin_unlock_bh(&hdev->fd_rule_lock); 6598 return -ENOENT; 6599 } 6600 6601 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, 6602 NULL, false); 6603 if (ret) 6604 goto out; 6605 6606 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); 6607 6608 out: 6609 spin_unlock_bh(&hdev->fd_rule_lock); 6610 return ret; 6611 } 6612 6613 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, 6614 bool clear_list) 6615 { 6616 struct hclge_fd_rule *rule; 6617 struct hlist_node *node; 6618 u16 location; 6619 6620 spin_lock_bh(&hdev->fd_rule_lock); 6621 6622 for_each_set_bit(location, hdev->fd_bmap, 6623 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 6624 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, 6625 NULL, false); 6626 6627 if (clear_list) { 6628 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, 6629 rule_node) { 6630 hlist_del(&rule->rule_node); 6631 kfree(rule); 6632 } 6633 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 6634 hdev->hclge_fd_rule_num = 0; 6635 bitmap_zero(hdev->fd_bmap, 6636 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 6637 } 6638 6639 spin_unlock_bh(&hdev->fd_rule_lock); 6640 } 6641 6642 static void hclge_del_all_fd_entries(struct hclge_dev *hdev) 6643 { 6644 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6645 return; 6646 6647 hclge_clear_fd_rules_in_list(hdev, true); 6648 hclge_fd_disable_user_def(hdev); 6649 } 6650 6651 static int hclge_restore_fd_entries(struct hnae3_handle *handle) 6652 { 6653 struct hclge_vport *vport = hclge_get_vport(handle); 6654 struct hclge_dev *hdev = vport->back; 6655 struct hclge_fd_rule *rule; 6656 struct hlist_node *node; 6657 6658 /* Return ok here, because reset error handling will check this 6659 * return value. If error is returned here, the reset process will 6660 * fail. 
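 * Active rules are simply marked HCLGE_FD_TO_ADD below and reprogrammed later by the periodic task via hclge_sync_fd_table().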
6661 */ 6662 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6663 return 0; 6664 6665 /* if fd is disabled, should not restore it when reset */ 6666 if (!hdev->fd_en) 6667 return 0; 6668 6669 spin_lock_bh(&hdev->fd_rule_lock); 6670 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 6671 if (rule->state == HCLGE_FD_ACTIVE) 6672 rule->state = HCLGE_FD_TO_ADD; 6673 } 6674 spin_unlock_bh(&hdev->fd_rule_lock); 6675 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 6676 6677 return 0; 6678 } 6679 6680 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, 6681 struct ethtool_rxnfc *cmd) 6682 { 6683 struct hclge_vport *vport = hclge_get_vport(handle); 6684 struct hclge_dev *hdev = vport->back; 6685 6686 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) 6687 return -EOPNOTSUPP; 6688 6689 cmd->rule_cnt = hdev->hclge_fd_rule_num; 6690 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 6691 6692 return 0; 6693 } 6694 6695 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, 6696 struct ethtool_tcpip4_spec *spec, 6697 struct ethtool_tcpip4_spec *spec_mask) 6698 { 6699 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 6700 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 6701 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 6702 6703 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 6704 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? 6705 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 6706 6707 spec->psrc = cpu_to_be16(rule->tuples.src_port); 6708 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 6709 0 : cpu_to_be16(rule->tuples_mask.src_port); 6710 6711 spec->pdst = cpu_to_be16(rule->tuples.dst_port); 6712 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? 6713 0 : cpu_to_be16(rule->tuples_mask.dst_port); 6714 6715 spec->tos = rule->tuples.ip_tos; 6716 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6717 0 : rule->tuples_mask.ip_tos; 6718 } 6719 6720 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, 6721 struct ethtool_usrip4_spec *spec, 6722 struct ethtool_usrip4_spec *spec_mask) 6723 { 6724 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 6725 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 6726 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 6727 6728 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 6729 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? 6730 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 6731 6732 spec->tos = rule->tuples.ip_tos; 6733 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6734 0 : rule->tuples_mask.ip_tos; 6735 6736 spec->proto = rule->tuples.ip_proto; 6737 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 
6738 0 : rule->tuples_mask.ip_proto; 6739 6740 spec->ip_ver = ETH_RX_NFC_IP4; 6741 } 6742 6743 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, 6744 struct ethtool_tcpip6_spec *spec, 6745 struct ethtool_tcpip6_spec *spec_mask) 6746 { 6747 cpu_to_be32_array(spec->ip6src, 6748 rule->tuples.src_ip, IPV6_SIZE); 6749 cpu_to_be32_array(spec->ip6dst, 6750 rule->tuples.dst_ip, IPV6_SIZE); 6751 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 6752 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); 6753 else 6754 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, 6755 IPV6_SIZE); 6756 6757 if (rule->unused_tuple & BIT(INNER_DST_IP)) 6758 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); 6759 else 6760 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, 6761 IPV6_SIZE); 6762 6763 spec->tclass = rule->tuples.ip_tos; 6764 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6765 0 : rule->tuples_mask.ip_tos; 6766 6767 spec->psrc = cpu_to_be16(rule->tuples.src_port); 6768 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 6769 0 : cpu_to_be16(rule->tuples_mask.src_port); 6770 6771 spec->pdst = cpu_to_be16(rule->tuples.dst_port); 6772 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? 6773 0 : cpu_to_be16(rule->tuples_mask.dst_port); 6774 } 6775 6776 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, 6777 struct ethtool_usrip6_spec *spec, 6778 struct ethtool_usrip6_spec *spec_mask) 6779 { 6780 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); 6781 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); 6782 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 6783 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); 6784 else 6785 cpu_to_be32_array(spec_mask->ip6src, 6786 rule->tuples_mask.src_ip, IPV6_SIZE); 6787 6788 if (rule->unused_tuple & BIT(INNER_DST_IP)) 6789 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); 6790 else 6791 cpu_to_be32_array(spec_mask->ip6dst, 6792 rule->tuples_mask.dst_ip, IPV6_SIZE); 6793 6794 spec->tclass = rule->tuples.ip_tos; 6795 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6796 0 : rule->tuples_mask.ip_tos; 6797 6798 spec->l4_proto = rule->tuples.ip_proto; 6799 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 6800 0 : rule->tuples_mask.ip_proto; 6801 } 6802 6803 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, 6804 struct ethhdr *spec, 6805 struct ethhdr *spec_mask) 6806 { 6807 ether_addr_copy(spec->h_source, rule->tuples.src_mac); 6808 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); 6809 6810 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) 6811 eth_zero_addr(spec_mask->h_source); 6812 else 6813 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); 6814 6815 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6816 eth_zero_addr(spec_mask->h_dest); 6817 else 6818 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); 6819 6820 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); 6821 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? 
6822 0 : cpu_to_be16(rule->tuples_mask.ether_proto); 6823 } 6824 6825 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, 6826 struct hclge_fd_rule *rule) 6827 { 6828 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == 6829 HCLGE_FD_TUPLE_USER_DEF_TUPLES) { 6830 fs->h_ext.data[0] = 0; 6831 fs->h_ext.data[1] = 0; 6832 fs->m_ext.data[0] = 0; 6833 fs->m_ext.data[1] = 0; 6834 } else { 6835 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); 6836 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); 6837 fs->m_ext.data[0] = 6838 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK); 6839 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); 6840 } 6841 } 6842 6843 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, 6844 struct hclge_fd_rule *rule) 6845 { 6846 if (fs->flow_type & FLOW_EXT) { 6847 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); 6848 fs->m_ext.vlan_tci = 6849 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 6850 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); 6851 6852 hclge_fd_get_user_def_info(fs, rule); 6853 } 6854 6855 if (fs->flow_type & FLOW_MAC_EXT) { 6856 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); 6857 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6858 eth_zero_addr(fs->m_u.ether_spec.h_dest); 6859 else 6860 ether_addr_copy(fs->m_u.ether_spec.h_dest, 6861 rule->tuples_mask.dst_mac); 6862 } 6863 } 6864 6865 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, 6866 u16 location) 6867 { 6868 struct hclge_fd_rule *rule = NULL; 6869 struct hlist_node *node2; 6870 6871 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 6872 if (rule->location == location) 6873 return rule; 6874 else if (rule->location > location) 6875 return NULL; 6876 } 6877 6878 return NULL; 6879 } 6880 6881 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, 6882 struct hclge_fd_rule *rule) 6883 { 6884 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 6885 fs->ring_cookie = RX_CLS_FLOW_DISC; 6886 } else { 6887 u64 vf_id; 6888 6889 fs->ring_cookie = rule->queue_id; 6890 vf_id = rule->vf_id; 6891 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 6892 fs->ring_cookie |= vf_id; 6893 } 6894 } 6895 6896 static int hclge_get_fd_rule_info(struct hnae3_handle *handle, 6897 struct ethtool_rxnfc *cmd) 6898 { 6899 struct hclge_vport *vport = hclge_get_vport(handle); 6900 struct hclge_fd_rule *rule = NULL; 6901 struct hclge_dev *hdev = vport->back; 6902 struct ethtool_rx_flow_spec *fs; 6903 6904 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6905 return -EOPNOTSUPP; 6906 6907 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6908 6909 spin_lock_bh(&hdev->fd_rule_lock); 6910 6911 rule = hclge_get_fd_rule(hdev, fs->location); 6912 if (!rule) { 6913 spin_unlock_bh(&hdev->fd_rule_lock); 6914 return -ENOENT; 6915 } 6916 6917 fs->flow_type = rule->flow_type; 6918 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 6919 case SCTP_V4_FLOW: 6920 case TCP_V4_FLOW: 6921 case UDP_V4_FLOW: 6922 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, 6923 &fs->m_u.tcp_ip4_spec); 6924 break; 6925 case IP_USER_FLOW: 6926 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, 6927 &fs->m_u.usr_ip4_spec); 6928 break; 6929 case SCTP_V6_FLOW: 6930 case TCP_V6_FLOW: 6931 case UDP_V6_FLOW: 6932 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, 6933 &fs->m_u.tcp_ip6_spec); 6934 break; 6935 case IPV6_USER_FLOW: 6936 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, 6937 &fs->m_u.usr_ip6_spec); 6938 break; 6939 /* The flow type 
of fd rule has been checked before adding in to rule 6940 * list. As other flow types have been handled, it must be ETHER_FLOW 6941 * for the default case 6942 */ 6943 default: 6944 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, 6945 &fs->m_u.ether_spec); 6946 break; 6947 } 6948 6949 hclge_fd_get_ext_info(fs, rule); 6950 6951 hclge_fd_get_ring_cookie(fs, rule); 6952 6953 spin_unlock_bh(&hdev->fd_rule_lock); 6954 6955 return 0; 6956 } 6957 6958 static int hclge_get_all_rules(struct hnae3_handle *handle, 6959 struct ethtool_rxnfc *cmd, u32 *rule_locs) 6960 { 6961 struct hclge_vport *vport = hclge_get_vport(handle); 6962 struct hclge_dev *hdev = vport->back; 6963 struct hclge_fd_rule *rule; 6964 struct hlist_node *node2; 6965 int cnt = 0; 6966 6967 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6968 return -EOPNOTSUPP; 6969 6970 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 6971 6972 spin_lock_bh(&hdev->fd_rule_lock); 6973 hlist_for_each_entry_safe(rule, node2, 6974 &hdev->fd_rule_list, rule_node) { 6975 if (cnt == cmd->rule_cnt) { 6976 spin_unlock_bh(&hdev->fd_rule_lock); 6977 return -EMSGSIZE; 6978 } 6979 6980 if (rule->state == HCLGE_FD_TO_DEL) 6981 continue; 6982 6983 rule_locs[cnt] = rule->location; 6984 cnt++; 6985 } 6986 6987 spin_unlock_bh(&hdev->fd_rule_lock); 6988 6989 cmd->rule_cnt = cnt; 6990 6991 return 0; 6992 } 6993 6994 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, 6995 struct hclge_fd_rule_tuples *tuples) 6996 { 6997 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 6998 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 6999 7000 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); 7001 tuples->ip_proto = fkeys->basic.ip_proto; 7002 tuples->dst_port = be16_to_cpu(fkeys->ports.dst); 7003 7004 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 7005 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); 7006 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); 7007 } else { 7008 int i; 7009 7010 for (i = 0; i < IPV6_SIZE; i++) { 7011 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); 7012 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); 7013 } 7014 } 7015 } 7016 7017 /* traverse all rules, check whether an existed rule has the same tuples */ 7018 static struct hclge_fd_rule * 7019 hclge_fd_search_flow_keys(struct hclge_dev *hdev, 7020 const struct hclge_fd_rule_tuples *tuples) 7021 { 7022 struct hclge_fd_rule *rule = NULL; 7023 struct hlist_node *node; 7024 7025 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7026 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) 7027 return rule; 7028 } 7029 7030 return NULL; 7031 } 7032 7033 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, 7034 struct hclge_fd_rule *rule) 7035 { 7036 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 7037 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) | 7038 BIT(INNER_SRC_PORT); 7039 rule->action = 0; 7040 rule->vf_id = 0; 7041 rule->rule_type = HCLGE_FD_ARFS_ACTIVE; 7042 rule->state = HCLGE_FD_TO_ADD; 7043 if (tuples->ether_proto == ETH_P_IP) { 7044 if (tuples->ip_proto == IPPROTO_TCP) 7045 rule->flow_type = TCP_V4_FLOW; 7046 else 7047 rule->flow_type = UDP_V4_FLOW; 7048 } else { 7049 if (tuples->ip_proto == IPPROTO_TCP) 7050 rule->flow_type = TCP_V6_FLOW; 7051 else 7052 rule->flow_type = UDP_V6_FLOW; 7053 } 7054 memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); 7055 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); 7056 } 7057 7058 static int 
hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, 7059 u16 flow_id, struct flow_keys *fkeys) 7060 { 7061 struct hclge_vport *vport = hclge_get_vport(handle); 7062 struct hclge_fd_rule_tuples new_tuples = {}; 7063 struct hclge_dev *hdev = vport->back; 7064 struct hclge_fd_rule *rule; 7065 u16 bit_id; 7066 7067 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7068 return -EOPNOTSUPP; 7069 7070 /* when there is already fd rule existed add by user, 7071 * arfs should not work 7072 */ 7073 spin_lock_bh(&hdev->fd_rule_lock); 7074 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && 7075 hdev->fd_active_type != HCLGE_FD_RULE_NONE) { 7076 spin_unlock_bh(&hdev->fd_rule_lock); 7077 return -EOPNOTSUPP; 7078 } 7079 7080 hclge_fd_get_flow_tuples(fkeys, &new_tuples); 7081 7082 /* check is there flow director filter existed for this flow, 7083 * if not, create a new filter for it; 7084 * if filter exist with different queue id, modify the filter; 7085 * if filter exist with same queue id, do nothing 7086 */ 7087 rule = hclge_fd_search_flow_keys(hdev, &new_tuples); 7088 if (!rule) { 7089 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); 7090 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 7091 spin_unlock_bh(&hdev->fd_rule_lock); 7092 return -ENOSPC; 7093 } 7094 7095 rule = kzalloc(sizeof(*rule), GFP_ATOMIC); 7096 if (!rule) { 7097 spin_unlock_bh(&hdev->fd_rule_lock); 7098 return -ENOMEM; 7099 } 7100 7101 rule->location = bit_id; 7102 rule->arfs.flow_id = flow_id; 7103 rule->queue_id = queue_id; 7104 hclge_fd_build_arfs_rule(&new_tuples, rule); 7105 hclge_update_fd_list(hdev, rule->state, rule->location, rule); 7106 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; 7107 } else if (rule->queue_id != queue_id) { 7108 rule->queue_id = queue_id; 7109 rule->state = HCLGE_FD_TO_ADD; 7110 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7111 hclge_task_schedule(hdev, 0); 7112 } 7113 spin_unlock_bh(&hdev->fd_rule_lock); 7114 return rule->location; 7115 } 7116 7117 static void hclge_rfs_filter_expire(struct hclge_dev *hdev) 7118 { 7119 #ifdef CONFIG_RFS_ACCEL 7120 struct hnae3_handle *handle = &hdev->vport[0].nic; 7121 struct hclge_fd_rule *rule; 7122 struct hlist_node *node; 7123 7124 spin_lock_bh(&hdev->fd_rule_lock); 7125 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { 7126 spin_unlock_bh(&hdev->fd_rule_lock); 7127 return; 7128 } 7129 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7130 if (rule->state != HCLGE_FD_ACTIVE) 7131 continue; 7132 if (rps_may_expire_flow(handle->netdev, rule->queue_id, 7133 rule->arfs.flow_id, rule->location)) { 7134 rule->state = HCLGE_FD_TO_DEL; 7135 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7136 } 7137 } 7138 spin_unlock_bh(&hdev->fd_rule_lock); 7139 #endif 7140 } 7141 7142 /* make sure being called after lock up with fd_rule_lock */ 7143 static int hclge_clear_arfs_rules(struct hclge_dev *hdev) 7144 { 7145 #ifdef CONFIG_RFS_ACCEL 7146 struct hclge_fd_rule *rule; 7147 struct hlist_node *node; 7148 int ret; 7149 7150 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) 7151 return 0; 7152 7153 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7154 switch (rule->state) { 7155 case HCLGE_FD_TO_DEL: 7156 case HCLGE_FD_ACTIVE: 7157 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 7158 rule->location, NULL, false); 7159 if (ret) 7160 return ret; 7161 fallthrough; 7162 case HCLGE_FD_TO_ADD: 7163 hclge_fd_dec_rule_cnt(hdev, rule->location); 7164 hlist_del(&rule->rule_node); 7165 
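/* hardware entry is already cleared above (or was never written for TO_ADD), so just drop the software node */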
kfree(rule); 7166 break; 7167 default: 7168 break; 7169 } 7170 } 7171 hclge_sync_fd_state(hdev); 7172 7173 #endif 7174 return 0; 7175 } 7176 7177 static void hclge_get_cls_key_basic(const struct flow_rule *flow, 7178 struct hclge_fd_rule *rule) 7179 { 7180 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) { 7181 struct flow_match_basic match; 7182 u16 ethtype_key, ethtype_mask; 7183 7184 flow_rule_match_basic(flow, &match); 7185 ethtype_key = ntohs(match.key->n_proto); 7186 ethtype_mask = ntohs(match.mask->n_proto); 7187 7188 if (ethtype_key == ETH_P_ALL) { 7189 ethtype_key = 0; 7190 ethtype_mask = 0; 7191 } 7192 rule->tuples.ether_proto = ethtype_key; 7193 rule->tuples_mask.ether_proto = ethtype_mask; 7194 rule->tuples.ip_proto = match.key->ip_proto; 7195 rule->tuples_mask.ip_proto = match.mask->ip_proto; 7196 } else { 7197 rule->unused_tuple |= BIT(INNER_IP_PROTO); 7198 rule->unused_tuple |= BIT(INNER_ETH_TYPE); 7199 } 7200 } 7201 7202 static void hclge_get_cls_key_mac(const struct flow_rule *flow, 7203 struct hclge_fd_rule *rule) 7204 { 7205 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 7206 struct flow_match_eth_addrs match; 7207 7208 flow_rule_match_eth_addrs(flow, &match); 7209 ether_addr_copy(rule->tuples.dst_mac, match.key->dst); 7210 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); 7211 ether_addr_copy(rule->tuples.src_mac, match.key->src); 7212 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); 7213 } else { 7214 rule->unused_tuple |= BIT(INNER_DST_MAC); 7215 rule->unused_tuple |= BIT(INNER_SRC_MAC); 7216 } 7217 } 7218 7219 static void hclge_get_cls_key_vlan(const struct flow_rule *flow, 7220 struct hclge_fd_rule *rule) 7221 { 7222 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { 7223 struct flow_match_vlan match; 7224 7225 flow_rule_match_vlan(flow, &match); 7226 rule->tuples.vlan_tag1 = match.key->vlan_id | 7227 (match.key->vlan_priority << VLAN_PRIO_SHIFT); 7228 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | 7229 (match.mask->vlan_priority << VLAN_PRIO_SHIFT); 7230 } else { 7231 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); 7232 } 7233 } 7234 7235 static int hclge_get_cls_key_ip(const struct flow_rule *flow, 7236 struct hclge_fd_rule *rule, 7237 struct netlink_ext_ack *extack) 7238 { 7239 u16 addr_type = 0; 7240 7241 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) { 7242 struct flow_match_control match; 7243 7244 flow_rule_match_control(flow, &match); 7245 addr_type = match.key->addr_type; 7246 7247 if (flow_rule_has_control_flags(match.mask->flags, extack)) 7248 return -EOPNOTSUPP; 7249 } 7250 7251 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 7252 struct flow_match_ipv4_addrs match; 7253 7254 flow_rule_match_ipv4_addrs(flow, &match); 7255 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); 7256 rule->tuples_mask.src_ip[IPV4_INDEX] = 7257 be32_to_cpu(match.mask->src); 7258 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); 7259 rule->tuples_mask.dst_ip[IPV4_INDEX] = 7260 be32_to_cpu(match.mask->dst); 7261 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 7262 struct flow_match_ipv6_addrs match; 7263 7264 flow_rule_match_ipv6_addrs(flow, &match); 7265 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, 7266 IPV6_SIZE); 7267 be32_to_cpu_array(rule->tuples_mask.src_ip, 7268 match.mask->src.s6_addr32, IPV6_SIZE); 7269 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, 7270 IPV6_SIZE); 7271 be32_to_cpu_array(rule->tuples_mask.dst_ip, 7272 
match.mask->dst.s6_addr32, IPV6_SIZE); 7273 } else { 7274 rule->unused_tuple |= BIT(INNER_SRC_IP); 7275 rule->unused_tuple |= BIT(INNER_DST_IP); 7276 } 7277 7278 return 0; 7279 } 7280 7281 static void hclge_get_cls_key_port(const struct flow_rule *flow, 7282 struct hclge_fd_rule *rule) 7283 { 7284 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { 7285 struct flow_match_ports match; 7286 7287 flow_rule_match_ports(flow, &match); 7288 7289 rule->tuples.src_port = be16_to_cpu(match.key->src); 7290 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); 7291 rule->tuples.dst_port = be16_to_cpu(match.key->dst); 7292 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); 7293 } else { 7294 rule->unused_tuple |= BIT(INNER_SRC_PORT); 7295 rule->unused_tuple |= BIT(INNER_DST_PORT); 7296 } 7297 } 7298 7299 static int hclge_parse_cls_flower(struct hclge_dev *hdev, 7300 struct flow_cls_offload *cls_flower, 7301 struct hclge_fd_rule *rule) 7302 { 7303 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); 7304 struct netlink_ext_ack *extack = cls_flower->common.extack; 7305 struct flow_dissector *dissector = flow->match.dissector; 7306 int ret; 7307 7308 if (dissector->used_keys & 7309 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 7310 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 7311 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 7312 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 7313 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 7314 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 7315 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { 7316 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n", 7317 dissector->used_keys); 7318 return -EOPNOTSUPP; 7319 } 7320 7321 hclge_get_cls_key_basic(flow, rule); 7322 hclge_get_cls_key_mac(flow, rule); 7323 hclge_get_cls_key_vlan(flow, rule); 7324 7325 ret = hclge_get_cls_key_ip(flow, rule, extack); 7326 if (ret) 7327 return ret; 7328 7329 hclge_get_cls_key_port(flow, rule); 7330 7331 return 0; 7332 } 7333 7334 static int hclge_check_cls_flower(struct hclge_dev *hdev, 7335 struct flow_cls_offload *cls_flower, int tc) 7336 { 7337 u32 prio = cls_flower->common.prio; 7338 7339 if (tc < 0 || tc > hdev->tc_max) { 7340 dev_err(&hdev->pdev->dev, "invalid traffic class\n"); 7341 return -EINVAL; 7342 } 7343 7344 if (prio == 0 || 7345 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 7346 dev_err(&hdev->pdev->dev, 7347 "prio %u should be in range[1, %u]\n", 7348 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 7349 return -EINVAL; 7350 } 7351 7352 if (test_bit(prio - 1, hdev->fd_bmap)) { 7353 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); 7354 return -EINVAL; 7355 } 7356 return 0; 7357 } 7358 7359 static int hclge_add_cls_flower(struct hnae3_handle *handle, 7360 struct flow_cls_offload *cls_flower, 7361 int tc) 7362 { 7363 struct hclge_vport *vport = hclge_get_vport(handle); 7364 struct hclge_dev *hdev = vport->back; 7365 struct hclge_fd_rule *rule; 7366 int ret; 7367 7368 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { 7369 dev_err(&hdev->pdev->dev, 7370 "cls flower is not supported\n"); 7371 return -EOPNOTSUPP; 7372 } 7373 7374 ret = hclge_check_cls_flower(hdev, cls_flower, tc); 7375 if (ret) { 7376 dev_err(&hdev->pdev->dev, 7377 "failed to check cls flower params, ret = %d\n", ret); 7378 return ret; 7379 } 7380 7381 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 7382 if (!rule) 7383 return -ENOMEM; 7384 7385 ret = hclge_parse_cls_flower(hdev, cls_flower, rule); 7386 if (ret) { 7387 kfree(rule); 7388 return ret; 7389 } 7390 7391 rule->action = HCLGE_FD_ACTION_SELECT_TC; 7392 
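/* the target tc is converted to a queue offset and size in hclge_config_action() */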
rule->cls_flower.tc = tc; 7393 rule->location = cls_flower->common.prio - 1; 7394 rule->vf_id = 0; 7395 rule->cls_flower.cookie = cls_flower->cookie; 7396 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; 7397 7398 ret = hclge_add_fd_entry_common(hdev, rule); 7399 if (ret) 7400 kfree(rule); 7401 7402 return ret; 7403 } 7404 7405 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, 7406 unsigned long cookie) 7407 { 7408 struct hclge_fd_rule *rule; 7409 struct hlist_node *node; 7410 7411 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7412 if (rule->cls_flower.cookie == cookie) 7413 return rule; 7414 } 7415 7416 return NULL; 7417 } 7418 7419 static int hclge_del_cls_flower(struct hnae3_handle *handle, 7420 struct flow_cls_offload *cls_flower) 7421 { 7422 struct hclge_vport *vport = hclge_get_vport(handle); 7423 struct hclge_dev *hdev = vport->back; 7424 struct hclge_fd_rule *rule; 7425 int ret; 7426 7427 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7428 return -EOPNOTSUPP; 7429 7430 spin_lock_bh(&hdev->fd_rule_lock); 7431 7432 rule = hclge_find_cls_flower(hdev, cls_flower->cookie); 7433 if (!rule) { 7434 spin_unlock_bh(&hdev->fd_rule_lock); 7435 return -EINVAL; 7436 } 7437 7438 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, 7439 NULL, false); 7440 if (ret) { 7441 /* if tcam config fail, set rule state to TO_DEL, 7442 * so the rule will be deleted when periodic 7443 * task being scheduled. 7444 */ 7445 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); 7446 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7447 spin_unlock_bh(&hdev->fd_rule_lock); 7448 return ret; 7449 } 7450 7451 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); 7452 spin_unlock_bh(&hdev->fd_rule_lock); 7453 7454 return 0; 7455 } 7456 7457 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) 7458 { 7459 struct hclge_fd_rule *rule; 7460 struct hlist_node *node; 7461 int ret = 0; 7462 7463 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) 7464 return; 7465 7466 spin_lock_bh(&hdev->fd_rule_lock); 7467 7468 hlist_for_each_entry_safe(rule, node, hlist, rule_node) { 7469 switch (rule->state) { 7470 case HCLGE_FD_TO_ADD: 7471 ret = hclge_fd_config_rule(hdev, rule); 7472 if (ret) 7473 goto out; 7474 rule->state = HCLGE_FD_ACTIVE; 7475 break; 7476 case HCLGE_FD_TO_DEL: 7477 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 7478 rule->location, NULL, false); 7479 if (ret) 7480 goto out; 7481 hclge_fd_dec_rule_cnt(hdev, rule->location); 7482 hclge_fd_free_node(hdev, rule); 7483 break; 7484 default: 7485 break; 7486 } 7487 } 7488 7489 out: 7490 if (ret) 7491 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7492 7493 spin_unlock_bh(&hdev->fd_rule_lock); 7494 } 7495 7496 static void hclge_sync_fd_table(struct hclge_dev *hdev) 7497 { 7498 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7499 return; 7500 7501 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { 7502 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; 7503 7504 hclge_clear_fd_rules_in_list(hdev, clear_list); 7505 } 7506 7507 hclge_sync_fd_user_def_cfg(hdev, false); 7508 7509 hclge_sync_fd_list(hdev, &hdev->fd_rule_list); 7510 } 7511 7512 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) 7513 { 7514 struct hclge_vport *vport = hclge_get_vport(handle); 7515 struct hclge_dev *hdev = vport->back; 7516 7517 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || 7518 
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); 7519 } 7520 7521 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle) 7522 { 7523 struct hclge_vport *vport = hclge_get_vport(handle); 7524 struct hclge_dev *hdev = vport->back; 7525 7526 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 7527 } 7528 7529 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) 7530 { 7531 struct hclge_vport *vport = hclge_get_vport(handle); 7532 struct hclge_dev *hdev = vport->back; 7533 7534 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 7535 } 7536 7537 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) 7538 { 7539 struct hclge_vport *vport = hclge_get_vport(handle); 7540 struct hclge_dev *hdev = vport->back; 7541 7542 return hdev->rst_stats.hw_reset_done_cnt; 7543 } 7544 7545 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) 7546 { 7547 struct hclge_vport *vport = hclge_get_vport(handle); 7548 struct hclge_dev *hdev = vport->back; 7549 7550 hdev->fd_en = enable; 7551 7552 if (!enable) 7553 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); 7554 else 7555 hclge_restore_fd_entries(handle); 7556 7557 hclge_task_schedule(hdev, 0); 7558 } 7559 7560 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 7561 { 7562 #define HCLGE_LINK_STATUS_WAIT_CNT 3 7563 7564 struct hclge_desc desc; 7565 struct hclge_config_mac_mode_cmd *req = 7566 (struct hclge_config_mac_mode_cmd *)desc.data; 7567 u32 loop_en = 0; 7568 int ret; 7569 7570 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 7571 7572 if (enable) { 7573 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U); 7574 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U); 7575 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U); 7576 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U); 7577 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U); 7578 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U); 7579 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U); 7580 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U); 7581 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U); 7582 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U); 7583 } 7584 7585 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 7586 7587 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7588 if (ret) { 7589 dev_err(&hdev->pdev->dev, 7590 "mac enable fail, ret =%d.\n", ret); 7591 return; 7592 } 7593 7594 if (!enable) 7595 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, 7596 HCLGE_LINK_STATUS_WAIT_CNT); 7597 } 7598 7599 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, 7600 u8 switch_param, u8 param_mask) 7601 { 7602 struct hclge_mac_vlan_switch_cmd *req; 7603 struct hclge_desc desc; 7604 u32 func_id; 7605 int ret; 7606 7607 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); 7608 req = (struct hclge_mac_vlan_switch_cmd *)desc.data; 7609 7610 /* read current config parameter */ 7611 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, 7612 true); 7613 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; 7614 req->func_id = cpu_to_le32(func_id); 7615 7616 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7617 if (ret) { 7618 dev_err(&hdev->pdev->dev, 7619 "read mac vlan switch parameter fail, ret = %d\n", ret); 7620 return ret; 7621 } 7622 7623 /* modify and write new config parameter */ 7624 hclge_comm_cmd_reuse_desc(&desc, false); 7625 req->switch_param = (req->switch_param & param_mask) | switch_param; 7626 req->param_mask = param_mask; 7627 7628 ret = hclge_cmd_send(&hdev->hw, 
&desc, 1); 7629 if (ret) 7630 dev_err(&hdev->pdev->dev, 7631 "set mac vlan switch parameter fail, ret = %d\n", ret); 7632 return ret; 7633 } 7634 7635 static void hclge_phy_link_status_wait(struct hclge_dev *hdev, 7636 int link_ret) 7637 { 7638 #define HCLGE_PHY_LINK_STATUS_NUM 200 7639 7640 struct phy_device *phydev = hdev->hw.mac.phydev; 7641 int i = 0; 7642 int ret; 7643 7644 do { 7645 ret = phy_read_status(phydev); 7646 if (ret) { 7647 dev_err(&hdev->pdev->dev, 7648 "phy update link status fail, ret = %d\n", ret); 7649 return; 7650 } 7651 7652 if (phydev->link == link_ret) 7653 break; 7654 7655 msleep(HCLGE_LINK_STATUS_MS); 7656 } while (++i < HCLGE_PHY_LINK_STATUS_NUM); 7657 } 7658 7659 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, 7660 int wait_cnt) 7661 { 7662 int link_status; 7663 int i = 0; 7664 int ret; 7665 7666 do { 7667 ret = hclge_get_mac_link_status(hdev, &link_status); 7668 if (ret) 7669 return ret; 7670 if (link_status == link_ret) 7671 return 0; 7672 7673 msleep(HCLGE_LINK_STATUS_MS); 7674 } while (++i < wait_cnt); 7675 return -EBUSY; 7676 } 7677 7678 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, 7679 bool is_phy) 7680 { 7681 #define HCLGE_MAC_LINK_STATUS_NUM 100 7682 7683 int link_ret; 7684 7685 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 7686 7687 if (is_phy) 7688 hclge_phy_link_status_wait(hdev, link_ret); 7689 7690 return hclge_mac_link_status_wait(hdev, link_ret, 7691 HCLGE_MAC_LINK_STATUS_NUM); 7692 } 7693 7694 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) 7695 { 7696 struct hclge_config_mac_mode_cmd *req; 7697 struct hclge_desc desc; 7698 u32 loop_en; 7699 int ret; 7700 7701 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 7702 /* 1 Read out the MAC mode config at first */ 7703 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 7704 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7705 if (ret) { 7706 dev_err(&hdev->pdev->dev, 7707 "mac loopback get fail, ret =%d.\n", ret); 7708 return ret; 7709 } 7710 7711 /* 2 Then setup the loopback flag */ 7712 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 7713 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); 7714 7715 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 7716 7717 /* 3 Config mac work mode with loopback flag 7718 * and its original configure parameters 7719 */ 7720 hclge_comm_cmd_reuse_desc(&desc, false); 7721 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7722 if (ret) 7723 dev_err(&hdev->pdev->dev, 7724 "mac loopback set fail, ret =%d.\n", ret); 7725 return ret; 7726 } 7727 7728 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, 7729 enum hnae3_loop loop_mode) 7730 { 7731 struct hclge_common_lb_cmd *req; 7732 struct hclge_desc desc; 7733 u8 loop_mode_b; 7734 int ret; 7735 7736 req = (struct hclge_common_lb_cmd *)desc.data; 7737 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); 7738 7739 switch (loop_mode) { 7740 case HNAE3_LOOP_SERIAL_SERDES: 7741 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 7742 break; 7743 case HNAE3_LOOP_PARALLEL_SERDES: 7744 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; 7745 break; 7746 case HNAE3_LOOP_PHY: 7747 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B; 7748 break; 7749 default: 7750 dev_err(&hdev->pdev->dev, 7751 "unsupported loopback mode %d\n", loop_mode); 7752 return -ENOTSUPP; 7753 } 7754 7755 req->mask = loop_mode_b; 7756 if (en) 7757 req->enable = loop_mode_b; 7758 7759 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7760 if (ret) 7761 dev_err(&hdev->pdev->dev, 7762 "failed to send loopback cmd, loop_mode = %d, ret = %d\n", 7763 loop_mode, ret); 7764 7765 return ret; 7766 } 7767 7768 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) 7769 { 7770 #define HCLGE_COMMON_LB_RETRY_MS 10 7771 #define HCLGE_COMMON_LB_RETRY_NUM 100 7772 7773 struct hclge_common_lb_cmd *req; 7774 struct hclge_desc desc; 7775 u32 i = 0; 7776 int ret; 7777 7778 req = (struct hclge_common_lb_cmd *)desc.data; 7779 7780 do { 7781 msleep(HCLGE_COMMON_LB_RETRY_MS); 7782 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, 7783 true); 7784 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7785 if (ret) { 7786 dev_err(&hdev->pdev->dev, 7787 "failed to get loopback done status, ret = %d\n", 7788 ret); 7789 return ret; 7790 } 7791 } while (++i < HCLGE_COMMON_LB_RETRY_NUM && 7792 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); 7793 7794 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { 7795 dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); 7796 return -EBUSY; 7797 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { 7798 dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); 7799 return -EIO; 7800 } 7801 7802 return 0; 7803 } 7804 7805 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, 7806 enum hnae3_loop loop_mode) 7807 { 7808 int ret; 7809 7810 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); 7811 if (ret) 7812 return ret; 7813 7814 return hclge_cfg_common_loopback_wait(hdev); 7815 } 7816 7817 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, 7818 enum hnae3_loop loop_mode) 7819 { 7820 int ret; 7821 7822 ret = hclge_cfg_common_loopback(hdev, en, loop_mode); 7823 if (ret) 7824 return ret; 7825 7826 hclge_cfg_mac_mode(hdev, en); 7827 7828 ret = hclge_mac_phy_link_status_wait(hdev, en, false); 7829 if (ret) 7830 dev_err(&hdev->pdev->dev, 7831 "serdes loopback config mac mode timeout\n"); 7832 7833 return ret; 7834 } 7835 7836 static int hclge_enable_phy_loopback(struct hclge_dev *hdev, 7837 struct phy_device *phydev) 7838 { 7839 int ret; 7840 7841 if (!phydev->suspended) { 7842 ret = phy_suspend(phydev); 7843 if (ret) 7844 return ret; 7845 } 
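	/* The PHY was suspended above if it was not already; resuming it
	 * below means loopback is always enabled on a freshly resumed PHY.
	 */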
7846 7847 ret = phy_resume(phydev); 7848 if (ret) 7849 return ret; 7850 7851 return phy_loopback(phydev, true); 7852 } 7853 7854 static int hclge_disable_phy_loopback(struct hclge_dev *hdev, 7855 struct phy_device *phydev) 7856 { 7857 int ret; 7858 7859 ret = phy_loopback(phydev, false); 7860 if (ret) 7861 return ret; 7862 7863 return phy_suspend(phydev); 7864 } 7865 7866 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) 7867 { 7868 struct phy_device *phydev = hdev->hw.mac.phydev; 7869 int ret; 7870 7871 if (!phydev) { 7872 if (hnae3_dev_phy_imp_supported(hdev)) 7873 return hclge_set_common_loopback(hdev, en, 7874 HNAE3_LOOP_PHY); 7875 return -ENOTSUPP; 7876 } 7877 7878 if (en) 7879 ret = hclge_enable_phy_loopback(hdev, phydev); 7880 else 7881 ret = hclge_disable_phy_loopback(hdev, phydev); 7882 if (ret) { 7883 dev_err(&hdev->pdev->dev, 7884 "set phy loopback fail, ret = %d\n", ret); 7885 return ret; 7886 } 7887 7888 hclge_cfg_mac_mode(hdev, en); 7889 7890 ret = hclge_mac_phy_link_status_wait(hdev, en, true); 7891 if (ret) 7892 dev_err(&hdev->pdev->dev, 7893 "phy loopback config mac mode timeout\n"); 7894 7895 return ret; 7896 } 7897 7898 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, 7899 u16 stream_id, bool enable) 7900 { 7901 struct hclge_desc desc; 7902 struct hclge_cfg_com_tqp_queue_cmd *req = 7903 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 7904 7905 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 7906 req->tqp_id = cpu_to_le16(tqp_id); 7907 req->stream_id = cpu_to_le16(stream_id); 7908 if (enable) 7909 req->enable |= 1U << HCLGE_TQP_ENABLE_B; 7910 7911 return hclge_cmd_send(&hdev->hw, &desc, 1); 7912 } 7913 7914 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable) 7915 { 7916 struct hclge_vport *vport = hclge_get_vport(handle); 7917 struct hclge_dev *hdev = vport->back; 7918 int ret; 7919 u16 i; 7920 7921 for (i = 0; i < handle->kinfo.num_tqps; i++) { 7922 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); 7923 if (ret) 7924 return ret; 7925 } 7926 return 0; 7927 } 7928 7929 static int hclge_set_loopback(struct hnae3_handle *handle, 7930 enum hnae3_loop loop_mode, bool en) 7931 { 7932 struct hclge_vport *vport = hclge_get_vport(handle); 7933 struct hclge_dev *hdev = vport->back; 7934 int ret = 0; 7935 7936 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By 7937 * default, SSU loopback is enabled, so if the SMAC and the DMAC are 7938 * the same, the packets are looped back in the SSU. If SSU loopback 7939 * is disabled, packets can reach MAC even if SMAC is the same as DMAC. 7940 */ 7941 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 7942 u8 switch_param = en ? 
0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); 7943 7944 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, 7945 HCLGE_SWITCH_ALW_LPBK_MASK); 7946 if (ret) 7947 return ret; 7948 } 7949 7950 switch (loop_mode) { 7951 case HNAE3_LOOP_APP: 7952 ret = hclge_set_app_loopback(hdev, en); 7953 break; 7954 case HNAE3_LOOP_SERIAL_SERDES: 7955 case HNAE3_LOOP_PARALLEL_SERDES: 7956 ret = hclge_set_common_loopback(hdev, en, loop_mode); 7957 break; 7958 case HNAE3_LOOP_PHY: 7959 ret = hclge_set_phy_loopback(hdev, en); 7960 break; 7961 case HNAE3_LOOP_EXTERNAL: 7962 break; 7963 default: 7964 ret = -ENOTSUPP; 7965 dev_err(&hdev->pdev->dev, 7966 "loop_mode %d is not supported\n", loop_mode); 7967 break; 7968 } 7969 7970 if (ret) 7971 return ret; 7972 7973 ret = hclge_tqp_enable(handle, en); 7974 if (ret) 7975 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", 7976 en ? "enable" : "disable", ret); 7977 7978 return ret; 7979 } 7980 7981 static int hclge_set_default_loopback(struct hclge_dev *hdev) 7982 { 7983 int ret; 7984 7985 ret = hclge_set_app_loopback(hdev, false); 7986 if (ret) 7987 return ret; 7988 7989 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); 7990 if (ret) 7991 return ret; 7992 7993 return hclge_cfg_common_loopback(hdev, false, 7994 HNAE3_LOOP_PARALLEL_SERDES); 7995 } 7996 7997 static void hclge_flush_link_update(struct hclge_dev *hdev) 7998 { 7999 #define HCLGE_FLUSH_LINK_TIMEOUT 100000 8000 8001 unsigned long last = hdev->serv_processed_cnt; 8002 int i = 0; 8003 8004 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && 8005 i++ < HCLGE_FLUSH_LINK_TIMEOUT && 8006 last == hdev->serv_processed_cnt) 8007 usleep_range(1, 1); 8008 } 8009 8010 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) 8011 { 8012 struct hclge_vport *vport = hclge_get_vport(handle); 8013 struct hclge_dev *hdev = vport->back; 8014 8015 if (enable) { 8016 hclge_task_schedule(hdev, 0); 8017 } else { 8018 /* Set the DOWN flag here to disable link updating */ 8019 set_bit(HCLGE_STATE_DOWN, &hdev->state); 8020 8021 smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */ 8022 hclge_flush_link_update(hdev); 8023 } 8024 } 8025 8026 static int hclge_ae_start(struct hnae3_handle *handle) 8027 { 8028 struct hclge_vport *vport = hclge_get_vport(handle); 8029 struct hclge_dev *hdev = vport->back; 8030 8031 /* mac enable */ 8032 hclge_cfg_mac_mode(hdev, true); 8033 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 8034 hdev->hw.mac.link = 0; 8035 8036 /* reset tqp stats */ 8037 hclge_comm_reset_tqp_stats(handle); 8038 8039 hclge_mac_start_phy(hdev); 8040 8041 return 0; 8042 } 8043 8044 static void hclge_ae_stop(struct hnae3_handle *handle) 8045 { 8046 struct hclge_vport *vport = hclge_get_vport(handle); 8047 struct hclge_dev *hdev = vport->back; 8048 8049 set_bit(HCLGE_STATE_DOWN, &hdev->state); 8050 spin_lock_bh(&hdev->fd_rule_lock); 8051 hclge_clear_arfs_rules(hdev); 8052 spin_unlock_bh(&hdev->fd_rule_lock); 8053 8054 /* If it is not PF reset or FLR, the firmware will disable the MAC, 8055 * so it only need to stop phy here. 
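	 * For PF reset and FLR, the code below resets the TQPs, disables the
	 * MAC and the MAC tnl interrupt, and stops the PHY explicitly.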
8056 */ 8057 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { 8058 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, 8059 HCLGE_PFC_DISABLE); 8060 if (hdev->reset_type != HNAE3_FUNC_RESET && 8061 hdev->reset_type != HNAE3_FLR_RESET) { 8062 hclge_mac_stop_phy(hdev); 8063 hclge_update_link_status(hdev); 8064 return; 8065 } 8066 } 8067 8068 hclge_reset_tqp(handle); 8069 8070 hclge_config_mac_tnl_int(hdev, false); 8071 8072 /* Mac disable */ 8073 hclge_cfg_mac_mode(hdev, false); 8074 8075 hclge_mac_stop_phy(hdev); 8076 8077 /* reset tqp stats */ 8078 hclge_comm_reset_tqp_stats(handle); 8079 hclge_update_link_status(hdev); 8080 } 8081 8082 int hclge_vport_start(struct hclge_vport *vport) 8083 { 8084 struct hclge_dev *hdev = vport->back; 8085 8086 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 8087 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 8088 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 8089 vport->last_active_jiffies = jiffies; 8090 vport->need_notify = 0; 8091 8092 if (test_bit(vport->vport_id, hdev->vport_config_block)) { 8093 if (vport->vport_id) { 8094 hclge_restore_mac_table_common(vport); 8095 hclge_restore_vport_vlan_table(vport); 8096 } else { 8097 hclge_restore_hw_table(hdev); 8098 } 8099 } 8100 8101 clear_bit(vport->vport_id, hdev->vport_config_block); 8102 8103 return 0; 8104 } 8105 8106 void hclge_vport_stop(struct hclge_vport *vport) 8107 { 8108 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 8109 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 8110 vport->need_notify = 0; 8111 } 8112 8113 static int hclge_client_start(struct hnae3_handle *handle) 8114 { 8115 struct hclge_vport *vport = hclge_get_vport(handle); 8116 8117 return hclge_vport_start(vport); 8118 } 8119 8120 static void hclge_client_stop(struct hnae3_handle *handle) 8121 { 8122 struct hclge_vport *vport = hclge_get_vport(handle); 8123 8124 hclge_vport_stop(vport); 8125 } 8126 8127 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 8128 u16 cmdq_resp, u8 resp_code, 8129 enum hclge_mac_vlan_tbl_opcode op) 8130 { 8131 struct hclge_dev *hdev = vport->back; 8132 8133 if (cmdq_resp) { 8134 dev_err(&hdev->pdev->dev, 8135 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n", 8136 cmdq_resp); 8137 return -EIO; 8138 } 8139 8140 if (op == HCLGE_MAC_VLAN_ADD) { 8141 if (!resp_code || resp_code == 1) 8142 return 0; 8143 else if (resp_code == HCLGE_ADD_UC_OVERFLOW || 8144 resp_code == HCLGE_ADD_MC_OVERFLOW) 8145 return -ENOSPC; 8146 8147 dev_err(&hdev->pdev->dev, 8148 "add mac addr failed for undefined, code=%u.\n", 8149 resp_code); 8150 return -EIO; 8151 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 8152 if (!resp_code) { 8153 return 0; 8154 } else if (resp_code == 1) { 8155 dev_dbg(&hdev->pdev->dev, 8156 "remove mac addr failed for miss.\n"); 8157 return -ENOENT; 8158 } 8159 8160 dev_err(&hdev->pdev->dev, 8161 "remove mac addr failed for undefined, code=%u.\n", 8162 resp_code); 8163 return -EIO; 8164 } else if (op == HCLGE_MAC_VLAN_LKUP) { 8165 if (!resp_code) { 8166 return 0; 8167 } else if (resp_code == 1) { 8168 dev_dbg(&hdev->pdev->dev, 8169 "lookup mac addr failed for miss.\n"); 8170 return -ENOENT; 8171 } 8172 8173 dev_err(&hdev->pdev->dev, 8174 "lookup mac addr failed for undefined, code=%u.\n", 8175 resp_code); 8176 return -EIO; 8177 } 8178 8179 dev_err(&hdev->pdev->dev, 8180 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); 8181 8182 return -EINVAL; 8183 } 8184 8185 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 
8186 { 8187 #define HCLGE_VF_NUM_IN_FIRST_DESC 192 8188 8189 unsigned int word_num; 8190 unsigned int bit_num; 8191 8192 if (vfid > 255 || vfid < 0) 8193 return -EIO; 8194 8195 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { 8196 word_num = vfid / 32; 8197 bit_num = vfid % 32; 8198 if (clr) 8199 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 8200 else 8201 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 8202 } else { 8203 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; 8204 bit_num = vfid % 32; 8205 if (clr) 8206 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 8207 else 8208 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 8209 } 8210 8211 return 0; 8212 } 8213 8214 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 8215 { 8216 #define HCLGE_DESC_NUMBER 3 8217 #define HCLGE_FUNC_NUMBER_PER_DESC 6 8218 int i, j; 8219 8220 for (i = 1; i < HCLGE_DESC_NUMBER; i++) 8221 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 8222 if (desc[i].data[j]) 8223 return false; 8224 8225 return true; 8226 } 8227 8228 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 8229 const u8 *addr, bool is_mc) 8230 { 8231 const unsigned char *mac_addr = addr; 8232 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 8233 (mac_addr[0]) | (mac_addr[1] << 8); 8234 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 8235 8236 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 8237 if (is_mc) { 8238 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 8239 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 8240 } 8241 8242 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 8243 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 8244 } 8245 8246 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 8247 struct hclge_mac_vlan_tbl_entry_cmd *req) 8248 { 8249 struct hclge_dev *hdev = vport->back; 8250 struct hclge_desc desc; 8251 u8 resp_code; 8252 u16 retval; 8253 int ret; 8254 8255 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 8256 8257 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8258 8259 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8260 if (ret) { 8261 dev_err(&hdev->pdev->dev, 8262 "del mac addr failed for cmd_send, ret =%d.\n", 8263 ret); 8264 return ret; 8265 } 8266 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 8267 retval = le16_to_cpu(desc.retval); 8268 8269 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 8270 HCLGE_MAC_VLAN_REMOVE); 8271 } 8272 8273 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 8274 struct hclge_mac_vlan_tbl_entry_cmd *req, 8275 struct hclge_desc *desc, 8276 bool is_mc) 8277 { 8278 struct hclge_dev *hdev = vport->back; 8279 u8 resp_code; 8280 u16 retval; 8281 int ret; 8282 8283 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 8284 if (is_mc) { 8285 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8286 memcpy(desc[0].data, 8287 req, 8288 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8289 hclge_cmd_setup_basic_desc(&desc[1], 8290 HCLGE_OPC_MAC_VLAN_ADD, 8291 true); 8292 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8293 hclge_cmd_setup_basic_desc(&desc[2], 8294 HCLGE_OPC_MAC_VLAN_ADD, 8295 true); 8296 ret = hclge_cmd_send(&hdev->hw, desc, 3); 8297 } else { 8298 memcpy(desc[0].data, 8299 req, 8300 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8301 ret = hclge_cmd_send(&hdev->hw, desc, 1); 8302 } 8303 if (ret) { 8304 dev_err(&hdev->pdev->dev, 8305 "lookup mac 
addr failed for cmd_send, ret =%d.\n", 8306 ret); 8307 return ret; 8308 } 8309 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 8310 retval = le16_to_cpu(desc[0].retval); 8311 8312 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 8313 HCLGE_MAC_VLAN_LKUP); 8314 } 8315 8316 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 8317 struct hclge_mac_vlan_tbl_entry_cmd *req, 8318 struct hclge_desc *mc_desc) 8319 { 8320 struct hclge_dev *hdev = vport->back; 8321 int cfg_status; 8322 u8 resp_code; 8323 u16 retval; 8324 int ret; 8325 8326 if (!mc_desc) { 8327 struct hclge_desc desc; 8328 8329 hclge_cmd_setup_basic_desc(&desc, 8330 HCLGE_OPC_MAC_VLAN_ADD, 8331 false); 8332 memcpy(desc.data, req, 8333 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8334 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8335 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 8336 retval = le16_to_cpu(desc.retval); 8337 8338 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 8339 resp_code, 8340 HCLGE_MAC_VLAN_ADD); 8341 } else { 8342 hclge_comm_cmd_reuse_desc(&mc_desc[0], false); 8343 mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8344 hclge_comm_cmd_reuse_desc(&mc_desc[1], false); 8345 mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8346 hclge_comm_cmd_reuse_desc(&mc_desc[2], false); 8347 mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT); 8348 memcpy(mc_desc[0].data, req, 8349 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8350 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 8351 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 8352 retval = le16_to_cpu(mc_desc[0].retval); 8353 8354 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 8355 resp_code, 8356 HCLGE_MAC_VLAN_ADD); 8357 } 8358 8359 if (ret) { 8360 dev_err(&hdev->pdev->dev, 8361 "add mac addr failed for cmd_send, ret =%d.\n", 8362 ret); 8363 return ret; 8364 } 8365 8366 return cfg_status; 8367 } 8368 8369 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, 8370 u16 *allocated_size) 8371 { 8372 struct hclge_umv_spc_alc_cmd *req; 8373 struct hclge_desc desc; 8374 int ret; 8375 8376 req = (struct hclge_umv_spc_alc_cmd *)desc.data; 8377 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); 8378 8379 req->space_size = cpu_to_le32(space_size); 8380 8381 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8382 if (ret) { 8383 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", 8384 ret); 8385 return ret; 8386 } 8387 8388 *allocated_size = le32_to_cpu(desc.data[1]); 8389 8390 return 0; 8391 } 8392 8393 static int hclge_init_umv_space(struct hclge_dev *hdev) 8394 { 8395 u16 allocated_size = 0; 8396 int ret; 8397 8398 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); 8399 if (ret) 8400 return ret; 8401 8402 if (allocated_size < hdev->wanted_umv_size) 8403 dev_warn(&hdev->pdev->dev, 8404 "failed to alloc umv space, want %u, get %u\n", 8405 hdev->wanted_umv_size, allocated_size); 8406 8407 hdev->max_umv_size = allocated_size; 8408 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); 8409 hdev->share_umv_size = hdev->priv_umv_size + 8410 hdev->max_umv_size % (hdev->num_alloc_vport + 1); 8411 8412 if (hdev->ae_dev->dev_specs.mc_mac_size) 8413 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); 8414 8415 return 0; 8416 } 8417 8418 static void hclge_reset_umv_space(struct hclge_dev *hdev) 8419 { 8420 struct hclge_vport *vport; 8421 int i; 8422 8423 for (i = 0; i < hdev->num_alloc_vport; i++) { 8424 vport = 
&hdev->vport[i]; 8425 vport->used_umv_num = 0; 8426 } 8427 8428 mutex_lock(&hdev->vport_lock); 8429 hdev->share_umv_size = hdev->priv_umv_size + 8430 hdev->max_umv_size % (hdev->num_alloc_vport + 1); 8431 mutex_unlock(&hdev->vport_lock); 8432 8433 hdev->used_mc_mac_num = 0; 8434 } 8435 8436 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) 8437 { 8438 struct hclge_dev *hdev = vport->back; 8439 bool is_full; 8440 8441 if (need_lock) 8442 mutex_lock(&hdev->vport_lock); 8443 8444 is_full = (vport->used_umv_num >= hdev->priv_umv_size && 8445 hdev->share_umv_size == 0); 8446 8447 if (need_lock) 8448 mutex_unlock(&hdev->vport_lock); 8449 8450 return is_full; 8451 } 8452 8453 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) 8454 { 8455 struct hclge_dev *hdev = vport->back; 8456 8457 if (is_free) { 8458 if (vport->used_umv_num > hdev->priv_umv_size) 8459 hdev->share_umv_size++; 8460 8461 if (vport->used_umv_num > 0) 8462 vport->used_umv_num--; 8463 } else { 8464 if (vport->used_umv_num >= hdev->priv_umv_size && 8465 hdev->share_umv_size > 0) 8466 hdev->share_umv_size--; 8467 vport->used_umv_num++; 8468 } 8469 } 8470 8471 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, 8472 const u8 *mac_addr) 8473 { 8474 struct hclge_mac_node *mac_node, *tmp; 8475 8476 list_for_each_entry_safe(mac_node, tmp, list, node) 8477 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 8478 return mac_node; 8479 8480 return NULL; 8481 } 8482 8483 static void hclge_update_mac_node(struct hclge_mac_node *mac_node, 8484 enum HCLGE_MAC_NODE_STATE state) 8485 { 8486 switch (state) { 8487 /* from set_rx_mode or tmp_add_list */ 8488 case HCLGE_MAC_TO_ADD: 8489 if (mac_node->state == HCLGE_MAC_TO_DEL) 8490 mac_node->state = HCLGE_MAC_ACTIVE; 8491 break; 8492 /* only from set_rx_mode */ 8493 case HCLGE_MAC_TO_DEL: 8494 if (mac_node->state == HCLGE_MAC_TO_ADD) { 8495 list_del(&mac_node->node); 8496 kfree(mac_node); 8497 } else { 8498 mac_node->state = HCLGE_MAC_TO_DEL; 8499 } 8500 break; 8501 /* only from tmp_add_list, the mac_node->state won't be 8502 * ACTIVE. 8503 */ 8504 case HCLGE_MAC_ACTIVE: 8505 if (mac_node->state == HCLGE_MAC_TO_ADD) 8506 mac_node->state = HCLGE_MAC_ACTIVE; 8507 8508 break; 8509 } 8510 } 8511 8512 int hclge_update_mac_list(struct hclge_vport *vport, 8513 enum HCLGE_MAC_NODE_STATE state, 8514 enum HCLGE_MAC_ADDR_TYPE mac_type, 8515 const unsigned char *addr) 8516 { 8517 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8518 struct hclge_dev *hdev = vport->back; 8519 struct hclge_mac_node *mac_node; 8520 struct list_head *list; 8521 8522 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 8523 &vport->uc_mac_list : &vport->mc_mac_list; 8524 8525 spin_lock_bh(&vport->mac_list_lock); 8526 8527 /* if the mac addr is already in the mac list, no need to add a new 8528 * one into it, just check the mac addr state, convert it to a new 8529 * state, or just remove it, or do nothing. 
	 */
	mac_node = hclge_find_mac_node(list, addr);
	if (mac_node) {
		hclge_update_mac_node(mac_node, state);
		spin_unlock_bh(&vport->mac_list_lock);
		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
		return 0;
	}

	/* if this address has never been added, there is nothing to delete */
	if (state == HCLGE_MAC_TO_DEL) {
		spin_unlock_bh(&vport->mac_list_lock);
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"failed to delete address %s from mac list\n",
			format_mac_addr);
		return -ENOENT;
	}

	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
	if (!mac_node) {
		spin_unlock_bh(&vport->mac_list_lock);
		return -ENOMEM;
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	mac_node->state = state;
	ether_addr_copy(mac_node->mac_addr, addr);
	list_add_tail(&mac_node->node, list);

	spin_unlock_bh(&vport->mac_list_lock);

	return 0;
}

static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
			format_mac_addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac vlan table.
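	 * A lookup miss (-ENOENT) means the address can be added below if
	 * this vport still has UMV space; a lookup hit is reported back as
	 * -EEXIST, i.e. the address is already programmed.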
8610 */ 8611 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 8612 if (ret == -ENOENT) { 8613 mutex_lock(&hdev->vport_lock); 8614 if (!hclge_is_umv_space_full(vport, false)) { 8615 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); 8616 if (!ret) 8617 hclge_update_umv_space(vport, false); 8618 mutex_unlock(&hdev->vport_lock); 8619 return ret; 8620 } 8621 mutex_unlock(&hdev->vport_lock); 8622 8623 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) 8624 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", 8625 hdev->priv_umv_size); 8626 8627 return -ENOSPC; 8628 } 8629 8630 /* check if we just hit the duplicate */ 8631 if (!ret) 8632 return -EEXIST; 8633 8634 return ret; 8635 } 8636 8637 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 8638 const unsigned char *addr) 8639 { 8640 struct hclge_vport *vport = hclge_get_vport(handle); 8641 8642 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC, 8643 addr); 8644 } 8645 8646 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 8647 const unsigned char *addr) 8648 { 8649 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8650 struct hclge_dev *hdev = vport->back; 8651 struct hclge_mac_vlan_tbl_entry_cmd req; 8652 int ret; 8653 8654 /* mac addr check */ 8655 if (is_zero_ether_addr(addr) || 8656 is_broadcast_ether_addr(addr) || 8657 is_multicast_ether_addr(addr)) { 8658 hnae3_format_mac_addr(format_mac_addr, addr); 8659 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", 8660 format_mac_addr); 8661 return -EINVAL; 8662 } 8663 8664 memset(&req, 0, sizeof(req)); 8665 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 8666 hclge_prepare_mac_addr(&req, addr, false); 8667 ret = hclge_remove_mac_vlan_tbl(vport, &req); 8668 if (!ret || ret == -ENOENT) { 8669 mutex_lock(&hdev->vport_lock); 8670 hclge_update_umv_space(vport, true); 8671 mutex_unlock(&hdev->vport_lock); 8672 return 0; 8673 } 8674 8675 return ret; 8676 } 8677 8678 static int hclge_add_mc_addr(struct hnae3_handle *handle, 8679 const unsigned char *addr) 8680 { 8681 struct hclge_vport *vport = hclge_get_vport(handle); 8682 8683 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC, 8684 addr); 8685 } 8686 8687 int hclge_add_mc_addr_common(struct hclge_vport *vport, 8688 const unsigned char *addr) 8689 { 8690 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8691 struct hclge_dev *hdev = vport->back; 8692 struct hclge_mac_vlan_tbl_entry_cmd req; 8693 struct hclge_desc desc[3]; 8694 bool is_new_addr = false; 8695 int status; 8696 8697 /* mac addr check */ 8698 if (!is_multicast_ether_addr(addr)) { 8699 hnae3_format_mac_addr(format_mac_addr, addr); 8700 dev_err(&hdev->pdev->dev, 8701 "Add mc mac err! 
invalid mac:%s.\n", 8702 format_mac_addr); 8703 return -EINVAL; 8704 } 8705 memset(&req, 0, sizeof(req)); 8706 hclge_prepare_mac_addr(&req, addr, true); 8707 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 8708 if (status) { 8709 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && 8710 hdev->used_mc_mac_num >= 8711 hdev->ae_dev->dev_specs.mc_mac_size) 8712 goto err_no_space; 8713 8714 is_new_addr = true; 8715 8716 /* This mac addr do not exist, add new entry for it */ 8717 memset(desc[0].data, 0, sizeof(desc[0].data)); 8718 memset(desc[1].data, 0, sizeof(desc[0].data)); 8719 memset(desc[2].data, 0, sizeof(desc[0].data)); 8720 } 8721 status = hclge_update_desc_vfid(desc, vport->vport_id, false); 8722 if (status) 8723 return status; 8724 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 8725 if (status == -ENOSPC) 8726 goto err_no_space; 8727 else if (!status && is_new_addr) 8728 hdev->used_mc_mac_num++; 8729 8730 return status; 8731 8732 err_no_space: 8733 /* if already overflow, not to print each time */ 8734 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) { 8735 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; 8736 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); 8737 } 8738 8739 return -ENOSPC; 8740 } 8741 8742 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 8743 const unsigned char *addr) 8744 { 8745 struct hclge_vport *vport = hclge_get_vport(handle); 8746 8747 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC, 8748 addr); 8749 } 8750 8751 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 8752 const unsigned char *addr) 8753 { 8754 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8755 struct hclge_dev *hdev = vport->back; 8756 struct hclge_mac_vlan_tbl_entry_cmd req; 8757 enum hclge_comm_cmd_status status; 8758 struct hclge_desc desc[3]; 8759 8760 /* mac addr check */ 8761 if (!is_multicast_ether_addr(addr)) { 8762 hnae3_format_mac_addr(format_mac_addr, addr); 8763 dev_dbg(&hdev->pdev->dev, 8764 "Remove mc mac err! 
invalid mac:%s.\n", 8765 format_mac_addr); 8766 return -EINVAL; 8767 } 8768 8769 memset(&req, 0, sizeof(req)); 8770 hclge_prepare_mac_addr(&req, addr, true); 8771 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 8772 if (!status) { 8773 /* This mac addr exist, remove this handle's VFID for it */ 8774 status = hclge_update_desc_vfid(desc, vport->vport_id, true); 8775 if (status) 8776 return status; 8777 8778 if (hclge_is_all_function_id_zero(desc)) { 8779 /* All the vfid is zero, so need to delete this entry */ 8780 status = hclge_remove_mac_vlan_tbl(vport, &req); 8781 if (!status) 8782 hdev->used_mc_mac_num--; 8783 } else { 8784 /* Not all the vfid is zero, update the vfid */ 8785 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 8786 } 8787 } else if (status == -ENOENT) { 8788 status = 0; 8789 } 8790 8791 return status; 8792 } 8793 8794 static void hclge_sync_vport_mac_list(struct hclge_vport *vport, 8795 struct list_head *list, 8796 enum HCLGE_MAC_ADDR_TYPE mac_type) 8797 { 8798 int (*sync)(struct hclge_vport *vport, const unsigned char *addr); 8799 struct hclge_mac_node *mac_node, *tmp; 8800 int ret; 8801 8802 if (mac_type == HCLGE_MAC_ADDR_UC) 8803 sync = hclge_add_uc_addr_common; 8804 else 8805 sync = hclge_add_mc_addr_common; 8806 8807 list_for_each_entry_safe(mac_node, tmp, list, node) { 8808 ret = sync(vport, mac_node->mac_addr); 8809 if (!ret) { 8810 mac_node->state = HCLGE_MAC_ACTIVE; 8811 } else { 8812 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, 8813 &vport->state); 8814 8815 /* If one unicast mac address is existing in hardware, 8816 * we need to try whether other unicast mac addresses 8817 * are new addresses that can be added. 8818 * Multicast mac address can be reusable, even though 8819 * there is no space to add new multicast mac address, 8820 * we should check whether other mac addresses are 8821 * existing in hardware for reuse. 8822 */ 8823 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) || 8824 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC)) 8825 break; 8826 } 8827 } 8828 } 8829 8830 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, 8831 struct list_head *list, 8832 enum HCLGE_MAC_ADDR_TYPE mac_type) 8833 { 8834 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); 8835 struct hclge_mac_node *mac_node, *tmp; 8836 int ret; 8837 8838 if (mac_type == HCLGE_MAC_ADDR_UC) 8839 unsync = hclge_rm_uc_addr_common; 8840 else 8841 unsync = hclge_rm_mc_addr_common; 8842 8843 list_for_each_entry_safe(mac_node, tmp, list, node) { 8844 ret = unsync(vport, mac_node->mac_addr); 8845 if (!ret || ret == -ENOENT) { 8846 list_del(&mac_node->node); 8847 kfree(mac_node); 8848 } else { 8849 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, 8850 &vport->state); 8851 break; 8852 } 8853 } 8854 } 8855 8856 static bool hclge_sync_from_add_list(struct list_head *add_list, 8857 struct list_head *mac_list) 8858 { 8859 struct hclge_mac_node *mac_node, *tmp, *new_node; 8860 bool all_added = true; 8861 8862 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 8863 if (mac_node->state == HCLGE_MAC_TO_ADD) 8864 all_added = false; 8865 8866 /* if the mac address from tmp_add_list is not in the 8867 * uc/mc_mac_list, it means have received a TO_DEL request 8868 * during the time window of adding the mac address into mac 8869 * table. if mac_node state is ACTIVE, then change it to TO_DEL, 8870 * then it will be removed at next time. else it must be TO_ADD, 8871 * this address hasn't been added into mac table, 8872 * so just remove the mac node. 
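		 * In code terms: a node still found in mac_list has this state
		 * merged into it, an ACTIVE node is flipped to TO_DEL and moved
		 * back to mac_list, and a TO_ADD node is simply freed.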
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}

	return all_added;
}

static void hclge_sync_from_del_list(struct list_head *del_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, it means
			 * received a new TO_ADD request during the time window
			 * of configuring the mac address. Since the mac node
			 * state is TO_ADD and the address is already in the
			 * hardware (because the delete failed), we just need
			 * to change the mac node state to ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}

static void hclge_update_overflow_flags(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type,
					bool is_all_added)
{
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
		else if (hclge_is_umv_space_full(vport, true))
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
	} else {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
	}
}

static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
				       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;
	bool all_added;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addr to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addr outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
	hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);

	/* if some mac addresses failed to be added or deleted, move them back
	 * to the mac_list and retry at the next service task.
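	 * After the lists are merged back below, the overflow promisc flags
	 * are refreshed based on whether every address could be programmed.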
8978 */ 8979 spin_lock_bh(&vport->mac_list_lock); 8980 8981 hclge_sync_from_del_list(&tmp_del_list, list); 8982 all_added = hclge_sync_from_add_list(&tmp_add_list, list); 8983 8984 spin_unlock_bh(&vport->mac_list_lock); 8985 8986 hclge_update_overflow_flags(vport, mac_type, all_added); 8987 } 8988 8989 static bool hclge_need_sync_mac_table(struct hclge_vport *vport) 8990 { 8991 struct hclge_dev *hdev = vport->back; 8992 8993 if (test_bit(vport->vport_id, hdev->vport_config_block)) 8994 return false; 8995 8996 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) 8997 return true; 8998 8999 return false; 9000 } 9001 9002 static void hclge_sync_mac_table(struct hclge_dev *hdev) 9003 { 9004 int i; 9005 9006 for (i = 0; i < hdev->num_alloc_vport; i++) { 9007 struct hclge_vport *vport = &hdev->vport[i]; 9008 9009 if (!hclge_need_sync_mac_table(vport)) 9010 continue; 9011 9012 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC); 9013 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC); 9014 } 9015 } 9016 9017 static void hclge_build_del_list(struct list_head *list, 9018 bool is_del_list, 9019 struct list_head *tmp_del_list) 9020 { 9021 struct hclge_mac_node *mac_cfg, *tmp; 9022 9023 list_for_each_entry_safe(mac_cfg, tmp, list, node) { 9024 switch (mac_cfg->state) { 9025 case HCLGE_MAC_TO_DEL: 9026 case HCLGE_MAC_ACTIVE: 9027 list_move_tail(&mac_cfg->node, tmp_del_list); 9028 break; 9029 case HCLGE_MAC_TO_ADD: 9030 if (is_del_list) { 9031 list_del(&mac_cfg->node); 9032 kfree(mac_cfg); 9033 } 9034 break; 9035 } 9036 } 9037 } 9038 9039 static void hclge_unsync_del_list(struct hclge_vport *vport, 9040 int (*unsync)(struct hclge_vport *vport, 9041 const unsigned char *addr), 9042 bool is_del_list, 9043 struct list_head *tmp_del_list) 9044 { 9045 struct hclge_mac_node *mac_cfg, *tmp; 9046 int ret; 9047 9048 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) { 9049 ret = unsync(vport, mac_cfg->mac_addr); 9050 if (!ret || ret == -ENOENT) { 9051 /* clear all mac addr from hardware, but remain these 9052 * mac addr in the mac list, and restore them after 9053 * vf reset finished. 
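			 * This is the !is_del_list case: ACTIVE entries are
			 * flipped back to TO_ADD below so the service task
			 * reprograms them once the VF reset has finished.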
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_build_del_list(list, is_del_list, &tmp_del_list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}

/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "uninit %s mac list for vport %u not completely.\n",
			 mac_type == HCLGE_MAC_ADDR_UC ?
"uc" : "mc", 9139 vport->vport_id); 9140 9141 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { 9142 list_del(&mac_node->node); 9143 kfree(mac_node); 9144 } 9145 } 9146 9147 static void hclge_uninit_mac_table(struct hclge_dev *hdev) 9148 { 9149 struct hclge_vport *vport; 9150 int i; 9151 9152 for (i = 0; i < hdev->num_alloc_vport; i++) { 9153 vport = &hdev->vport[i]; 9154 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC); 9155 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC); 9156 } 9157 } 9158 9159 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 9160 u16 cmdq_resp, u8 resp_code) 9161 { 9162 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 9163 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 9164 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 9165 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 9166 9167 int return_status; 9168 9169 if (cmdq_resp) { 9170 dev_err(&hdev->pdev->dev, 9171 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 9172 cmdq_resp); 9173 return -EIO; 9174 } 9175 9176 switch (resp_code) { 9177 case HCLGE_ETHERTYPE_SUCCESS_ADD: 9178 case HCLGE_ETHERTYPE_ALREADY_ADD: 9179 return_status = 0; 9180 break; 9181 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 9182 dev_err(&hdev->pdev->dev, 9183 "add mac ethertype failed for manager table overflow.\n"); 9184 return_status = -EIO; 9185 break; 9186 case HCLGE_ETHERTYPE_KEY_CONFLICT: 9187 dev_err(&hdev->pdev->dev, 9188 "add mac ethertype failed for key conflict.\n"); 9189 return_status = -EIO; 9190 break; 9191 default: 9192 dev_err(&hdev->pdev->dev, 9193 "add mac ethertype failed for undefined, code=%u.\n", 9194 resp_code); 9195 return_status = -EIO; 9196 } 9197 9198 return return_status; 9199 } 9200 9201 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, 9202 u8 *mac_addr) 9203 { 9204 struct hclge_vport *vport = hclge_get_vport(handle); 9205 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 9206 struct hclge_dev *hdev = vport->back; 9207 9208 vport = hclge_get_vf_vport(hdev, vf); 9209 if (!vport) 9210 return -EINVAL; 9211 9212 hnae3_format_mac_addr(format_mac_addr, mac_addr); 9213 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { 9214 dev_info(&hdev->pdev->dev, 9215 "Specified MAC(=%s) is same as before, no change committed!\n", 9216 format_mac_addr); 9217 return 0; 9218 } 9219 9220 ether_addr_copy(vport->vf_info.mac, mac_addr); 9221 9222 /* there is a timewindow for PF to know VF unalive, it may 9223 * cause send mailbox fail, but it doesn't matter, VF will 9224 * query it when reinit. 
9225 */ 9226 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { 9227 dev_info(&hdev->pdev->dev, 9228 "MAC of VF %d has been set to %s, and it will be reinitialized!\n", 9229 vf, format_mac_addr); 9230 (void)hclge_inform_reset_assert_to_vf(vport); 9231 return 0; 9232 } 9233 9234 dev_info(&hdev->pdev->dev, 9235 "MAC of VF %d has been set to %s, will be active after VF reset\n", 9236 vf, format_mac_addr); 9237 return 0; 9238 } 9239 9240 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 9241 const struct hclge_mac_mgr_tbl_entry_cmd *req) 9242 { 9243 struct hclge_desc desc; 9244 u8 resp_code; 9245 u16 retval; 9246 int ret; 9247 9248 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 9249 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 9250 9251 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9252 if (ret) { 9253 dev_err(&hdev->pdev->dev, 9254 "add mac ethertype failed for cmd_send, ret =%d.\n", 9255 ret); 9256 return ret; 9257 } 9258 9259 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 9260 retval = le16_to_cpu(desc.retval); 9261 9262 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 9263 } 9264 9265 static int init_mgr_tbl(struct hclge_dev *hdev) 9266 { 9267 int ret; 9268 int i; 9269 9270 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 9271 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 9272 if (ret) { 9273 dev_err(&hdev->pdev->dev, 9274 "add mac ethertype failed, ret =%d.\n", 9275 ret); 9276 return ret; 9277 } 9278 } 9279 9280 return 0; 9281 } 9282 9283 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 9284 { 9285 struct hclge_vport *vport = hclge_get_vport(handle); 9286 struct hclge_dev *hdev = vport->back; 9287 9288 ether_addr_copy(p, hdev->hw.mac.mac_addr); 9289 } 9290 9291 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, 9292 const u8 *old_addr, const u8 *new_addr) 9293 { 9294 struct list_head *list = &vport->uc_mac_list; 9295 struct hclge_mac_node *old_node, *new_node; 9296 9297 new_node = hclge_find_mac_node(list, new_addr); 9298 if (!new_node) { 9299 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 9300 if (!new_node) 9301 return -ENOMEM; 9302 9303 new_node->state = HCLGE_MAC_TO_ADD; 9304 ether_addr_copy(new_node->mac_addr, new_addr); 9305 list_add(&new_node->node, list); 9306 } else { 9307 if (new_node->state == HCLGE_MAC_TO_DEL) 9308 new_node->state = HCLGE_MAC_ACTIVE; 9309 9310 /* make sure the new addr is in the list head, avoid dev 9311 * addr may be not re-added into mac table for the umv space 9312 * limitation after global/imp reset which will clear mac 9313 * table by hardware. 
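		 * list_move() below places the device address at the head of
		 * uc_mac_list, so it is the first entry synced back to the
		 * MAC_VLAN table when the list is restored.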
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	unsigned char *old_addr = NULL;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
		dev_err(&hdev->pdev->dev,
			"change uc mac err! invalid mac: %s.\n",
			format_mac_addr);
		return -EINVAL;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to configure mac pause address, ret = %d\n",
			ret);
		return ret;
	}

	if (!is_first)
		old_addr = hdev->hw.mac.mac_addr;

	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%s, ret = %d\n",
			format_mac_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);

		return ret;
	}
	/* we must update the dev addr under mac_list_lock protection, to
	 * prevent the dev addr from being removed by the set_rx_mode path.
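	 * The new address is only recorded in uc_mac_list here;
	 * hclge_task_schedule() below kicks the service task, which programs
	 * it into the MAC_VLAN table via hclge_sync_mac_table().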
9383 */ 9384 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 9385 spin_unlock_bh(&vport->mac_list_lock); 9386 9387 hclge_task_schedule(hdev, 0); 9388 9389 return 0; 9390 } 9391 9392 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) 9393 { 9394 struct mii_ioctl_data *data = if_mii(ifr); 9395 9396 if (!hnae3_dev_phy_imp_supported(hdev)) 9397 return -EOPNOTSUPP; 9398 9399 switch (cmd) { 9400 case SIOCGMIIPHY: 9401 data->phy_id = hdev->hw.mac.phy_addr; 9402 /* this command reads phy id and register at the same time */ 9403 fallthrough; 9404 case SIOCGMIIREG: 9405 data->val_out = hclge_read_phy_reg(hdev, data->reg_num); 9406 return 0; 9407 9408 case SIOCSMIIREG: 9409 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); 9410 default: 9411 return -EOPNOTSUPP; 9412 } 9413 } 9414 9415 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, 9416 int cmd) 9417 { 9418 struct hclge_vport *vport = hclge_get_vport(handle); 9419 struct hclge_dev *hdev = vport->back; 9420 9421 switch (cmd) { 9422 case SIOCGHWTSTAMP: 9423 return hclge_ptp_get_cfg(hdev, ifr); 9424 case SIOCSHWTSTAMP: 9425 return hclge_ptp_set_cfg(hdev, ifr); 9426 default: 9427 if (!hdev->hw.mac.phydev) 9428 return hclge_mii_ioctl(hdev, ifr, cmd); 9429 } 9430 9431 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); 9432 } 9433 9434 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, 9435 bool bypass_en) 9436 { 9437 struct hclge_port_vlan_filter_bypass_cmd *req; 9438 struct hclge_desc desc; 9439 int ret; 9440 9441 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false); 9442 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; 9443 req->vf_id = vf_id; 9444 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B, 9445 bypass_en ? 1 : 0); 9446 9447 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9448 if (ret) 9449 dev_err(&hdev->pdev->dev, 9450 "failed to set vport%u port vlan filter bypass state, ret = %d.\n", 9451 vf_id, ret); 9452 9453 return ret; 9454 } 9455 9456 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 9457 u8 fe_type, bool filter_en, u8 vf_id) 9458 { 9459 struct hclge_vlan_filter_ctrl_cmd *req; 9460 struct hclge_desc desc; 9461 int ret; 9462 9463 /* read current vlan filter parameter */ 9464 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); 9465 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 9466 req->vlan_type = vlan_type; 9467 req->vf_id = vf_id; 9468 9469 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9470 if (ret) { 9471 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", 9472 vf_id, ret); 9473 return ret; 9474 } 9475 9476 /* modify and write new config parameter */ 9477 hclge_comm_cmd_reuse_desc(&desc, false); 9478 req->vlan_fe = filter_en ? 
9479 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); 9480 9481 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9482 if (ret) 9483 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", 9484 vf_id, ret); 9485 9486 return ret; 9487 } 9488 9489 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) 9490 { 9491 struct hclge_dev *hdev = vport->back; 9492 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 9493 int ret; 9494 9495 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 9496 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9497 HCLGE_FILTER_FE_EGRESS_V1_B, 9498 enable, vport->vport_id); 9499 9500 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9501 HCLGE_FILTER_FE_EGRESS, enable, 9502 vport->vport_id); 9503 if (ret) 9504 return ret; 9505 9506 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { 9507 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, 9508 !enable); 9509 } else if (!vport->vport_id) { 9510 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 9511 enable = false; 9512 9513 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 9514 HCLGE_FILTER_FE_INGRESS, 9515 enable, 0); 9516 } 9517 9518 return ret; 9519 } 9520 9521 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) 9522 { 9523 struct hnae3_handle *handle = &vport->nic; 9524 struct hclge_vport_vlan_cfg *vlan, *tmp; 9525 struct hclge_dev *hdev = vport->back; 9526 9527 if (vport->vport_id) { 9528 if (vport->port_base_vlan_cfg.state != 9529 HNAE3_PORT_BASE_VLAN_DISABLE) 9530 return true; 9531 9532 if (vport->vf_info.trusted && vport->vf_info.request_uc_en) 9533 return false; 9534 } else if (handle->netdev_flags & HNAE3_USER_UPE) { 9535 return false; 9536 } 9537 9538 if (!vport->req_vlan_fltr_en) 9539 return false; 9540 9541 /* compatible with former device, always enable vlan filter */ 9542 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) 9543 return true; 9544 9545 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) 9546 if (vlan->vlan_id != 0) 9547 return true; 9548 9549 return false; 9550 } 9551 9552 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) 9553 { 9554 struct hclge_dev *hdev = vport->back; 9555 bool need_en; 9556 int ret; 9557 9558 mutex_lock(&hdev->vport_lock); 9559 9560 vport->req_vlan_fltr_en = request_en; 9561 9562 need_en = hclge_need_enable_vport_vlan_filter(vport); 9563 if (need_en == vport->cur_vlan_fltr_en) { 9564 mutex_unlock(&hdev->vport_lock); 9565 return 0; 9566 } 9567 9568 ret = hclge_set_vport_vlan_filter(vport, need_en); 9569 if (ret) { 9570 mutex_unlock(&hdev->vport_lock); 9571 return ret; 9572 } 9573 9574 vport->cur_vlan_fltr_en = need_en; 9575 9576 mutex_unlock(&hdev->vport_lock); 9577 9578 return 0; 9579 } 9580 9581 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 9582 { 9583 struct hclge_vport *vport = hclge_get_vport(handle); 9584 9585 return hclge_enable_vport_vlan_filter(vport, enable); 9586 } 9587 9588 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, 9589 bool is_kill, u16 vlan, 9590 struct hclge_desc *desc) 9591 { 9592 struct hclge_vlan_filter_vf_cfg_cmd *req0; 9593 struct hclge_vlan_filter_vf_cfg_cmd *req1; 9594 u8 vf_byte_val; 9595 u8 vf_byte_off; 9596 int ret; 9597 9598 hclge_cmd_setup_basic_desc(&desc[0], 9599 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 9600 hclge_cmd_setup_basic_desc(&desc[1], 9601 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 
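	/* The two descriptors below are chained via the NEXT flag so the VF
	 * bitmap can span both of them: vfid selects a single bit in a byte
	 * array, e.g. vfid 10 lands in byte 10 / 8 = 1 with value
	 * 1 << (10 % 8) = 0x04, and bytes beyond HCLGE_MAX_VF_BYTES go into
	 * the second descriptor.
	 */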
9602 9603 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 9604 9605 vf_byte_off = vfid / 8; 9606 vf_byte_val = 1 << (vfid % 8); 9607 9608 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 9609 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 9610 9611 req0->vlan_id = cpu_to_le16(vlan); 9612 req0->vlan_cfg = is_kill; 9613 9614 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 9615 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 9616 else 9617 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 9618 9619 ret = hclge_cmd_send(&hdev->hw, desc, 2); 9620 if (ret) { 9621 dev_err(&hdev->pdev->dev, 9622 "Send vf vlan command fail, ret =%d.\n", 9623 ret); 9624 return ret; 9625 } 9626 9627 return 0; 9628 } 9629 9630 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid, 9631 bool is_kill, struct hclge_desc *desc) 9632 { 9633 struct hclge_vlan_filter_vf_cfg_cmd *req; 9634 9635 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 9636 9637 if (!is_kill) { 9638 #define HCLGE_VF_VLAN_NO_ENTRY 2 9639 if (!req->resp_code || req->resp_code == 1) 9640 return 0; 9641 9642 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { 9643 set_bit(vfid, hdev->vf_vlan_full); 9644 dev_warn(&hdev->pdev->dev, 9645 "vf vlan table is full, vf vlan filter is disabled\n"); 9646 return 0; 9647 } 9648 9649 dev_err(&hdev->pdev->dev, 9650 "Add vf vlan filter fail, ret =%u.\n", 9651 req->resp_code); 9652 } else { 9653 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 9654 if (!req->resp_code) 9655 return 0; 9656 9657 /* vf vlan filter is disabled when vf vlan table is full, 9658 * then new vlan id will not be added into vf vlan table. 9659 * Just return 0 without warning, avoid massive verbose 9660 * print logs when unload. 9661 */ 9662 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) 9663 return 0; 9664 9665 dev_err(&hdev->pdev->dev, 9666 "Kill vf vlan filter fail, ret =%u.\n", 9667 req->resp_code); 9668 } 9669 9670 return -EIO; 9671 } 9672 9673 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, 9674 bool is_kill, u16 vlan) 9675 { 9676 struct hclge_vport *vport = &hdev->vport[vfid]; 9677 struct hclge_desc desc[2]; 9678 int ret; 9679 9680 /* if vf vlan table is full, firmware will close vf vlan filter, it 9681 * is unable and unnecessary to add new vlan id to vf vlan filter. 9682 * If spoof check is enable, and vf vlan is full, it shouldn't add 9683 * new vlan, because tx packets with these vlan id will be dropped. 
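	 * Note: hdev->vf_vlan_full is set when the firmware reports
	 * HCLGE_VF_VLAN_NO_ENTRY for an add request, see
	 * hclge_check_vf_vlan_cmd_status() above.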
9684 */ 9685 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { 9686 if (vport->vf_info.spoofchk && vlan) { 9687 dev_err(&hdev->pdev->dev, 9688 "Can't add vlan due to spoof check is on and vf vlan table is full\n"); 9689 return -EPERM; 9690 } 9691 return 0; 9692 } 9693 9694 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); 9695 if (ret) 9696 return ret; 9697 9698 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); 9699 } 9700 9701 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 9702 u16 vlan_id, bool is_kill) 9703 { 9704 struct hclge_vlan_filter_pf_cfg_cmd *req; 9705 struct hclge_desc desc; 9706 u8 vlan_offset_byte_val; 9707 u8 vlan_offset_byte; 9708 u8 vlan_offset_160; 9709 int ret; 9710 9711 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 9712 9713 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; 9714 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / 9715 HCLGE_VLAN_BYTE_SIZE; 9716 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); 9717 9718 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 9719 req->vlan_offset = vlan_offset_160; 9720 req->vlan_cfg = is_kill; 9721 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 9722 9723 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9724 if (ret) 9725 dev_err(&hdev->pdev->dev, 9726 "port vlan command, send fail, ret =%d.\n", ret); 9727 return ret; 9728 } 9729 9730 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, 9731 u16 vlan_id, bool is_kill) 9732 { 9733 /* vlan 0 may be added twice when 8021q module is enabled */ 9734 if (!is_kill && !vlan_id && 9735 test_bit(vport_id, hdev->vlan_table[vlan_id])) 9736 return false; 9737 9738 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 9739 dev_warn(&hdev->pdev->dev, 9740 "Add port vlan failed, vport %u is already in vlan %u\n", 9741 vport_id, vlan_id); 9742 return false; 9743 } 9744 9745 if (is_kill && 9746 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 9747 dev_warn(&hdev->pdev->dev, 9748 "Delete port vlan failed, vport %u is not in vlan %u\n", 9749 vport_id, vlan_id); 9750 return false; 9751 } 9752 9753 return true; 9754 } 9755 9756 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 9757 u16 vport_id, u16 vlan_id, 9758 bool is_kill) 9759 { 9760 u16 vport_idx, vport_num = 0; 9761 int ret; 9762 9763 if (is_kill && !vlan_id) 9764 return 0; 9765 9766 if (vlan_id >= VLAN_N_VID) 9767 return -EINVAL; 9768 9769 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); 9770 if (ret) { 9771 dev_err(&hdev->pdev->dev, 9772 "Set %u vport vlan filter config fail, ret =%d.\n", 9773 vport_id, ret); 9774 return ret; 9775 } 9776 9777 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) 9778 return 0; 9779 9780 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) 9781 vport_num++; 9782 9783 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 9784 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 9785 is_kill); 9786 9787 return ret; 9788 } 9789 9790 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 9791 { 9792 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 9793 struct hclge_vport_vtag_tx_cfg_cmd *req; 9794 struct hclge_dev *hdev = vport->back; 9795 struct hclge_desc desc; 9796 u16 bmap_index; 9797 int status; 9798 9799 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 9800 9801 req = (struct 
hclge_vport_vtag_tx_cfg_cmd *)desc.data; 9802 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 9803 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 9804 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 9805 vcfg->accept_tag1 ? 1 : 0); 9806 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 9807 vcfg->accept_untag1 ? 1 : 0); 9808 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 9809 vcfg->accept_tag2 ? 1 : 0); 9810 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 9811 vcfg->accept_untag2 ? 1 : 0); 9812 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 9813 vcfg->insert_tag1_en ? 1 : 0); 9814 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 9815 vcfg->insert_tag2_en ? 1 : 0); 9816 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, 9817 vcfg->tag_shift_mode_en ? 1 : 0); 9818 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 9819 9820 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 9821 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 9822 HCLGE_VF_NUM_PER_BYTE; 9823 req->vf_bitmap[bmap_index] = 9824 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 9825 9826 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9827 if (status) 9828 dev_err(&hdev->pdev->dev, 9829 "Send port txvlan cfg command fail, ret =%d\n", 9830 status); 9831 9832 return status; 9833 } 9834 9835 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 9836 { 9837 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 9838 struct hclge_vport_vtag_rx_cfg_cmd *req; 9839 struct hclge_dev *hdev = vport->back; 9840 struct hclge_desc desc; 9841 u16 bmap_index; 9842 int status; 9843 9844 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 9845 9846 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 9847 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 9848 vcfg->strip_tag1_en ? 1 : 0); 9849 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 9850 vcfg->strip_tag2_en ? 1 : 0); 9851 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 9852 vcfg->vlan1_vlan_prionly ? 1 : 0); 9853 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 9854 vcfg->vlan2_vlan_prionly ? 1 : 0); 9855 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, 9856 vcfg->strip_tag1_discard_en ? 1 : 0); 9857 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, 9858 vcfg->strip_tag2_discard_en ? 
1 : 0); 9859 9860 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 9861 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 9862 HCLGE_VF_NUM_PER_BYTE; 9863 req->vf_bitmap[bmap_index] = 9864 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 9865 9866 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9867 if (status) 9868 dev_err(&hdev->pdev->dev, 9869 "Send port rxvlan cfg command fail, ret =%d\n", 9870 status); 9871 9872 return status; 9873 } 9874 9875 static int hclge_vlan_offload_cfg(struct hclge_vport *vport, 9876 u16 port_base_vlan_state, 9877 u16 vlan_tag, u8 qos) 9878 { 9879 int ret; 9880 9881 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 9882 vport->txvlan_cfg.accept_tag1 = true; 9883 vport->txvlan_cfg.insert_tag1_en = false; 9884 vport->txvlan_cfg.default_tag1 = 0; 9885 } else { 9886 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); 9887 9888 vport->txvlan_cfg.accept_tag1 = 9889 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; 9890 vport->txvlan_cfg.insert_tag1_en = true; 9891 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | 9892 vlan_tag; 9893 } 9894 9895 vport->txvlan_cfg.accept_untag1 = true; 9896 9897 /* accept_tag2 and accept_untag2 are not supported on 9898 * pdev revision(0x20), new revision support them, 9899 * this two fields can not be configured by user. 9900 */ 9901 vport->txvlan_cfg.accept_tag2 = true; 9902 vport->txvlan_cfg.accept_untag2 = true; 9903 vport->txvlan_cfg.insert_tag2_en = false; 9904 vport->txvlan_cfg.default_tag2 = 0; 9905 vport->txvlan_cfg.tag_shift_mode_en = true; 9906 9907 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 9908 vport->rxvlan_cfg.strip_tag1_en = false; 9909 vport->rxvlan_cfg.strip_tag2_en = 9910 vport->rxvlan_cfg.rx_vlan_offload_en; 9911 vport->rxvlan_cfg.strip_tag2_discard_en = false; 9912 } else { 9913 vport->rxvlan_cfg.strip_tag1_en = 9914 vport->rxvlan_cfg.rx_vlan_offload_en; 9915 vport->rxvlan_cfg.strip_tag2_en = true; 9916 vport->rxvlan_cfg.strip_tag2_discard_en = true; 9917 } 9918 9919 vport->rxvlan_cfg.strip_tag1_discard_en = false; 9920 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 9921 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 9922 9923 ret = hclge_set_vlan_tx_offload_cfg(vport); 9924 if (ret) 9925 return ret; 9926 9927 return hclge_set_vlan_rx_offload_cfg(vport); 9928 } 9929 9930 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 9931 { 9932 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 9933 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 9934 struct hclge_desc desc; 9935 int status; 9936 9937 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 9938 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 9939 rx_req->ot_fst_vlan_type = 9940 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 9941 rx_req->ot_sec_vlan_type = 9942 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 9943 rx_req->in_fst_vlan_type = 9944 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 9945 rx_req->in_sec_vlan_type = 9946 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 9947 9948 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9949 if (status) { 9950 dev_err(&hdev->pdev->dev, 9951 "Send rxvlan protocol type command fail, ret =%d\n", 9952 status); 9953 return status; 9954 } 9955 9956 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 9957 9958 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; 9959 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 9960 tx_req->in_vlan_type = 
cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 9961 9962 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9963 if (status) 9964 dev_err(&hdev->pdev->dev, 9965 "Send txvlan protocol type command fail, ret =%d\n", 9966 status); 9967 9968 return status; 9969 } 9970 9971 static int hclge_init_vlan_filter(struct hclge_dev *hdev) 9972 { 9973 struct hclge_vport *vport; 9974 bool enable = true; 9975 int ret; 9976 int i; 9977 9978 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 9979 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9980 HCLGE_FILTER_FE_EGRESS_V1_B, 9981 true, 0); 9982 9983 /* for revision 0x21, vf vlan filter is per function */ 9984 for (i = 0; i < hdev->num_alloc_vport; i++) { 9985 vport = &hdev->vport[i]; 9986 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9987 HCLGE_FILTER_FE_EGRESS, true, 9988 vport->vport_id); 9989 if (ret) 9990 return ret; 9991 vport->cur_vlan_fltr_en = true; 9992 } 9993 9994 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) && 9995 !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) 9996 enable = false; 9997 9998 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 9999 HCLGE_FILTER_FE_INGRESS, enable, 0); 10000 } 10001 10002 static int hclge_init_vlan_type(struct hclge_dev *hdev) 10003 { 10004 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; 10005 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; 10006 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; 10007 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; 10008 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; 10009 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; 10010 10011 return hclge_set_vlan_protocol_type(hdev); 10012 } 10013 10014 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) 10015 { 10016 struct hclge_port_base_vlan_config *cfg; 10017 struct hclge_vport *vport; 10018 int ret; 10019 int i; 10020 10021 for (i = 0; i < hdev->num_alloc_vport; i++) { 10022 vport = &hdev->vport[i]; 10023 cfg = &vport->port_base_vlan_cfg; 10024 10025 ret = hclge_vlan_offload_cfg(vport, cfg->state, 10026 cfg->vlan_info.vlan_tag, 10027 cfg->vlan_info.qos); 10028 if (ret) 10029 return ret; 10030 } 10031 return 0; 10032 } 10033 10034 static int hclge_init_vlan_config(struct hclge_dev *hdev) 10035 { 10036 struct hnae3_handle *handle = &hdev->vport[0].nic; 10037 int ret; 10038 10039 ret = hclge_init_vlan_filter(hdev); 10040 if (ret) 10041 return ret; 10042 10043 ret = hclge_init_vlan_type(hdev); 10044 if (ret) 10045 return ret; 10046 10047 ret = hclge_init_vport_vlan_offload(hdev); 10048 if (ret) 10049 return ret; 10050 10051 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 10052 } 10053 10054 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 10055 bool writen_to_tbl) 10056 { 10057 struct hclge_vport_vlan_cfg *vlan, *tmp; 10058 struct hclge_dev *hdev = vport->back; 10059 10060 mutex_lock(&hdev->vport_lock); 10061 10062 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10063 if (vlan->vlan_id == vlan_id) { 10064 mutex_unlock(&hdev->vport_lock); 10065 return; 10066 } 10067 } 10068 10069 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 10070 if (!vlan) { 10071 mutex_unlock(&hdev->vport_lock); 10072 return; 10073 } 10074 10075 vlan->hd_tbl_status = writen_to_tbl; 10076 vlan->vlan_id = vlan_id; 10077 10078 list_add_tail(&vlan->node, &vport->vlan_list); 10079 mutex_unlock(&hdev->vport_lock); 10080 } 10081 10082 static int 
hclge_add_vport_all_vlan_table(struct hclge_vport *vport) 10083 { 10084 struct hclge_vport_vlan_cfg *vlan, *tmp; 10085 struct hclge_dev *hdev = vport->back; 10086 int ret; 10087 10088 mutex_lock(&hdev->vport_lock); 10089 10090 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10091 if (!vlan->hd_tbl_status) { 10092 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10093 vport->vport_id, 10094 vlan->vlan_id, false); 10095 if (ret) { 10096 dev_err(&hdev->pdev->dev, 10097 "restore vport vlan list failed, ret=%d\n", 10098 ret); 10099 10100 mutex_unlock(&hdev->vport_lock); 10101 return ret; 10102 } 10103 } 10104 vlan->hd_tbl_status = true; 10105 } 10106 10107 mutex_unlock(&hdev->vport_lock); 10108 10109 return 0; 10110 } 10111 10112 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 10113 bool is_write_tbl) 10114 { 10115 struct hclge_vport_vlan_cfg *vlan, *tmp; 10116 struct hclge_dev *hdev = vport->back; 10117 10118 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10119 if (vlan->vlan_id == vlan_id) { 10120 if (is_write_tbl && vlan->hd_tbl_status) 10121 hclge_set_vlan_filter_hw(hdev, 10122 htons(ETH_P_8021Q), 10123 vport->vport_id, 10124 vlan_id, 10125 true); 10126 10127 list_del(&vlan->node); 10128 kfree(vlan); 10129 break; 10130 } 10131 } 10132 } 10133 10134 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) 10135 { 10136 struct hclge_vport_vlan_cfg *vlan, *tmp; 10137 struct hclge_dev *hdev = vport->back; 10138 10139 mutex_lock(&hdev->vport_lock); 10140 10141 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10142 if (vlan->hd_tbl_status) 10143 hclge_set_vlan_filter_hw(hdev, 10144 htons(ETH_P_8021Q), 10145 vport->vport_id, 10146 vlan->vlan_id, 10147 true); 10148 10149 vlan->hd_tbl_status = false; 10150 if (is_del_list) { 10151 list_del(&vlan->node); 10152 kfree(vlan); 10153 } 10154 } 10155 clear_bit(vport->vport_id, hdev->vf_vlan_full); 10156 mutex_unlock(&hdev->vport_lock); 10157 } 10158 10159 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) 10160 { 10161 struct hclge_vport_vlan_cfg *vlan, *tmp; 10162 struct hclge_vport *vport; 10163 int i; 10164 10165 mutex_lock(&hdev->vport_lock); 10166 10167 for (i = 0; i < hdev->num_alloc_vport; i++) { 10168 vport = &hdev->vport[i]; 10169 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10170 list_del(&vlan->node); 10171 kfree(vlan); 10172 } 10173 } 10174 10175 mutex_unlock(&hdev->vport_lock); 10176 } 10177 10178 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) 10179 { 10180 struct hclge_vlan_info *vlan_info; 10181 struct hclge_vport *vport; 10182 u16 vlan_proto; 10183 u16 vlan_id; 10184 u16 state; 10185 int vf_id; 10186 int ret; 10187 10188 /* PF should restore all vfs port base vlan */ 10189 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { 10190 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; 10191 vlan_info = vport->port_base_vlan_cfg.tbl_sta ? 
			&vport->port_base_vlan_cfg.vlan_info :
			&vport->port_base_vlan_cfg.old_vlan_info;

		vlan_id = vlan_info->vlan_tag;
		vlan_proto = vlan_info->vlan_proto;
		state = vport->port_base_vlan_cfg.state;

		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
			clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
			ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
						       vport->vport_id,
						       vlan_id, false);
			vport->port_base_vlan_cfg.tbl_sta = ret == 0;
		}
	}
}

void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	mutex_lock(&hdev->vport_lock);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret)
				break;
			vlan->hd_tbl_status = true;
		}
	}

	mutex_unlock(&hdev->vport_lock);
}

/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after the reset completes.
 * Furthermore, the mac addresses with state TO_DEL or DEL_FAIL do not
 * need to be restored after reset, so just remove these mac nodes from
 * mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}

static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_port_base_vlan_config(hdev);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	hclge_restore_fd_entries(handle);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en
= enable; 10292 10293 return hclge_set_vlan_rx_offload_cfg(vport); 10294 } 10295 10296 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport) 10297 { 10298 struct hclge_dev *hdev = vport->back; 10299 10300 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) 10301 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); 10302 } 10303 10304 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, 10305 u16 port_base_vlan_state, 10306 struct hclge_vlan_info *new_info, 10307 struct hclge_vlan_info *old_info) 10308 { 10309 struct hclge_dev *hdev = vport->back; 10310 int ret; 10311 10312 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { 10313 hclge_rm_vport_all_vlan_table(vport, false); 10314 /* force clear VLAN 0 */ 10315 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); 10316 if (ret) 10317 return ret; 10318 return hclge_set_vlan_filter_hw(hdev, 10319 htons(new_info->vlan_proto), 10320 vport->vport_id, 10321 new_info->vlan_tag, 10322 false); 10323 } 10324 10325 vport->port_base_vlan_cfg.tbl_sta = false; 10326 10327 /* force add VLAN 0 */ 10328 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); 10329 if (ret) 10330 return ret; 10331 10332 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), 10333 vport->vport_id, old_info->vlan_tag, 10334 true); 10335 if (ret) 10336 return ret; 10337 10338 return hclge_add_vport_all_vlan_table(vport); 10339 } 10340 10341 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, 10342 const struct hclge_vlan_info *old_cfg) 10343 { 10344 if (new_cfg->vlan_tag != old_cfg->vlan_tag) 10345 return true; 10346 10347 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) 10348 return true; 10349 10350 return false; 10351 } 10352 10353 static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, 10354 struct hclge_vlan_info *new_info, 10355 struct hclge_vlan_info *old_info) 10356 { 10357 struct hclge_dev *hdev = vport->back; 10358 int ret; 10359 10360 /* add new VLAN tag */ 10361 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), 10362 vport->vport_id, new_info->vlan_tag, 10363 false); 10364 if (ret) 10365 return ret; 10366 10367 vport->port_base_vlan_cfg.tbl_sta = false; 10368 /* remove old VLAN tag */ 10369 if (old_info->vlan_tag == 0) 10370 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, 10371 true, 0); 10372 else 10373 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10374 vport->vport_id, 10375 old_info->vlan_tag, true); 10376 if (ret) 10377 dev_err(&hdev->pdev->dev, 10378 "failed to clear vport%u port base vlan %u, ret = %d.\n", 10379 vport->vport_id, old_info->vlan_tag, ret); 10380 10381 return ret; 10382 } 10383 10384 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, 10385 struct hclge_vlan_info *vlan_info) 10386 { 10387 struct hnae3_handle *nic = &vport->nic; 10388 struct hclge_vlan_info *old_vlan_info; 10389 int ret; 10390 10391 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; 10392 10393 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag, 10394 vlan_info->qos); 10395 if (ret) 10396 return ret; 10397 10398 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) 10399 goto out; 10400 10401 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) 10402 ret = hclge_modify_port_base_vlan_tag(vport, vlan_info, 10403 old_vlan_info); 10404 else 10405 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, 10406 old_vlan_info); 10407 if (ret) 10408 
return ret; 10409 10410 out: 10411 vport->port_base_vlan_cfg.state = state; 10412 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 10413 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; 10414 else 10415 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 10416 10417 vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; 10418 vport->port_base_vlan_cfg.vlan_info = *vlan_info; 10419 vport->port_base_vlan_cfg.tbl_sta = true; 10420 hclge_set_vport_vlan_fltr_change(vport); 10421 10422 return 0; 10423 } 10424 10425 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, 10426 enum hnae3_port_base_vlan_state state, 10427 u16 vlan, u8 qos) 10428 { 10429 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10430 if (!vlan && !qos) 10431 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 10432 10433 return HNAE3_PORT_BASE_VLAN_ENABLE; 10434 } 10435 10436 if (!vlan && !qos) 10437 return HNAE3_PORT_BASE_VLAN_DISABLE; 10438 10439 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && 10440 vport->port_base_vlan_cfg.vlan_info.qos == qos) 10441 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 10442 10443 return HNAE3_PORT_BASE_VLAN_MODIFY; 10444 } 10445 10446 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 10447 u16 vlan, u8 qos, __be16 proto) 10448 { 10449 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 10450 struct hclge_vport *vport = hclge_get_vport(handle); 10451 struct hclge_dev *hdev = vport->back; 10452 struct hclge_vlan_info vlan_info; 10453 u16 state; 10454 int ret; 10455 10456 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 10457 return -EOPNOTSUPP; 10458 10459 vport = hclge_get_vf_vport(hdev, vfid); 10460 if (!vport) 10461 return -EINVAL; 10462 10463 /* qos is a 3 bits value, so can not be bigger than 7 */ 10464 if (vlan > VLAN_N_VID - 1 || qos > 7) 10465 return -EINVAL; 10466 if (proto != htons(ETH_P_8021Q)) 10467 return -EPROTONOSUPPORT; 10468 10469 state = hclge_get_port_base_vlan_state(vport, 10470 vport->port_base_vlan_cfg.state, 10471 vlan, qos); 10472 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) 10473 return 0; 10474 10475 vlan_info.vlan_tag = vlan; 10476 vlan_info.qos = qos; 10477 vlan_info.vlan_proto = ntohs(proto); 10478 10479 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); 10480 if (ret) { 10481 dev_err(&hdev->pdev->dev, 10482 "failed to update port base vlan for vf %d, ret = %d\n", 10483 vfid, ret); 10484 return ret; 10485 } 10486 10487 /* there is a timewindow for PF to know VF unalive, it may 10488 * cause send mailbox fail, but it doesn't matter, VF will 10489 * query it when reinit. 10490 * for DEVICE_VERSION_V3, vf doesn't need to know about the port based 10491 * VLAN state. 
10492 */ 10493 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 10494 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 10495 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], 10496 vport->vport_id, 10497 state, 10498 &vlan_info); 10499 else 10500 set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, 10501 &vport->need_notify); 10502 } 10503 return 0; 10504 } 10505 10506 static void hclge_clear_vf_vlan(struct hclge_dev *hdev) 10507 { 10508 struct hclge_vlan_info *vlan_info; 10509 struct hclge_vport *vport; 10510 int ret; 10511 int vf; 10512 10513 /* clear port base vlan for all vf */ 10514 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { 10515 vport = &hdev->vport[vf]; 10516 vlan_info = &vport->port_base_vlan_cfg.vlan_info; 10517 10518 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10519 vport->vport_id, 10520 vlan_info->vlan_tag, true); 10521 if (ret) 10522 dev_err(&hdev->pdev->dev, 10523 "failed to clear vf vlan for vf%d, ret = %d\n", 10524 vf - HCLGE_VF_VPORT_START_NUM, ret); 10525 } 10526 } 10527 10528 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 10529 u16 vlan_id, bool is_kill) 10530 { 10531 struct hclge_vport *vport = hclge_get_vport(handle); 10532 struct hclge_dev *hdev = vport->back; 10533 bool writen_to_tbl = false; 10534 int ret = 0; 10535 10536 /* When device is resetting or reset failed, firmware is unable to 10537 * handle mailbox. Just record the vlan id, and remove it after 10538 * reset finished. 10539 */ 10540 mutex_lock(&hdev->vport_lock); 10541 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 10542 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { 10543 set_bit(vlan_id, vport->vlan_del_fail_bmap); 10544 mutex_unlock(&hdev->vport_lock); 10545 return -EBUSY; 10546 } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { 10547 clear_bit(vlan_id, vport->vlan_del_fail_bmap); 10548 } 10549 mutex_unlock(&hdev->vport_lock); 10550 10551 /* when port base vlan enabled, we use port base vlan as the vlan 10552 * filter entry. In this case, we don't update vlan filter table 10553 * when user add new vlan or remove exist vlan, just update the vport 10554 * vlan list. 
The vlan id in vlan list will be writen in vlan filter 10555 * table until port base vlan disabled 10556 */ 10557 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10558 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, 10559 vlan_id, is_kill); 10560 writen_to_tbl = true; 10561 } 10562 10563 if (!ret) { 10564 if (!is_kill) { 10565 hclge_add_vport_vlan_table(vport, vlan_id, 10566 writen_to_tbl); 10567 } else if (is_kill && vlan_id != 0) { 10568 mutex_lock(&hdev->vport_lock); 10569 hclge_rm_vport_vlan_table(vport, vlan_id, false); 10570 mutex_unlock(&hdev->vport_lock); 10571 } 10572 } else if (is_kill) { 10573 /* when remove hw vlan filter failed, record the vlan id, 10574 * and try to remove it from hw later, to be consistence 10575 * with stack 10576 */ 10577 mutex_lock(&hdev->vport_lock); 10578 set_bit(vlan_id, vport->vlan_del_fail_bmap); 10579 mutex_unlock(&hdev->vport_lock); 10580 } 10581 10582 hclge_set_vport_vlan_fltr_change(vport); 10583 10584 return ret; 10585 } 10586 10587 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) 10588 { 10589 struct hclge_vport *vport; 10590 int ret; 10591 u16 i; 10592 10593 for (i = 0; i < hdev->num_alloc_vport; i++) { 10594 vport = &hdev->vport[i]; 10595 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 10596 &vport->state)) 10597 continue; 10598 10599 ret = hclge_enable_vport_vlan_filter(vport, 10600 vport->req_vlan_fltr_en); 10601 if (ret) { 10602 dev_err(&hdev->pdev->dev, 10603 "failed to sync vlan filter state for vport%u, ret = %d\n", 10604 vport->vport_id, ret); 10605 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 10606 &vport->state); 10607 return; 10608 } 10609 } 10610 } 10611 10612 static void hclge_sync_vlan_filter(struct hclge_dev *hdev) 10613 { 10614 #define HCLGE_MAX_SYNC_COUNT 60 10615 10616 int i, ret, sync_cnt = 0; 10617 u16 vlan_id; 10618 10619 mutex_lock(&hdev->vport_lock); 10620 /* start from vport 1 for PF is always alive */ 10621 for (i = 0; i < hdev->num_alloc_vport; i++) { 10622 struct hclge_vport *vport = &hdev->vport[i]; 10623 10624 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 10625 VLAN_N_VID); 10626 while (vlan_id != VLAN_N_VID) { 10627 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10628 vport->vport_id, vlan_id, 10629 true); 10630 if (ret && ret != -EINVAL) { 10631 mutex_unlock(&hdev->vport_lock); 10632 return; 10633 } 10634 10635 clear_bit(vlan_id, vport->vlan_del_fail_bmap); 10636 hclge_rm_vport_vlan_table(vport, vlan_id, false); 10637 hclge_set_vport_vlan_fltr_change(vport); 10638 10639 sync_cnt++; 10640 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) { 10641 mutex_unlock(&hdev->vport_lock); 10642 return; 10643 } 10644 10645 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 10646 VLAN_N_VID); 10647 } 10648 } 10649 mutex_unlock(&hdev->vport_lock); 10650 10651 hclge_sync_vlan_fltr_state(hdev); 10652 } 10653 10654 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) 10655 { 10656 struct hclge_config_max_frm_size_cmd *req; 10657 struct hclge_desc desc; 10658 10659 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 10660 10661 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 10662 req->max_frm_size = cpu_to_le16(new_mps); 10663 req->min_frm_size = HCLGE_MAC_MIN_FRAME; 10664 10665 return hclge_cmd_send(&hdev->hw, &desc, 1); 10666 } 10667 10668 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 10669 { 10670 struct hclge_vport *vport = hclge_get_vport(handle); 10671 10672 return hclge_set_vport_mtu(vport, 
				   new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports 2 layers of VLAN tags, e.g. a 1500 byte MTU gives
	 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 * VLAN_HLEN (8) =
	 * 1526 bytes of max frame size.
	 */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			dev_err(&hdev->pdev->dev,
				"failed to set pf mtu for less than vport %d, mps = %u.\n",
				i, hdev->vport[i].mps);
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}

static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
				  u8 *reset_status)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);

	return 0;
}

u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclge_comm_tqp *tqp;
	struct hnae3_queue *queue;

	queue = handle->kinfo.tqp[queue_id];
	tqp = container_of(queue, struct hclge_comm_tqp, q);

	return tqp->index;
}

static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
{
	struct hclge_vport *vport =
hclge_get_vport(handle); 10793 struct hclge_dev *hdev = vport->back; 10794 u16 reset_try_times = 0; 10795 u8 reset_status; 10796 u16 queue_gid; 10797 int ret; 10798 u16 i; 10799 10800 for (i = 0; i < handle->kinfo.num_tqps; i++) { 10801 queue_gid = hclge_covert_handle_qid_global(handle, i); 10802 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); 10803 if (ret) { 10804 dev_err(&hdev->pdev->dev, 10805 "failed to send reset tqp cmd, ret = %d\n", 10806 ret); 10807 return ret; 10808 } 10809 10810 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 10811 ret = hclge_get_reset_status(hdev, queue_gid, 10812 &reset_status); 10813 if (ret) 10814 return ret; 10815 10816 if (reset_status) 10817 break; 10818 10819 /* Wait for tqp hw reset */ 10820 usleep_range(1000, 1200); 10821 } 10822 10823 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 10824 dev_err(&hdev->pdev->dev, 10825 "wait for tqp hw reset timeout\n"); 10826 return -ETIME; 10827 } 10828 10829 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); 10830 if (ret) { 10831 dev_err(&hdev->pdev->dev, 10832 "failed to deassert soft reset, ret = %d\n", 10833 ret); 10834 return ret; 10835 } 10836 reset_try_times = 0; 10837 } 10838 return 0; 10839 } 10840 10841 static int hclge_reset_rcb(struct hnae3_handle *handle) 10842 { 10843 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U 10844 #define HCLGE_RESET_RCB_SUCCESS 1U 10845 10846 struct hclge_vport *vport = hclge_get_vport(handle); 10847 struct hclge_dev *hdev = vport->back; 10848 struct hclge_reset_cmd *req; 10849 struct hclge_desc desc; 10850 u8 return_status; 10851 u16 queue_gid; 10852 int ret; 10853 10854 queue_gid = hclge_covert_handle_qid_global(handle, 0); 10855 10856 req = (struct hclge_reset_cmd *)desc.data; 10857 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 10858 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); 10859 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); 10860 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); 10861 10862 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10863 if (ret) { 10864 dev_err(&hdev->pdev->dev, 10865 "failed to send rcb reset cmd, ret = %d\n", ret); 10866 return ret; 10867 } 10868 10869 return_status = req->fun_reset_rcb_return_status; 10870 if (return_status == HCLGE_RESET_RCB_SUCCESS) 10871 return 0; 10872 10873 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) { 10874 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", 10875 return_status); 10876 return -EIO; 10877 } 10878 10879 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd 10880 * again to reset all tqps 10881 */ 10882 return hclge_reset_tqp_cmd(handle); 10883 } 10884 10885 int hclge_reset_tqp(struct hnae3_handle *handle) 10886 { 10887 struct hclge_vport *vport = hclge_get_vport(handle); 10888 struct hclge_dev *hdev = vport->back; 10889 int ret; 10890 10891 /* only need to disable PF's tqp */ 10892 if (!vport->vport_id) { 10893 ret = hclge_tqp_enable(handle, false); 10894 if (ret) { 10895 dev_err(&hdev->pdev->dev, 10896 "failed to disable tqp, ret = %d\n", ret); 10897 return ret; 10898 } 10899 } 10900 10901 return hclge_reset_rcb(handle); 10902 } 10903 10904 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 10905 { 10906 struct hclge_vport *vport = hclge_get_vport(handle); 10907 struct hclge_dev *hdev = vport->back; 10908 10909 return hdev->fw_version; 10910 } 10911 10912 int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version) 10913 { 10914 struct hclge_comm_query_scc_cmd *resp; 10915 struct 
hclge_desc desc; 10916 int ret; 10917 10918 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, 1); 10919 resp = (struct hclge_comm_query_scc_cmd *)desc.data; 10920 10921 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10922 if (ret) 10923 return ret; 10924 10925 *scc_version = le32_to_cpu(resp->scc_version); 10926 10927 return 0; 10928 } 10929 10930 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 10931 { 10932 struct phy_device *phydev = hdev->hw.mac.phydev; 10933 10934 if (!phydev) 10935 return; 10936 10937 phy_set_asym_pause(phydev, rx_en, tx_en); 10938 } 10939 10940 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 10941 { 10942 int ret; 10943 10944 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 10945 return 0; 10946 10947 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 10948 if (ret) 10949 dev_err(&hdev->pdev->dev, 10950 "configure pauseparam error, ret = %d.\n", ret); 10951 10952 return ret; 10953 } 10954 10955 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 10956 { 10957 struct phy_device *phydev = hdev->hw.mac.phydev; 10958 u16 remote_advertising = 0; 10959 u16 local_advertising; 10960 u32 rx_pause, tx_pause; 10961 u8 flowctl; 10962 10963 if (!phydev->link) 10964 return 0; 10965 10966 if (!phydev->autoneg) 10967 return hclge_mac_pause_setup_hw(hdev); 10968 10969 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); 10970 10971 if (phydev->pause) 10972 remote_advertising = LPA_PAUSE_CAP; 10973 10974 if (phydev->asym_pause) 10975 remote_advertising |= LPA_PAUSE_ASYM; 10976 10977 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 10978 remote_advertising); 10979 tx_pause = flowctl & FLOW_CTRL_TX; 10980 rx_pause = flowctl & FLOW_CTRL_RX; 10981 10982 if (phydev->duplex == HCLGE_MAC_HALF) { 10983 tx_pause = 0; 10984 rx_pause = 0; 10985 } 10986 10987 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 10988 } 10989 10990 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 10991 u32 *rx_en, u32 *tx_en) 10992 { 10993 struct hclge_vport *vport = hclge_get_vport(handle); 10994 struct hclge_dev *hdev = vport->back; 10995 u8 media_type = hdev->hw.mac.media_type; 10996 10997 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ? 
10998 hclge_get_autoneg(handle) : 0; 10999 11000 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 11001 *rx_en = 0; 11002 *tx_en = 0; 11003 return; 11004 } 11005 11006 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 11007 *rx_en = 1; 11008 *tx_en = 0; 11009 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 11010 *tx_en = 1; 11011 *rx_en = 0; 11012 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 11013 *rx_en = 1; 11014 *tx_en = 1; 11015 } else { 11016 *rx_en = 0; 11017 *tx_en = 0; 11018 } 11019 } 11020 11021 static void hclge_record_user_pauseparam(struct hclge_dev *hdev, 11022 u32 rx_en, u32 tx_en) 11023 { 11024 if (rx_en && tx_en) 11025 hdev->fc_mode_last_time = HCLGE_FC_FULL; 11026 else if (rx_en && !tx_en) 11027 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 11028 else if (!rx_en && tx_en) 11029 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 11030 else 11031 hdev->fc_mode_last_time = HCLGE_FC_NONE; 11032 11033 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 11034 } 11035 11036 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 11037 u32 rx_en, u32 tx_en) 11038 { 11039 struct hclge_vport *vport = hclge_get_vport(handle); 11040 struct hclge_dev *hdev = vport->back; 11041 struct phy_device *phydev = hdev->hw.mac.phydev; 11042 u32 fc_autoneg; 11043 11044 if (phydev || hnae3_dev_phy_imp_supported(hdev)) { 11045 fc_autoneg = hclge_get_autoneg(handle); 11046 if (auto_neg != fc_autoneg) { 11047 dev_info(&hdev->pdev->dev, 11048 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 11049 return -EOPNOTSUPP; 11050 } 11051 } 11052 11053 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 11054 dev_info(&hdev->pdev->dev, 11055 "Priority flow control enabled. Cannot set link flow control.\n"); 11056 return -EOPNOTSUPP; 11057 } 11058 11059 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 11060 11061 hclge_record_user_pauseparam(hdev, rx_en, tx_en); 11062 11063 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) 11064 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 11065 11066 if (phydev) 11067 return phy_start_aneg(phydev); 11068 11069 return -EOPNOTSUPP; 11070 } 11071 11072 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 11073 u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num) 11074 { 11075 struct hclge_vport *vport = hclge_get_vport(handle); 11076 struct hclge_dev *hdev = vport->back; 11077 11078 if (speed) 11079 *speed = hdev->hw.mac.speed; 11080 if (duplex) 11081 *duplex = hdev->hw.mac.duplex; 11082 if (auto_neg) 11083 *auto_neg = hdev->hw.mac.autoneg; 11084 if (lane_num) 11085 *lane_num = hdev->hw.mac.lane_num; 11086 } 11087 11088 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, 11089 u8 *module_type) 11090 { 11091 struct hclge_vport *vport = hclge_get_vport(handle); 11092 struct hclge_dev *hdev = vport->back; 11093 11094 /* When nic is down, the service task is not running, doesn't update 11095 * the port information per second. Query the port information before 11096 * return the media type, ensure getting the correct media information. 
11097 */ 11098 hclge_update_port_info(hdev); 11099 11100 if (media_type) 11101 *media_type = hdev->hw.mac.media_type; 11102 11103 if (module_type) 11104 *module_type = hdev->hw.mac.module_type; 11105 } 11106 11107 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 11108 u8 *tp_mdix_ctrl, u8 *tp_mdix) 11109 { 11110 struct hclge_vport *vport = hclge_get_vport(handle); 11111 struct hclge_dev *hdev = vport->back; 11112 struct phy_device *phydev = hdev->hw.mac.phydev; 11113 int mdix_ctrl, mdix, is_resolved; 11114 unsigned int retval; 11115 11116 if (!phydev) { 11117 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 11118 *tp_mdix = ETH_TP_MDI_INVALID; 11119 return; 11120 } 11121 11122 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 11123 11124 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 11125 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 11126 HCLGE_PHY_MDIX_CTRL_S); 11127 11128 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 11129 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 11130 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 11131 11132 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 11133 11134 switch (mdix_ctrl) { 11135 case 0x0: 11136 *tp_mdix_ctrl = ETH_TP_MDI; 11137 break; 11138 case 0x1: 11139 *tp_mdix_ctrl = ETH_TP_MDI_X; 11140 break; 11141 case 0x3: 11142 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 11143 break; 11144 default: 11145 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 11146 break; 11147 } 11148 11149 if (!is_resolved) 11150 *tp_mdix = ETH_TP_MDI_INVALID; 11151 else if (mdix) 11152 *tp_mdix = ETH_TP_MDI_X; 11153 else 11154 *tp_mdix = ETH_TP_MDI; 11155 } 11156 11157 static void hclge_info_show(struct hclge_dev *hdev) 11158 { 11159 struct hnae3_handle *handle = &hdev->vport->nic; 11160 struct device *dev = &hdev->pdev->dev; 11161 11162 dev_info(dev, "PF info begin:\n"); 11163 11164 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 11165 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 11166 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 11167 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 11168 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); 11169 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 11170 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); 11171 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); 11172 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); 11173 dev_info(dev, "This is %s PF\n", 11174 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); 11175 dev_info(dev, "DCB %s\n", 11176 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable"); 11177 dev_info(dev, "MQPRIO %s\n", 11178 handle->kinfo.tc_info.mqprio_active ? 
"enable" : "disable"); 11179 dev_info(dev, "Default tx spare buffer size: %u\n", 11180 hdev->tx_spare_buf_size); 11181 11182 dev_info(dev, "PF info end.\n"); 11183 } 11184 11185 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 11186 struct hclge_vport *vport) 11187 { 11188 struct hnae3_client *client = vport->nic.client; 11189 struct hclge_dev *hdev = ae_dev->priv; 11190 int rst_cnt = hdev->rst_stats.reset_cnt; 11191 int ret; 11192 11193 ret = client->ops->init_instance(&vport->nic); 11194 if (ret) 11195 return ret; 11196 11197 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11198 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 11199 rst_cnt != hdev->rst_stats.reset_cnt) { 11200 ret = -EBUSY; 11201 goto init_nic_err; 11202 } 11203 11204 /* Enable nic hw error interrupts */ 11205 ret = hclge_config_nic_hw_error(hdev, true); 11206 if (ret) { 11207 dev_err(&ae_dev->pdev->dev, 11208 "fail(%d) to enable hw error interrupts\n", ret); 11209 goto init_nic_err; 11210 } 11211 11212 hnae3_set_client_init_flag(client, ae_dev, 1); 11213 11214 if (netif_msg_drv(&hdev->vport->nic)) 11215 hclge_info_show(hdev); 11216 11217 return ret; 11218 11219 init_nic_err: 11220 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11221 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11222 msleep(HCLGE_WAIT_RESET_DONE); 11223 11224 client->ops->uninit_instance(&vport->nic, 0); 11225 11226 return ret; 11227 } 11228 11229 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 11230 struct hclge_vport *vport) 11231 { 11232 struct hclge_dev *hdev = ae_dev->priv; 11233 struct hnae3_client *client; 11234 int rst_cnt; 11235 int ret; 11236 11237 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 11238 !hdev->nic_client) 11239 return 0; 11240 11241 client = hdev->roce_client; 11242 ret = hclge_init_roce_base_info(vport); 11243 if (ret) 11244 return ret; 11245 11246 rst_cnt = hdev->rst_stats.reset_cnt; 11247 ret = client->ops->init_instance(&vport->roce); 11248 if (ret) 11249 return ret; 11250 11251 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11252 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 11253 rst_cnt != hdev->rst_stats.reset_cnt) { 11254 ret = -EBUSY; 11255 goto init_roce_err; 11256 } 11257 11258 /* Enable roce ras interrupts */ 11259 ret = hclge_config_rocee_ras_interrupt(hdev, true); 11260 if (ret) { 11261 dev_err(&ae_dev->pdev->dev, 11262 "fail(%d) to enable roce ras interrupts\n", ret); 11263 goto init_roce_err; 11264 } 11265 11266 hnae3_set_client_init_flag(client, ae_dev, 1); 11267 11268 return 0; 11269 11270 init_roce_err: 11271 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11272 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11273 msleep(HCLGE_WAIT_RESET_DONE); 11274 11275 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); 11276 11277 return ret; 11278 } 11279 11280 static int hclge_init_client_instance(struct hnae3_client *client, 11281 struct hnae3_ae_dev *ae_dev) 11282 { 11283 struct hclge_dev *hdev = ae_dev->priv; 11284 struct hclge_vport *vport = &hdev->vport[0]; 11285 int ret; 11286 11287 switch (client->type) { 11288 case HNAE3_CLIENT_KNIC: 11289 hdev->nic_client = client; 11290 vport->nic.client = client; 11291 ret = hclge_init_nic_client_instance(ae_dev, vport); 11292 if (ret) 11293 goto clear_nic; 11294 11295 ret = hclge_init_roce_client_instance(ae_dev, vport); 11296 if (ret) 11297 goto clear_roce; 11298 11299 break; 11300 case HNAE3_CLIENT_ROCE: 11301 if (hnae3_dev_roce_supported(hdev)) { 
11302 hdev->roce_client = client; 11303 vport->roce.client = client; 11304 } 11305 11306 ret = hclge_init_roce_client_instance(ae_dev, vport); 11307 if (ret) 11308 goto clear_roce; 11309 11310 break; 11311 default: 11312 return -EINVAL; 11313 } 11314 11315 return 0; 11316 11317 clear_nic: 11318 hdev->nic_client = NULL; 11319 vport->nic.client = NULL; 11320 return ret; 11321 clear_roce: 11322 hdev->roce_client = NULL; 11323 vport->roce.client = NULL; 11324 return ret; 11325 } 11326 11327 static bool hclge_uninit_need_wait(struct hclge_dev *hdev) 11328 { 11329 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 11330 test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 11331 } 11332 11333 static void hclge_uninit_client_instance(struct hnae3_client *client, 11334 struct hnae3_ae_dev *ae_dev) 11335 { 11336 struct hclge_dev *hdev = ae_dev->priv; 11337 struct hclge_vport *vport = &hdev->vport[0]; 11338 11339 if (hdev->roce_client) { 11340 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11341 while (hclge_uninit_need_wait(hdev)) 11342 msleep(HCLGE_WAIT_RESET_DONE); 11343 11344 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); 11345 hdev->roce_client = NULL; 11346 vport->roce.client = NULL; 11347 } 11348 if (client->type == HNAE3_CLIENT_ROCE) 11349 return; 11350 if (hdev->nic_client && client->ops->uninit_instance) { 11351 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11352 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11353 msleep(HCLGE_WAIT_RESET_DONE); 11354 11355 client->ops->uninit_instance(&vport->nic, 0); 11356 hdev->nic_client = NULL; 11357 vport->nic.client = NULL; 11358 } 11359 } 11360 11361 static int hclge_dev_mem_map(struct hclge_dev *hdev) 11362 { 11363 struct pci_dev *pdev = hdev->pdev; 11364 struct hclge_hw *hw = &hdev->hw; 11365 11366 /* for device does not have device memory, return directly */ 11367 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR))) 11368 return 0; 11369 11370 hw->hw.mem_base = 11371 devm_ioremap_wc(&pdev->dev, 11372 pci_resource_start(pdev, HCLGE_MEM_BAR), 11373 pci_resource_len(pdev, HCLGE_MEM_BAR)); 11374 if (!hw->hw.mem_base) { 11375 dev_err(&pdev->dev, "failed to map device memory\n"); 11376 return -EFAULT; 11377 } 11378 11379 return 0; 11380 } 11381 11382 static int hclge_pci_init(struct hclge_dev *hdev) 11383 { 11384 struct pci_dev *pdev = hdev->pdev; 11385 struct hclge_hw *hw; 11386 int ret; 11387 11388 ret = pci_enable_device(pdev); 11389 if (ret) { 11390 dev_err(&pdev->dev, "failed to enable PCI device\n"); 11391 return ret; 11392 } 11393 11394 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11395 if (ret) { 11396 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11397 if (ret) { 11398 dev_err(&pdev->dev, 11399 "can't set consistent PCI DMA"); 11400 goto err_disable_device; 11401 } 11402 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 11403 } 11404 11405 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 11406 if (ret) { 11407 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 11408 goto err_disable_device; 11409 } 11410 11411 pci_set_master(pdev); 11412 hw = &hdev->hw; 11413 hw->hw.io_base = pcim_iomap(pdev, 2, 0); 11414 if (!hw->hw.io_base) { 11415 dev_err(&pdev->dev, "Can't map configuration register space\n"); 11416 ret = -ENOMEM; 11417 goto err_release_regions; 11418 } 11419 11420 ret = hclge_dev_mem_map(hdev); 11421 if (ret) 11422 goto err_unmap_io_base; 11423 11424 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 11425 11426 return 0; 11427 11428 
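/* Error unwind for hclge_pci_init(): undo the io_base mapping, the region
 * request and the device enable in the reverse order they were acquired.
 */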
err_unmap_io_base: 11429 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11430 err_release_regions: 11431 pci_release_regions(pdev); 11432 err_disable_device: 11433 pci_disable_device(pdev); 11434 11435 return ret; 11436 } 11437 11438 static void hclge_pci_uninit(struct hclge_dev *hdev) 11439 { 11440 struct pci_dev *pdev = hdev->pdev; 11441 11442 if (hdev->hw.hw.mem_base) 11443 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 11444 11445 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11446 pci_free_irq_vectors(pdev); 11447 pci_release_mem_regions(pdev); 11448 pci_disable_device(pdev); 11449 } 11450 11451 static void hclge_state_init(struct hclge_dev *hdev) 11452 { 11453 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 11454 set_bit(HCLGE_STATE_DOWN, &hdev->state); 11455 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 11456 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11457 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 11458 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 11459 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 11460 } 11461 11462 static void hclge_state_uninit(struct hclge_dev *hdev) 11463 { 11464 set_bit(HCLGE_STATE_DOWN, &hdev->state); 11465 set_bit(HCLGE_STATE_REMOVING, &hdev->state); 11466 11467 if (hdev->reset_timer.function) 11468 del_timer_sync(&hdev->reset_timer); 11469 if (hdev->service_task.work.func) 11470 cancel_delayed_work_sync(&hdev->service_task); 11471 } 11472 11473 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 11474 enum hnae3_reset_type rst_type) 11475 { 11476 #define HCLGE_RESET_RETRY_WAIT_MS 500 11477 #define HCLGE_RESET_RETRY_CNT 5 11478 11479 struct hclge_dev *hdev = ae_dev->priv; 11480 int retry_cnt = 0; 11481 int ret; 11482 11483 while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) { 11484 down(&hdev->reset_sem); 11485 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11486 hdev->reset_type = rst_type; 11487 ret = hclge_reset_prepare(hdev); 11488 if (!ret && !hdev->reset_pending) 11489 break; 11490 11491 dev_err(&hdev->pdev->dev, 11492 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 11493 ret, hdev->reset_pending, retry_cnt); 11494 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11495 up(&hdev->reset_sem); 11496 msleep(HCLGE_RESET_RETRY_WAIT_MS); 11497 } 11498 11499 /* disable misc vector before reset done */ 11500 hclge_enable_vector(&hdev->misc_vector, false); 11501 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 11502 11503 if (hdev->reset_type == HNAE3_FLR_RESET) 11504 hdev->rst_stats.flr_rst_cnt++; 11505 } 11506 11507 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) 11508 { 11509 struct hclge_dev *hdev = ae_dev->priv; 11510 int ret; 11511 11512 hclge_enable_vector(&hdev->misc_vector, true); 11513 11514 ret = hclge_reset_rebuild(hdev); 11515 if (ret) 11516 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); 11517 11518 hdev->reset_type = HNAE3_NONE_RESET; 11519 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11520 up(&hdev->reset_sem); 11521 } 11522 11523 static void hclge_clear_resetting_state(struct hclge_dev *hdev) 11524 { 11525 u16 i; 11526 11527 for (i = 0; i < hdev->num_alloc_vport; i++) { 11528 struct hclge_vport *vport = &hdev->vport[i]; 11529 int ret; 11530 11531 /* Send cmd to clear vport's FUNC_RST_ING */ 11532 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); 11533 if (ret) 11534 dev_warn(&hdev->pdev->dev, 11535 "clear vport(%u) rst failed %d!\n", 11536 vport->vport_id, ret); 11537 } 11538 } 11539 11540 static int 
hclge_clear_hw_resource(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This command is only supported by new firmware; it fails with
	 * older firmware. Only older firmware returns -EOPNOTSUPP for this
	 * command, so to stay backward compatible we override that value
	 * and return success.
	 */
	if (ret && ret != -EOPNOTSUPP) {
		dev_err(&hdev->pdev->dev,
			"failed to clear hw resource, ret = %d\n", ret);
		return ret;
	}
	return 0;
}

static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}

static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return &vport->back->hw.mac.wol;
}

static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
					u32 *wol_supported)
{
	struct hclge_query_wol_supported_cmd *wol_supported_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
				   true);
	wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query wol supported, ret = %d\n", ret);
		return ret;
	}

	*wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);

	return 0;
}

static int hclge_set_wol_cfg(struct hclge_dev *hdev,
			     struct hclge_wol_info *wol_info)
{
	struct hclge_wol_cfg_cmd *wol_cfg_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
	wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
	wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
	wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
	memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set wol config, ret = %d\n", ret);

	return ret;
}

static int hclge_update_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	return hclge_set_wol_cfg(hdev, wol_info);
}

static int hclge_init_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
	int ret;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	memset(wol_info, 0, sizeof(struct hclge_wol_info));
	ret = hclge_get_wol_supported_mode(hdev,
					   &wol_info->wol_support_mode);
	if (ret) {
		wol_info->wol_support_mode = 0;
		return ret;
	}

	return hclge_update_wol(hdev);
}

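/* The two helpers below back the ethtool WoL ops exposed via hclge_ops.
 * Illustrative userspace usage (the interface name is only an example):
 *
 *   ethtool eth0                                    # shows Supports/Wake-on
 *   ethtool -s eth0 wol gs sopass 11:22:33:44:55:66
 *
 * The get path copies the cached wol_support_mode/wol_current_mode out of
 * struct hclge_wol_info; the set path rejects modes outside the supported
 * mask and then pushes the new config to firmware via hclge_set_wol_cfg().
 */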
static void hclge_get_wol(struct hnae3_handle *handle,
			  struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);

	wol->supported = wol_info->wol_support_mode;
	wol->wolopts = wol_info->wol_current_mode;
	if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
		memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
}

static int hclge_set_wol(struct hnae3_handle *handle,
			 struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
	struct hclge_vport *vport = hclge_get_vport(handle);
	u32 wol_mode;
	int ret;

	wol_mode = wol->wolopts;
	if (wol_mode & ~wol_info->wol_support_mode)
		return -EINVAL;

	wol_info->wol_current_mode = wol_mode;
	if (wol_mode & WAKE_MAGICSECURE) {
		memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
		wol_info->wol_sopass_size = SOPASS_MAX;
	} else {
		wol_info->wol_sopass_size = 0;
	}

	ret = hclge_set_wol_cfg(vport->back, wol_info);
	if (ret)
		wol_info->wol_current_mode = 0;

	return ret;
}

static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2 layers of VLAN tags */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	/* Initialize the firmware command queue */
	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
	if (ret)
		goto err_pci_uninit;

	/* Initialize firmware command handling */
	hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops);
	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_clear_hw_resource(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	ret = hclge_map_tqp(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
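		/* Copper ports: FEC is not applicable, and the external PHY is
		 * managed either by the IMP firmware (when phy_imp is supported)
		 * or through the MDIO bus configured below.
		 */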
		clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
		if (hnae3_dev_phy_imp_supported(hdev))
			ret = hclge_update_tp_port_info(hdev);
		else
			ret = hclge_mac_mdio_config(hdev);

		if (ret)
			goto err_msi_irq_uninit;
	}

	ret = hclge_init_umv_space(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
				      &hdev->rss_cfg);
	if (ret) {
		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_update_port_info(hdev);
	if (ret)
		goto err_ptp_uninit;

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that have already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on this PF could disturb the pending initialization
	 * of other PFs.
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	hclge_init_rxd_adv_layout(hdev);

	/* Enable MISC vector (vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_init_wol(hdev);
	if (ret)
		dev_warn(&pdev->dev,
			 "failed to init wake on lan, ret = %d\n", ret);

	ret = hclge_devlink_init(hdev);
	if (ret)
		goto err_ptp_uninit;

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

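	/* Schedule the periodic service task, which drives link status
	 * updates and the deferred MAC/VLAN table and promisc mode sync.
	 */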
hclge_task_schedule(hdev, round_jiffies_relative(HZ)); 11902 return 0; 11903 11904 err_ptp_uninit: 11905 hclge_ptp_uninit(hdev); 11906 err_mdiobus_unreg: 11907 if (hdev->hw.mac.phydev) 11908 mdiobus_unregister(hdev->hw.mac.mdio_bus); 11909 err_msi_irq_uninit: 11910 hclge_misc_irq_uninit(hdev); 11911 err_msi_uninit: 11912 pci_free_irq_vectors(pdev); 11913 err_cmd_uninit: 11914 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 11915 err_pci_uninit: 11916 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11917 pci_release_regions(pdev); 11918 pci_disable_device(pdev); 11919 out: 11920 mutex_destroy(&hdev->vport_lock); 11921 return ret; 11922 } 11923 11924 static void hclge_stats_clear(struct hclge_dev *hdev) 11925 { 11926 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); 11927 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); 11928 } 11929 11930 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 11931 { 11932 return hclge_config_switch_param(hdev, vf, enable, 11933 HCLGE_SWITCH_ANTI_SPOOF_MASK); 11934 } 11935 11936 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 11937 { 11938 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 11939 HCLGE_FILTER_FE_NIC_INGRESS_B, 11940 enable, vf); 11941 } 11942 11943 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) 11944 { 11945 int ret; 11946 11947 ret = hclge_set_mac_spoofchk(hdev, vf, enable); 11948 if (ret) { 11949 dev_err(&hdev->pdev->dev, 11950 "Set vf %d mac spoof check %s failed, ret=%d\n", 11951 vf, enable ? "on" : "off", ret); 11952 return ret; 11953 } 11954 11955 ret = hclge_set_vlan_spoofchk(hdev, vf, enable); 11956 if (ret) 11957 dev_err(&hdev->pdev->dev, 11958 "Set vf %d vlan spoof check %s failed, ret=%d\n", 11959 vf, enable ? "on" : "off", ret); 11960 11961 return ret; 11962 } 11963 11964 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, 11965 bool enable) 11966 { 11967 struct hclge_vport *vport = hclge_get_vport(handle); 11968 struct hclge_dev *hdev = vport->back; 11969 u32 new_spoofchk = enable ? 
1 : 0; 11970 int ret; 11971 11972 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 11973 return -EOPNOTSUPP; 11974 11975 vport = hclge_get_vf_vport(hdev, vf); 11976 if (!vport) 11977 return -EINVAL; 11978 11979 if (vport->vf_info.spoofchk == new_spoofchk) 11980 return 0; 11981 11982 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) 11983 dev_warn(&hdev->pdev->dev, 11984 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", 11985 vf); 11986 else if (enable && hclge_is_umv_space_full(vport, true)) 11987 dev_warn(&hdev->pdev->dev, 11988 "vf %d mac table is full, enable spoof check may cause its packet send fail\n", 11989 vf); 11990 11991 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); 11992 if (ret) 11993 return ret; 11994 11995 vport->vf_info.spoofchk = new_spoofchk; 11996 return 0; 11997 } 11998 11999 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) 12000 { 12001 struct hclge_vport *vport = hdev->vport; 12002 int ret; 12003 int i; 12004 12005 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 12006 return 0; 12007 12008 /* resume the vf spoof check state after reset */ 12009 for (i = 0; i < hdev->num_alloc_vport; i++) { 12010 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, 12011 vport->vf_info.spoofchk); 12012 if (ret) 12013 return ret; 12014 12015 vport++; 12016 } 12017 12018 return 0; 12019 } 12020 12021 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) 12022 { 12023 struct hclge_vport *vport = hclge_get_vport(handle); 12024 struct hclge_dev *hdev = vport->back; 12025 u32 new_trusted = enable ? 1 : 0; 12026 12027 vport = hclge_get_vf_vport(hdev, vf); 12028 if (!vport) 12029 return -EINVAL; 12030 12031 if (vport->vf_info.trusted == new_trusted) 12032 return 0; 12033 12034 vport->vf_info.trusted = new_trusted; 12035 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12036 hclge_task_schedule(hdev, 0); 12037 12038 return 0; 12039 } 12040 12041 static void hclge_reset_vf_rate(struct hclge_dev *hdev) 12042 { 12043 int ret; 12044 int vf; 12045 12046 /* reset vf rate to default value */ 12047 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { 12048 struct hclge_vport *vport = &hdev->vport[vf]; 12049 12050 vport->vf_info.max_tx_rate = 0; 12051 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); 12052 if (ret) 12053 dev_err(&hdev->pdev->dev, 12054 "vf%d failed to reset to default, ret=%d\n", 12055 vf - HCLGE_VF_VPORT_START_NUM, ret); 12056 } 12057 } 12058 12059 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, 12060 int min_tx_rate, int max_tx_rate) 12061 { 12062 if (min_tx_rate != 0 || 12063 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { 12064 dev_err(&hdev->pdev->dev, 12065 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n", 12066 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); 12067 return -EINVAL; 12068 } 12069 12070 return 0; 12071 } 12072 12073 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, 12074 int min_tx_rate, int max_tx_rate, bool force) 12075 { 12076 struct hclge_vport *vport = hclge_get_vport(handle); 12077 struct hclge_dev *hdev = vport->back; 12078 int ret; 12079 12080 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); 12081 if (ret) 12082 return ret; 12083 12084 vport = hclge_get_vf_vport(hdev, vf); 12085 if (!vport) 12086 return -EINVAL; 12087 12088 if (!force && max_tx_rate == vport->vf_info.max_tx_rate) 12089 return 0; 12090 12091 ret = hclge_tm_qs_shaper_cfg(vport, 
max_tx_rate); 12092 if (ret) 12093 return ret; 12094 12095 vport->vf_info.max_tx_rate = max_tx_rate; 12096 12097 return 0; 12098 } 12099 12100 static int hclge_resume_vf_rate(struct hclge_dev *hdev) 12101 { 12102 struct hnae3_handle *handle = &hdev->vport->nic; 12103 struct hclge_vport *vport; 12104 int ret; 12105 int vf; 12106 12107 /* resume the vf max_tx_rate after reset */ 12108 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { 12109 vport = hclge_get_vf_vport(hdev, vf); 12110 if (!vport) 12111 return -EINVAL; 12112 12113 /* zero means max rate, after reset, firmware already set it to 12114 * max rate, so just continue. 12115 */ 12116 if (!vport->vf_info.max_tx_rate) 12117 continue; 12118 12119 ret = hclge_set_vf_rate(handle, vf, 0, 12120 vport->vf_info.max_tx_rate, true); 12121 if (ret) { 12122 dev_err(&hdev->pdev->dev, 12123 "vf%d failed to resume tx_rate:%u, ret=%d\n", 12124 vf, vport->vf_info.max_tx_rate, ret); 12125 return ret; 12126 } 12127 } 12128 12129 return 0; 12130 } 12131 12132 static void hclge_reset_vport_state(struct hclge_dev *hdev) 12133 { 12134 struct hclge_vport *vport = hdev->vport; 12135 int i; 12136 12137 for (i = 0; i < hdev->num_alloc_vport; i++) { 12138 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 12139 vport++; 12140 } 12141 } 12142 12143 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 12144 { 12145 struct hclge_dev *hdev = ae_dev->priv; 12146 struct pci_dev *pdev = ae_dev->pdev; 12147 int ret; 12148 12149 set_bit(HCLGE_STATE_DOWN, &hdev->state); 12150 12151 hclge_stats_clear(hdev); 12152 /* NOTE: pf reset needn't to clear or restore pf and vf table entry. 12153 * so here should not clean table in memory. 12154 */ 12155 if (hdev->reset_type == HNAE3_IMP_RESET || 12156 hdev->reset_type == HNAE3_GLOBAL_RESET) { 12157 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 12158 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); 12159 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); 12160 hclge_reset_umv_space(hdev); 12161 } 12162 12163 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, 12164 true, hdev->reset_pending); 12165 if (ret) { 12166 dev_err(&pdev->dev, "Cmd queue init failed\n"); 12167 return ret; 12168 } 12169 12170 ret = hclge_map_tqp(hdev); 12171 if (ret) { 12172 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 12173 return ret; 12174 } 12175 12176 ret = hclge_mac_init(hdev); 12177 if (ret) { 12178 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 12179 return ret; 12180 } 12181 12182 ret = hclge_tp_port_init(hdev); 12183 if (ret) { 12184 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n", 12185 ret); 12186 return ret; 12187 } 12188 12189 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 12190 if (ret) { 12191 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 12192 return ret; 12193 } 12194 12195 ret = hclge_config_gro(hdev); 12196 if (ret) 12197 return ret; 12198 12199 ret = hclge_init_vlan_config(hdev); 12200 if (ret) { 12201 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 12202 return ret; 12203 } 12204 12205 hclge_reset_tc_config(hdev); 12206 12207 ret = hclge_tm_init_hw(hdev, true); 12208 if (ret) { 12209 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 12210 return ret; 12211 } 12212 12213 ret = hclge_rss_init_hw(hdev); 12214 if (ret) { 12215 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 12216 return ret; 12217 } 12218 12219 ret = init_mgr_tbl(hdev); 12220 if (ret) { 12221 dev_err(&pdev->dev, 12222 "failed to reinit 
manager table, ret = %d\n", ret); 12223 return ret; 12224 } 12225 12226 ret = hclge_init_fd_config(hdev); 12227 if (ret) { 12228 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); 12229 return ret; 12230 } 12231 12232 ret = hclge_ptp_init(hdev); 12233 if (ret) 12234 return ret; 12235 12236 /* Log and clear the hw errors those already occurred */ 12237 if (hnae3_dev_ras_imp_supported(hdev)) 12238 hclge_handle_occurred_error(hdev); 12239 else 12240 hclge_handle_all_hns_hw_errors(ae_dev); 12241 12242 /* Re-enable the hw error interrupts because 12243 * the interrupts get disabled on global reset. 12244 */ 12245 ret = hclge_config_nic_hw_error(hdev, true); 12246 if (ret) { 12247 dev_err(&pdev->dev, 12248 "fail(%d) to re-enable NIC hw error interrupts\n", 12249 ret); 12250 return ret; 12251 } 12252 12253 if (hdev->roce_client) { 12254 ret = hclge_config_rocee_ras_interrupt(hdev, true); 12255 if (ret) { 12256 dev_err(&pdev->dev, 12257 "fail(%d) to re-enable roce ras interrupts\n", 12258 ret); 12259 return ret; 12260 } 12261 } 12262 12263 hclge_reset_vport_state(hdev); 12264 ret = hclge_reset_vport_spoofchk(hdev); 12265 if (ret) 12266 return ret; 12267 12268 ret = hclge_resume_vf_rate(hdev); 12269 if (ret) 12270 return ret; 12271 12272 hclge_init_rxd_adv_layout(hdev); 12273 12274 ret = hclge_update_wol(hdev); 12275 if (ret) 12276 dev_warn(&pdev->dev, 12277 "failed to update wol config, ret = %d\n", ret); 12278 12279 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 12280 HCLGE_DRIVER_NAME); 12281 12282 return 0; 12283 } 12284 12285 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 12286 { 12287 struct hclge_dev *hdev = ae_dev->priv; 12288 struct hclge_mac *mac = &hdev->hw.mac; 12289 12290 hclge_reset_vf_rate(hdev); 12291 hclge_clear_vf_vlan(hdev); 12292 hclge_state_uninit(hdev); 12293 hclge_ptp_uninit(hdev); 12294 hclge_uninit_rxd_adv_layout(hdev); 12295 hclge_uninit_mac_table(hdev); 12296 hclge_del_all_fd_entries(hdev); 12297 12298 if (mac->phydev) 12299 mdiobus_unregister(mac->mdio_bus); 12300 12301 /* Disable MISC vector(vector0) */ 12302 hclge_enable_vector(&hdev->misc_vector, false); 12303 synchronize_irq(hdev->misc_vector.vector_irq); 12304 12305 /* Disable all hw interrupts */ 12306 hclge_config_mac_tnl_int(hdev, false); 12307 hclge_config_nic_hw_error(hdev, false); 12308 hclge_config_rocee_ras_interrupt(hdev, false); 12309 12310 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 12311 hclge_misc_irq_uninit(hdev); 12312 hclge_devlink_uninit(hdev); 12313 hclge_pci_uninit(hdev); 12314 hclge_uninit_vport_vlan_table(hdev); 12315 mutex_destroy(&hdev->vport_lock); 12316 ae_dev->priv = NULL; 12317 } 12318 12319 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 12320 { 12321 struct hclge_vport *vport = hclge_get_vport(handle); 12322 struct hclge_dev *hdev = vport->back; 12323 12324 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); 12325 } 12326 12327 static void hclge_get_channels(struct hnae3_handle *handle, 12328 struct ethtool_channels *ch) 12329 { 12330 ch->max_combined = hclge_get_max_channels(handle); 12331 ch->other_count = 1; 12332 ch->max_other = 1; 12333 ch->combined_count = handle->kinfo.rss_size; 12334 } 12335 12336 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 12337 u16 *alloc_tqps, u16 *max_rss_size) 12338 { 12339 struct hclge_vport *vport = hclge_get_vport(handle); 12340 struct hclge_dev *hdev = vport->back; 12341 12342 *alloc_tqps = vport->alloc_tqps; 12343 *max_rss_size = hdev->pf_rss_size_max; 
12344 } 12345 12346 static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle) 12347 { 12348 struct hclge_vport *vport = hclge_get_vport(handle); 12349 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 12350 struct hclge_dev *hdev = vport->back; 12351 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 12352 u16 tc_valid[HCLGE_MAX_TC_NUM]; 12353 u16 roundup_size; 12354 unsigned int i; 12355 12356 roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); 12357 roundup_size = ilog2(roundup_size); 12358 /* Set the RSS TC mode according to the new RSS size */ 12359 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 12360 tc_valid[i] = 0; 12361 12362 if (!(hdev->hw_tc_map & BIT(i))) 12363 continue; 12364 12365 tc_valid[i] = 1; 12366 tc_size[i] = roundup_size; 12367 tc_offset[i] = vport->nic.kinfo.rss_size * i; 12368 } 12369 12370 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, 12371 tc_size); 12372 } 12373 12374 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 12375 bool rxfh_configured) 12376 { 12377 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 12378 struct hclge_vport *vport = hclge_get_vport(handle); 12379 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 12380 struct hclge_dev *hdev = vport->back; 12381 u16 cur_rss_size = kinfo->rss_size; 12382 u16 cur_tqps = kinfo->num_tqps; 12383 u32 *rss_indir; 12384 unsigned int i; 12385 int ret; 12386 12387 kinfo->req_rss_size = new_tqps_num; 12388 12389 ret = hclge_tm_vport_map_update(hdev); 12390 if (ret) { 12391 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); 12392 return ret; 12393 } 12394 12395 ret = hclge_set_rss_tc_mode_cfg(handle); 12396 if (ret) 12397 return ret; 12398 12399 /* RSS indirection table has been configured by user */ 12400 if (rxfh_configured) 12401 goto out; 12402 12403 /* Reinitializes the rss indirect table according to the new RSS size */ 12404 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32), 12405 GFP_KERNEL); 12406 if (!rss_indir) 12407 return -ENOMEM; 12408 12409 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) 12410 rss_indir[i] = i % kinfo->rss_size; 12411 12412 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 12413 if (ret) 12414 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 12415 ret); 12416 12417 kfree(rss_indir); 12418 12419 out: 12420 if (!ret) 12421 dev_info(&hdev->pdev->dev, 12422 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 12423 cur_rss_size, kinfo->rss_size, 12424 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 12425 12426 return ret; 12427 } 12428 12429 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) 12430 { 12431 struct hclge_set_led_state_cmd *req; 12432 struct hclge_desc desc; 12433 int ret; 12434 12435 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); 12436 12437 req = (struct hclge_set_led_state_cmd *)desc.data; 12438 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, 12439 HCLGE_LED_LOCATE_STATE_S, locate_led_status); 12440 12441 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12442 if (ret) 12443 dev_err(&hdev->pdev->dev, 12444 "Send set led state cmd error, ret =%d\n", ret); 12445 12446 return ret; 12447 } 12448 12449 enum hclge_led_status { 12450 HCLGE_LED_OFF, 12451 HCLGE_LED_ON, 12452 HCLGE_LED_NO_CHANGE = 0xFF, 12453 }; 12454 12455 static int hclge_set_led_id(struct hnae3_handle *handle, 12456 enum ethtool_phys_id_state status) 12457 { 12458 struct hclge_vport *vport = hclge_get_vport(handle); 12459 struct 
hclge_dev *hdev = vport->back; 12460 12461 switch (status) { 12462 case ETHTOOL_ID_ACTIVE: 12463 return hclge_set_led_status(hdev, HCLGE_LED_ON); 12464 case ETHTOOL_ID_INACTIVE: 12465 return hclge_set_led_status(hdev, HCLGE_LED_OFF); 12466 default: 12467 return -EINVAL; 12468 } 12469 } 12470 12471 static void hclge_get_link_mode(struct hnae3_handle *handle, 12472 unsigned long *supported, 12473 unsigned long *advertising) 12474 { 12475 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); 12476 struct hclge_vport *vport = hclge_get_vport(handle); 12477 struct hclge_dev *hdev = vport->back; 12478 unsigned int idx = 0; 12479 12480 for (; idx < size; idx++) { 12481 supported[idx] = hdev->hw.mac.supported[idx]; 12482 advertising[idx] = hdev->hw.mac.advertising[idx]; 12483 } 12484 } 12485 12486 static int hclge_gro_en(struct hnae3_handle *handle, bool enable) 12487 { 12488 struct hclge_vport *vport = hclge_get_vport(handle); 12489 struct hclge_dev *hdev = vport->back; 12490 bool gro_en_old = hdev->gro_en; 12491 int ret; 12492 12493 hdev->gro_en = enable; 12494 ret = hclge_config_gro(hdev); 12495 if (ret) 12496 hdev->gro_en = gro_en_old; 12497 12498 return ret; 12499 } 12500 12501 static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport) 12502 { 12503 struct hnae3_handle *handle = &vport->nic; 12504 struct hclge_dev *hdev = vport->back; 12505 bool uc_en = false; 12506 bool mc_en = false; 12507 u8 tmp_flags; 12508 bool bc_en; 12509 int ret; 12510 12511 if (vport->last_promisc_flags != vport->overflow_promisc_flags) { 12512 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12513 vport->last_promisc_flags = vport->overflow_promisc_flags; 12514 } 12515 12516 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, 12517 &vport->state)) 12518 return 0; 12519 12520 /* for PF */ 12521 if (!vport->vport_id) { 12522 tmp_flags = handle->netdev_flags | vport->last_promisc_flags; 12523 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, 12524 tmp_flags & HNAE3_MPE); 12525 if (!ret) 12526 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 12527 &vport->state); 12528 else 12529 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, 12530 &vport->state); 12531 return ret; 12532 } 12533 12534 /* for VF */ 12535 if (vport->vf_info.trusted) { 12536 uc_en = vport->vf_info.request_uc_en > 0 || 12537 vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; 12538 mc_en = vport->vf_info.request_mc_en > 0 || 12539 vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; 12540 } 12541 bc_en = vport->vf_info.request_bc_en > 0; 12542 12543 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, 12544 mc_en, bc_en); 12545 if (ret) { 12546 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12547 return ret; 12548 } 12549 hclge_set_vport_vlan_fltr_change(vport); 12550 12551 return 0; 12552 } 12553 12554 static void hclge_sync_promisc_mode(struct hclge_dev *hdev) 12555 { 12556 struct hclge_vport *vport; 12557 int ret; 12558 u16 i; 12559 12560 for (i = 0; i < hdev->num_alloc_vport; i++) { 12561 vport = &hdev->vport[i]; 12562 12563 ret = hclge_sync_vport_promisc_mode(vport); 12564 if (ret) 12565 return; 12566 } 12567 } 12568 12569 static bool hclge_module_existed(struct hclge_dev *hdev) 12570 { 12571 struct hclge_desc desc; 12572 u32 existed; 12573 int ret; 12574 12575 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true); 12576 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12577 if (ret) { 12578 dev_err(&hdev->pdev->dev, 12579 "failed to get SFP exist state, ret = %d\n", ret); 12580 return 
false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

/* Reading the module EEPROM needs 6 BDs (140 bytes in total) per command.
 * Returns the number of bytes actually read; 0 means the read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			return read_len;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}

static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}

static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
					 u32 *status_code)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query link diagnosis info, ret = %d\n", ret);
		return ret;
	}

	*status_code = le32_to_cpu(desc.data[0]);
	return 0;
}

/* After SR-IOV is disabled, the VF still has configuration and state that
 * was set up by the PF and needs to be cleaned up here.
 */
static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	int ret;

	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->need_notify = 0;
	vport->mps = 0;

	/* after disabling SR-IOV, clean the VF rate configured by the PF */
	ret = hclge_tm_qs_shaper_cfg(vport, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d rate config, ret = %d\n",
			vfid, ret);

	vlan_info.vlan_tag = 0;
	vlan_info.qos = 0;
	vlan_info.vlan_proto = ETH_P_8021Q;
	ret = hclge_update_port_base_vlan_cfg(vport,
					      HNAE3_PORT_BASE_VLAN_DISABLE,
					      &vlan_info);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d port base vlan, ret = %d\n",
			vfid, ret);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d spoof config, ret = %d\n",
			vfid, ret);

	memset(&vport->vf_info, 0, sizeof(vport->vf_info));
}

static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		hclge_clear_vport_vf_info(vport, i);
	}
}

static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
			       u8 *priority)
{
	struct hclge_vport *vport = hclge_get_vport(h);

	if (dscp >= HNAE3_MAX_DSCP)
		return -EINVAL;

	if (tc_mode)
		*tc_mode = vport->nic.kinfo.tc_map_mode;
	if (priority)
		*priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ?
0 : 12759 vport->nic.kinfo.dscp_prio[dscp]; 12760 12761 return 0; 12762 } 12763 12764 static const struct hnae3_ae_ops hclge_ops = { 12765 .init_ae_dev = hclge_init_ae_dev, 12766 .uninit_ae_dev = hclge_uninit_ae_dev, 12767 .reset_prepare = hclge_reset_prepare_general, 12768 .reset_done = hclge_reset_done, 12769 .init_client_instance = hclge_init_client_instance, 12770 .uninit_client_instance = hclge_uninit_client_instance, 12771 .map_ring_to_vector = hclge_map_ring_to_vector, 12772 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, 12773 .get_vector = hclge_get_vector, 12774 .put_vector = hclge_put_vector, 12775 .set_promisc_mode = hclge_set_promisc_mode, 12776 .request_update_promisc_mode = hclge_request_update_promisc_mode, 12777 .set_loopback = hclge_set_loopback, 12778 .start = hclge_ae_start, 12779 .stop = hclge_ae_stop, 12780 .client_start = hclge_client_start, 12781 .client_stop = hclge_client_stop, 12782 .get_status = hclge_get_status, 12783 .get_ksettings_an_result = hclge_get_ksettings_an_result, 12784 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, 12785 .get_media_type = hclge_get_media_type, 12786 .check_port_speed = hclge_check_port_speed, 12787 .get_fec_stats = hclge_get_fec_stats, 12788 .get_fec = hclge_get_fec, 12789 .set_fec = hclge_set_fec, 12790 .get_rss_key_size = hclge_comm_get_rss_key_size, 12791 .get_rss = hclge_get_rss, 12792 .set_rss = hclge_set_rss, 12793 .set_rss_tuple = hclge_set_rss_tuple, 12794 .get_rss_tuple = hclge_get_rss_tuple, 12795 .get_tc_size = hclge_get_tc_size, 12796 .get_mac_addr = hclge_get_mac_addr, 12797 .set_mac_addr = hclge_set_mac_addr, 12798 .do_ioctl = hclge_do_ioctl, 12799 .add_uc_addr = hclge_add_uc_addr, 12800 .rm_uc_addr = hclge_rm_uc_addr, 12801 .add_mc_addr = hclge_add_mc_addr, 12802 .rm_mc_addr = hclge_rm_mc_addr, 12803 .set_autoneg = hclge_set_autoneg, 12804 .get_autoneg = hclge_get_autoneg, 12805 .restart_autoneg = hclge_restart_autoneg, 12806 .halt_autoneg = hclge_halt_autoneg, 12807 .get_pauseparam = hclge_get_pauseparam, 12808 .set_pauseparam = hclge_set_pauseparam, 12809 .set_mtu = hclge_set_mtu, 12810 .reset_queue = hclge_reset_tqp, 12811 .get_stats = hclge_get_stats, 12812 .get_mac_stats = hclge_get_mac_stat, 12813 .update_stats = hclge_update_stats, 12814 .get_strings = hclge_get_strings, 12815 .get_sset_count = hclge_get_sset_count, 12816 .get_fw_version = hclge_get_fw_version, 12817 .get_mdix_mode = hclge_get_mdix_mode, 12818 .enable_vlan_filter = hclge_enable_vlan_filter, 12819 .set_vlan_filter = hclge_set_vlan_filter, 12820 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 12821 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 12822 .reset_event = hclge_reset_event, 12823 .get_reset_level = hclge_get_reset_level, 12824 .set_default_reset_request = hclge_set_def_reset_request, 12825 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, 12826 .set_channels = hclge_set_channels, 12827 .get_channels = hclge_get_channels, 12828 .get_regs_len = hclge_get_regs_len, 12829 .get_regs = hclge_get_regs, 12830 .set_led_id = hclge_set_led_id, 12831 .get_link_mode = hclge_get_link_mode, 12832 .add_fd_entry = hclge_add_fd_entry, 12833 .del_fd_entry = hclge_del_fd_entry, 12834 .get_fd_rule_cnt = hclge_get_fd_rule_cnt, 12835 .get_fd_rule_info = hclge_get_fd_rule_info, 12836 .get_fd_all_rules = hclge_get_all_rules, 12837 .enable_fd = hclge_enable_fd, 12838 .add_arfs_entry = hclge_add_fd_entry_by_arfs, 12839 .dbg_read_cmd = hclge_dbg_read_cmd, 12840 .handle_hw_ras_error = hclge_handle_hw_ras_error, 12841 .get_hw_reset_stat = 
hclge_get_hw_reset_stat, 12842 .ae_dev_resetting = hclge_ae_dev_resetting, 12843 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, 12844 .set_gro_en = hclge_gro_en, 12845 .get_global_queue_id = hclge_covert_handle_qid_global, 12846 .set_timer_task = hclge_set_timer_task, 12847 .mac_connect_phy = hclge_mac_connect_phy, 12848 .mac_disconnect_phy = hclge_mac_disconnect_phy, 12849 .get_vf_config = hclge_get_vf_config, 12850 .set_vf_link_state = hclge_set_vf_link_state, 12851 .set_vf_spoofchk = hclge_set_vf_spoofchk, 12852 .set_vf_trust = hclge_set_vf_trust, 12853 .set_vf_rate = hclge_set_vf_rate, 12854 .set_vf_mac = hclge_set_vf_mac, 12855 .get_module_eeprom = hclge_get_module_eeprom, 12856 .get_cmdq_stat = hclge_get_cmdq_stat, 12857 .add_cls_flower = hclge_add_cls_flower, 12858 .del_cls_flower = hclge_del_cls_flower, 12859 .cls_flower_active = hclge_is_cls_flower_active, 12860 .get_phy_link_ksettings = hclge_get_phy_link_ksettings, 12861 .set_phy_link_ksettings = hclge_set_phy_link_ksettings, 12862 .set_tx_hwts_info = hclge_ptp_set_tx_info, 12863 .get_rx_hwts = hclge_ptp_get_rx_hwts, 12864 .get_ts_info = hclge_ptp_get_ts_info, 12865 .get_link_diagnosis_info = hclge_get_link_diagnosis_info, 12866 .clean_vf_config = hclge_clean_vport_config, 12867 .get_dscp_prio = hclge_get_dscp_prio, 12868 .get_wol = hclge_get_wol, 12869 .set_wol = hclge_set_wol, 12870 }; 12871 12872 static struct hnae3_ae_algo ae_algo = { 12873 .ops = &hclge_ops, 12874 .pdev_id_table = ae_algo_pci_tbl, 12875 }; 12876 12877 static int __init hclge_init(void) 12878 { 12879 pr_info("%s is initializing\n", HCLGE_NAME); 12880 12881 hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); 12882 if (!hclge_wq) { 12883 pr_err("%s: failed to create workqueue\n", HCLGE_NAME); 12884 return -ENOMEM; 12885 } 12886 12887 hnae3_register_ae_algo(&ae_algo); 12888 12889 return 0; 12890 } 12891 12892 static void __exit hclge_exit(void) 12893 { 12894 hnae3_unregister_ae_algo_prepare(&ae_algo); 12895 hnae3_unregister_ae_algo(&ae_algo); 12896 destroy_workqueue(hclge_wq); 12897 } 12898 module_init(hclge_init); 12899 module_exit(hclge_exit); 12900 12901 MODULE_LICENSE("GPL"); 12902 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 12903 MODULE_DESCRIPTION("HCLGE Driver"); 12904 MODULE_VERSION(HCLGE_MOD_VERSION); 12905
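/* Note: hclge is typically loaded as a dependency of the hns3 ENET driver.
 * hclge_init() only allocates the shared unbound workqueue and registers the
 * algo with the hnae3 framework; per-device probing then enters through
 * hclge_init_ae_dev() for each matching PCI function.
 */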