1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/acpi.h> 5 #include <linux/device.h> 6 #include <linux/etherdevice.h> 7 #include <linux/init.h> 8 #include <linux/interrupt.h> 9 #include <linux/kernel.h> 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <linux/pci.h> 13 #include <linux/platform_device.h> 14 #include <linux/if_vlan.h> 15 #include <linux/crash_dump.h> 16 #include <net/ipv6.h> 17 #include <net/rtnetlink.h> 18 #include "hclge_cmd.h" 19 #include "hclge_dcb.h" 20 #include "hclge_main.h" 21 #include "hclge_mbx.h" 22 #include "hclge_mdio.h" 23 #include "hclge_regs.h" 24 #include "hclge_tm.h" 25 #include "hclge_err.h" 26 #include "hnae3.h" 27 #include "hclge_devlink.h" 28 #include "hclge_comm_cmd.h" 29 30 #include "hclge_trace.h" 31 32 #define HCLGE_NAME "hclge" 33 34 #define HCLGE_BUF_SIZE_UNIT 256U 35 #define HCLGE_BUF_MUL_BY 2 36 #define HCLGE_BUF_DIV_BY 2 37 #define NEED_RESERVE_TC_NUM 2 38 #define BUF_MAX_PERCENT 100 39 #define BUF_RESERVE_PERCENT 90 40 41 #define HCLGE_RESET_MAX_FAIL_CNT 5 42 #define HCLGE_RESET_SYNC_TIME 100 43 #define HCLGE_PF_RESET_SYNC_TIME 20 44 #define HCLGE_PF_RESET_SYNC_CNT 1500 45 46 #define HCLGE_LINK_STATUS_MS 10 47 48 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); 49 static int hclge_init_vlan_config(struct hclge_dev *hdev); 50 static void hclge_sync_vlan_filter(struct hclge_dev *hdev); 51 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); 52 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); 53 static void hclge_rfs_filter_expire(struct hclge_dev *hdev); 54 static int hclge_clear_arfs_rules(struct hclge_dev *hdev); 55 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, 56 unsigned long *addr); 57 static int hclge_set_default_loopback(struct hclge_dev *hdev); 58 59 static void hclge_sync_mac_table(struct hclge_dev *hdev); 60 static void hclge_restore_hw_table(struct hclge_dev *hdev); 61 static void hclge_sync_promisc_mode(struct hclge_dev *hdev); 62 static void hclge_sync_fd_table(struct hclge_dev *hdev); 63 static void hclge_update_fec_stats(struct hclge_dev *hdev); 64 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, 65 int wait_cnt); 66 static int hclge_update_port_info(struct hclge_dev *hdev); 67 68 static struct hnae3_ae_algo ae_algo; 69 70 static struct workqueue_struct *hclge_wq; 71 72 static const struct pci_device_id ae_algo_pci_tbl[] = { 73 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, 74 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, 75 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, 76 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, 77 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0}, 81 /* required last entry */ 82 {0, } 83 }; 84 85 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); 86 87 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { 88 "External Loopback test", 89 "App Loopback test", 90 "Serdes serial Loopback test", 91 "Serdes parallel Loopback test", 92 "Phy Loopback test" 93 }; 94 95 static const struct hclge_comm_stats_str g_mac_stats_string[] = { 96 {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, 97 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, 98 {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, 99 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, 100 
{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 101 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)}, 102 {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 103 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)}, 104 {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 105 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, 106 {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 107 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, 108 {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 109 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, 110 {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 111 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 112 {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 113 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 114 {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 115 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 116 {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 117 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 118 {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 119 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 120 {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 121 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 122 {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 123 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 124 {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 125 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 126 {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 127 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)}, 128 {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 129 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)}, 130 {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 131 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)}, 132 {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 133 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)}, 134 {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 135 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)}, 136 {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 137 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)}, 138 {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 139 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)}, 140 {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 141 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)}, 142 {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 143 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, 144 {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 145 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 146 {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 147 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 148 {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 149 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 150 {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 151 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 152 {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 153 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 154 {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 155 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 156 {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 157 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 158 {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 159 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 160 {"mac_rx_pfc_pri0_xoff_time", 
HCLGE_MAC_STATS_MAX_NUM_V2, 161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)}, 162 {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 163 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)}, 164 {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)}, 166 {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 167 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)}, 168 {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 169 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)}, 170 {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 171 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)}, 172 {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 173 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)}, 174 {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 175 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)}, 176 {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 178 {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 180 {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 182 {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 184 {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 185 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 186 {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 187 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 188 {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 189 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 190 {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 191 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 192 {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 193 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 194 {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 195 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 196 {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 197 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, 198 {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 199 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 200 {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 201 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 202 {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 204 {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 206 {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 208 {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 210 {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, 212 {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 214 {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 216 {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 218 {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, 220 
{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, 222 {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, 224 {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 225 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, 226 {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 227 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 228 {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 229 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 230 {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 231 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 232 {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 233 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 234 {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 235 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 236 {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 237 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 238 {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 239 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 240 {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 241 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 242 {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 243 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 244 {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 245 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 246 {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 247 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, 248 {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 249 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 250 {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 252 {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 254 {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 256 {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 258 {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 260 {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 262 {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 264 {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 266 {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 268 {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 270 {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 272 {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 274 {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 275 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 276 277 {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 278 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 279 {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 280 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 281 {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 282 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 283 {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 284 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 285 {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 286 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 287 {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 288 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 289 {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 291 {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 293 {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 295 {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 297 {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 299 {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 300 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 301 }; 302 303 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 304 { 305 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 306 .ethter_type = cpu_to_le16(ETH_P_LLDP), 307 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e}, 308 .i_port_bitmap = 0x1, 309 }, 310 }; 311 312 static const struct key_info meta_data_key_info[] = { 313 { PACKET_TYPE_ID, 6 }, 314 { IP_FRAGEMENT, 1 }, 315 { ROCE_TYPE, 1 }, 316 { NEXT_KEY, 5 }, 317 { VLAN_NUMBER, 2 }, 318 { SRC_VPORT, 12 }, 319 { DST_VPORT, 12 }, 320 { TUNNEL_PACKET, 1 }, 321 }; 322 323 static const struct key_info tuple_key_info[] = { 324 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 }, 325 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 }, 326 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 }, 327 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, 328 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 }, 329 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 }, 330 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 }, 331 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 }, 332 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 }, 333 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 }, 334 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 }, 335 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 }, 336 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 }, 337 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 }, 338 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 }, 339 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 }, 340 { INNER_DST_MAC, 48, KEY_OPT_MAC, 341 offsetof(struct hclge_fd_rule, tuples.dst_mac), 342 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) }, 343 { INNER_SRC_MAC, 48, KEY_OPT_MAC, 344 offsetof(struct hclge_fd_rule, tuples.src_mac), 345 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) }, 346 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16, 347 offsetof(struct hclge_fd_rule, tuples.vlan_tag1), 348 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) }, 349 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, 350 { INNER_ETH_TYPE, 16, KEY_OPT_LE16, 351 offsetof(struct hclge_fd_rule, tuples.ether_proto), 352 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) }, 353 { INNER_L2_RSV, 16, KEY_OPT_LE16, 354 offsetof(struct hclge_fd_rule, tuples.l2_user_def), 355 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) }, 356 { INNER_IP_TOS, 8, KEY_OPT_U8, 357 offsetof(struct hclge_fd_rule, tuples.ip_tos), 358 offsetof(struct 
	  hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it
 * sends the queue, cleans the queue, etc.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				 int num, bool is_special)
{
	int i;

	trace_hclge_pf_cmd_send(hw, desc, 0, num);

	if (!is_special) {
		for (i = 1; i < num; i++)
			trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
	} else {
		for (i = 1; i < num; i++)
			trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
							i, num);
	}
}

static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				int num, bool is_special)
{
	int i;

	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		return;

	trace_hclge_pf_cmd_get(hw, desc, 0, num);

	if (!is_special) {
		for (i = 1; i < num; i++)
			trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
	} else {
		for (i = 1; i < num; i++)
			trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
						       i, num);
	}
}

static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
	.trace_cmd_send = hclge_trace_cmd_send,
	.trace_cmd_get = hclge_trace_cmd_get,
};
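/* Illustrative sketch (not part of the driver): both MAC statistics update
 * paths below treat struct hclge_mac_stats as a flat array of u64 counters
 * and accumulate the little-endian 64-bit words returned in the descriptor
 * data area one by one. A minimal model of that pattern, using hypothetical
 * names (stats/words/cnt), would look like:
 *
 *	static void accumulate_le64_words(u64 *stats, const __le64 *words,
 *					  u32 cnt)
 *	{
 *		u32 i;
 *
 *		for (i = 0; i < cnt; i++)
 *			stats[i] += le64_to_cpu(words[i]);
 *	}
 *
 * The real functions additionally skip the 64-bit header carried in the
 * first descriptor and bound the count by the firmware-reported register
 * number.
 */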
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	/* The first desc has a 64-bit header, so the data size needs to be
	 * reduced by 1
	 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC 4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so it needs to be counted in */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs total register number of both valid registers and
	 * reserved registers, but the old firmware only returns number
	 * of valid registers in device V2. To be compatible with these
	 * devices, driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}

int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}

static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count++;

	return count;
}

static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}

static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i <
size; i++) { 607 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) 608 continue; 609 610 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); 611 buff = buff + ETH_GSTRING_LEN; 612 } 613 614 return (u8 *)buff; 615 } 616 617 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 618 { 619 struct hnae3_handle *handle; 620 int status; 621 622 handle = &hdev->vport[0].nic; 623 if (handle->client) { 624 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 625 if (status) { 626 dev_err(&hdev->pdev->dev, 627 "Update TQPS stats fail, status = %d.\n", 628 status); 629 } 630 } 631 632 hclge_update_fec_stats(hdev); 633 634 status = hclge_mac_update_stats(hdev); 635 if (status) 636 dev_err(&hdev->pdev->dev, 637 "Update MAC stats fail, status = %d.\n", status); 638 } 639 640 static void hclge_update_stats(struct hnae3_handle *handle) 641 { 642 struct hclge_vport *vport = hclge_get_vport(handle); 643 struct hclge_dev *hdev = vport->back; 644 int status; 645 646 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 647 return; 648 649 status = hclge_mac_update_stats(hdev); 650 if (status) 651 dev_err(&hdev->pdev->dev, 652 "Update MAC stats fail, status = %d.\n", 653 status); 654 655 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 656 if (status) 657 dev_err(&hdev->pdev->dev, 658 "Update TQPS stats fail, status = %d.\n", 659 status); 660 661 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); 662 } 663 664 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 665 { 666 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \ 667 HNAE3_SUPPORT_PHY_LOOPBACK | \ 668 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \ 669 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \ 670 HNAE3_SUPPORT_EXTERNAL_LOOPBACK) 671 672 struct hclge_vport *vport = hclge_get_vport(handle); 673 struct hclge_dev *hdev = vport->back; 674 int count = 0; 675 676 /* Loopback test support rules: 677 * mac: only GE mode support 678 * serdes: all mac mode will support include GE/XGE/LGE/CGE 679 * phy: only support when phy device exist on board 680 */ 681 if (stringset == ETH_SS_TEST) { 682 /* clear loopback bit flags at first */ 683 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 684 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || 685 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 686 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 687 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 688 count += 1; 689 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; 690 } 691 692 if (hdev->ae_dev->dev_specs.hilink_version != 693 HCLGE_HILINK_H60) { 694 count += 1; 695 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; 696 } 697 698 count += 1; 699 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; 700 count += 1; 701 handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK; 702 703 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && 704 hdev->hw.mac.phydev->drv->set_loopback) || 705 hnae3_dev_phy_imp_supported(hdev)) { 706 count += 1; 707 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; 708 } 709 } else if (stringset == ETH_SS_STATS) { 710 count = hclge_comm_get_count(hdev, g_mac_stats_string, 711 ARRAY_SIZE(g_mac_stats_string)) + 712 hclge_comm_tqps_get_sset_count(handle); 713 } 714 715 return count; 716 } 717 718 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, 719 u8 *data) 720 { 721 struct hclge_vport *vport = hclge_get_vport(handle); 722 struct hclge_dev *hdev = vport->back; 723 u8 *p = (char *)data; 724 int size; 725 726 if 
(stringset == ETH_SS_STATS) { 727 size = ARRAY_SIZE(g_mac_stats_string); 728 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, 729 size, p); 730 p = hclge_comm_tqps_get_strings(handle, p); 731 } else if (stringset == ETH_SS_TEST) { 732 if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { 733 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL], 734 ETH_GSTRING_LEN); 735 p += ETH_GSTRING_LEN; 736 } 737 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { 738 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], 739 ETH_GSTRING_LEN); 740 p += ETH_GSTRING_LEN; 741 } 742 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { 743 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], 744 ETH_GSTRING_LEN); 745 p += ETH_GSTRING_LEN; 746 } 747 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { 748 memcpy(p, 749 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], 750 ETH_GSTRING_LEN); 751 p += ETH_GSTRING_LEN; 752 } 753 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 754 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY], 755 ETH_GSTRING_LEN); 756 p += ETH_GSTRING_LEN; 757 } 758 } 759 } 760 761 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) 762 { 763 struct hclge_vport *vport = hclge_get_vport(handle); 764 struct hclge_dev *hdev = vport->back; 765 u64 *p; 766 767 p = hclge_comm_get_stats(hdev, g_mac_stats_string, 768 ARRAY_SIZE(g_mac_stats_string), data); 769 p = hclge_comm_tqps_get_stats(handle, p); 770 } 771 772 static void hclge_get_mac_stat(struct hnae3_handle *handle, 773 struct hns3_mac_stats *mac_stats) 774 { 775 struct hclge_vport *vport = hclge_get_vport(handle); 776 struct hclge_dev *hdev = vport->back; 777 778 hclge_update_stats(handle); 779 780 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; 781 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; 782 } 783 784 static int hclge_parse_func_status(struct hclge_dev *hdev, 785 struct hclge_func_status_cmd *status) 786 { 787 #define HCLGE_MAC_ID_MASK 0xF 788 789 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 790 return -EINVAL; 791 792 /* Set the pf to main pf */ 793 if (status->pf_state & HCLGE_PF_STATE_MAIN) 794 hdev->flag |= HCLGE_FLAG_MAIN; 795 else 796 hdev->flag &= ~HCLGE_FLAG_MAIN; 797 798 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; 799 return 0; 800 } 801 802 static int hclge_query_function_status(struct hclge_dev *hdev) 803 { 804 #define HCLGE_QUERY_MAX_CNT 5 805 806 struct hclge_func_status_cmd *req; 807 struct hclge_desc desc; 808 int timeout = 0; 809 int ret; 810 811 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 812 req = (struct hclge_func_status_cmd *)desc.data; 813 814 do { 815 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 816 if (ret) { 817 dev_err(&hdev->pdev->dev, 818 "query function status failed %d.\n", ret); 819 return ret; 820 } 821 822 /* Check pf reset is done */ 823 if (req->pf_state) 824 break; 825 usleep_range(1000, 2000); 826 } while (timeout++ < HCLGE_QUERY_MAX_CNT); 827 828 return hclge_parse_func_status(hdev, req); 829 } 830 831 static int hclge_query_pf_resource(struct hclge_dev *hdev) 832 { 833 struct hclge_pf_res_cmd *req; 834 struct hclge_desc desc; 835 int ret; 836 837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 838 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 839 if (ret) { 840 dev_err(&hdev->pdev->dev, 841 "query pf resource failed %d.\n", ret); 842 return ret; 843 } 844 845 req = (struct hclge_pf_res_cmd *)desc.data; 846 hdev->num_tqps = le16_to_cpu(req->tqp_num) + 847 
le16_to_cpu(req->ext_tqp_num); 848 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 849 850 if (req->tx_buf_size) 851 hdev->tx_buf_size = 852 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; 853 else 854 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; 855 856 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); 857 858 if (req->dv_buf_size) 859 hdev->dv_buf_size = 860 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; 861 else 862 hdev->dv_buf_size = HCLGE_DEFAULT_DV; 863 864 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); 865 866 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); 867 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { 868 dev_err(&hdev->pdev->dev, 869 "only %u msi resources available, not enough for pf(min:2).\n", 870 hdev->num_nic_msi); 871 return -EINVAL; 872 } 873 874 if (hnae3_dev_roce_supported(hdev)) { 875 hdev->num_roce_msi = 876 le16_to_cpu(req->pf_intr_vector_number_roce); 877 878 /* PF should have NIC vectors and Roce vectors, 879 * NIC vectors are queued before Roce vectors. 880 */ 881 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; 882 } else { 883 hdev->num_msi = hdev->num_nic_msi; 884 } 885 886 return 0; 887 } 888 889 static int hclge_parse_speed(u8 speed_cmd, u32 *speed) 890 { 891 switch (speed_cmd) { 892 case HCLGE_FW_MAC_SPEED_10M: 893 *speed = HCLGE_MAC_SPEED_10M; 894 break; 895 case HCLGE_FW_MAC_SPEED_100M: 896 *speed = HCLGE_MAC_SPEED_100M; 897 break; 898 case HCLGE_FW_MAC_SPEED_1G: 899 *speed = HCLGE_MAC_SPEED_1G; 900 break; 901 case HCLGE_FW_MAC_SPEED_10G: 902 *speed = HCLGE_MAC_SPEED_10G; 903 break; 904 case HCLGE_FW_MAC_SPEED_25G: 905 *speed = HCLGE_MAC_SPEED_25G; 906 break; 907 case HCLGE_FW_MAC_SPEED_40G: 908 *speed = HCLGE_MAC_SPEED_40G; 909 break; 910 case HCLGE_FW_MAC_SPEED_50G: 911 *speed = HCLGE_MAC_SPEED_50G; 912 break; 913 case HCLGE_FW_MAC_SPEED_100G: 914 *speed = HCLGE_MAC_SPEED_100G; 915 break; 916 case HCLGE_FW_MAC_SPEED_200G: 917 *speed = HCLGE_MAC_SPEED_200G; 918 break; 919 default: 920 return -EINVAL; 921 } 922 923 return 0; 924 } 925 926 static const struct hclge_speed_bit_map speed_bit_map[] = { 927 {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT}, 928 {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT}, 929 {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT}, 930 {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, 931 {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, 932 {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, 933 {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS}, 934 {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS}, 935 {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS}, 936 }; 937 938 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) 939 { 940 u16 i; 941 942 for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { 943 if (speed == speed_bit_map[i].speed) { 944 *speed_bit = speed_bit_map[i].speed_bit; 945 return 0; 946 } 947 } 948 949 return -EINVAL; 950 } 951 952 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) 953 { 954 struct hclge_vport *vport = hclge_get_vport(handle); 955 struct hclge_dev *hdev = vport->back; 956 u32 speed_ability = hdev->hw.mac.speed_ability; 957 u32 speed_bit = 0; 958 int ret; 959 960 ret = hclge_get_speed_bit(speed, &speed_bit); 961 if (ret) 962 return ret; 963 964 if (speed_bit & speed_ability) 965 return 0; 966 967 return -EINVAL; 968 } 969 970 static void hclge_update_fec_support(struct hclge_mac *mac) 971 { 972 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); 973 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 
mac->supported); 974 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported); 975 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 976 977 if (mac->fec_ability & BIT(HNAE3_FEC_BASER)) 978 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 979 mac->supported); 980 if (mac->fec_ability & BIT(HNAE3_FEC_RS)) 981 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 982 mac->supported); 983 if (mac->fec_ability & BIT(HNAE3_FEC_LLRS)) 984 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 985 mac->supported); 986 if (mac->fec_ability & BIT(HNAE3_FEC_NONE)) 987 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 988 mac->supported); 989 } 990 991 static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = { 992 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, 993 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, 994 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, 995 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, 996 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT}, 997 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, 998 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT}, 999 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1000 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, 1001 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, 1002 }; 1003 1004 static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = { 1005 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, 1006 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, 1007 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT}, 1008 {HCLGE_SUPPORT_100G_R4_BIT, 1009 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, 1010 {HCLGE_SUPPORT_100G_R2_BIT, 1011 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT}, 1012 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1013 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, 1014 {HCLGE_SUPPORT_200G_R4_BIT, 1015 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, 1016 }; 1017 1018 static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = { 1019 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, 1020 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, 1021 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, 1022 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, 1023 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT}, 1024 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, 1025 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT}, 1026 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1027 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, 1028 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, 1029 }; 1030 1031 static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = { 1032 {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, 1033 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, 1034 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, 1035 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, 1036 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, 1037 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT}, 1038 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, 1039 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT}, 1040 {HCLGE_SUPPORT_200G_R4_EXT_BIT, 1041 
ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, 1042 {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, 1043 }; 1044 1045 static void hclge_convert_setting_sr(u16 speed_ability, 1046 unsigned long *link_mode) 1047 { 1048 int i; 1049 1050 for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) { 1051 if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit) 1052 linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode, 1053 link_mode); 1054 } 1055 } 1056 1057 static void hclge_convert_setting_lr(u16 speed_ability, 1058 unsigned long *link_mode) 1059 { 1060 int i; 1061 1062 for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) { 1063 if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit) 1064 linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode, 1065 link_mode); 1066 } 1067 } 1068 1069 static void hclge_convert_setting_cr(u16 speed_ability, 1070 unsigned long *link_mode) 1071 { 1072 int i; 1073 1074 for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) { 1075 if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit) 1076 linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode, 1077 link_mode); 1078 } 1079 } 1080 1081 static void hclge_convert_setting_kr(u16 speed_ability, 1082 unsigned long *link_mode) 1083 { 1084 int i; 1085 1086 for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) { 1087 if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit) 1088 linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode, 1089 link_mode); 1090 } 1091 } 1092 1093 static void hclge_convert_setting_fec(struct hclge_mac *mac) 1094 { 1095 /* If firmware has reported fec_ability, don't need to convert by speed */ 1096 if (mac->fec_ability) 1097 goto out; 1098 1099 switch (mac->speed) { 1100 case HCLGE_MAC_SPEED_10G: 1101 case HCLGE_MAC_SPEED_40G: 1102 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) | 1103 BIT(HNAE3_FEC_NONE); 1104 break; 1105 case HCLGE_MAC_SPEED_25G: 1106 case HCLGE_MAC_SPEED_50G: 1107 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | 1108 BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE); 1109 break; 1110 case HCLGE_MAC_SPEED_100G: 1111 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | 1112 BIT(HNAE3_FEC_NONE); 1113 break; 1114 case HCLGE_MAC_SPEED_200G: 1115 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | 1116 BIT(HNAE3_FEC_LLRS); 1117 break; 1118 default: 1119 mac->fec_ability = 0; 1120 break; 1121 } 1122 1123 out: 1124 hclge_update_fec_support(mac); 1125 } 1126 1127 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, 1128 u16 speed_ability) 1129 { 1130 struct hclge_mac *mac = &hdev->hw.mac; 1131 1132 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1133 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 1134 mac->supported); 1135 1136 hclge_convert_setting_sr(speed_ability, mac->supported); 1137 hclge_convert_setting_lr(speed_ability, mac->supported); 1138 hclge_convert_setting_cr(speed_ability, mac->supported); 1139 if (hnae3_dev_fec_supported(hdev)) 1140 hclge_convert_setting_fec(mac); 1141 1142 if (hnae3_dev_pause_supported(hdev)) 1143 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1144 1145 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); 1146 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1147 } 1148 1149 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, 1150 u16 speed_ability) 1151 { 1152 struct hclge_mac *mac = &hdev->hw.mac; 1153 1154 hclge_convert_setting_kr(speed_ability, mac->supported); 1155 if (hnae3_dev_fec_supported(hdev)) 
1156 hclge_convert_setting_fec(mac); 1157 1158 if (hnae3_dev_pause_supported(hdev)) 1159 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1160 1161 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); 1162 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1163 } 1164 1165 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, 1166 u16 speed_ability) 1167 { 1168 unsigned long *supported = hdev->hw.mac.supported; 1169 1170 /* default to support all speed for GE port */ 1171 if (!speed_ability) 1172 speed_ability = HCLGE_SUPPORT_GE; 1173 1174 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1175 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1176 supported); 1177 1178 if (speed_ability & HCLGE_SUPPORT_100M_BIT) { 1179 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 1180 supported); 1181 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 1182 supported); 1183 } 1184 1185 if (speed_ability & HCLGE_SUPPORT_10M_BIT) { 1186 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); 1187 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); 1188 } 1189 1190 if (hnae3_dev_pause_supported(hdev)) { 1191 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); 1192 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); 1193 } 1194 1195 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); 1196 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); 1197 } 1198 1199 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) 1200 { 1201 u8 media_type = hdev->hw.mac.media_type; 1202 1203 if (media_type == HNAE3_MEDIA_TYPE_FIBER) 1204 hclge_parse_fiber_link_mode(hdev, speed_ability); 1205 else if (media_type == HNAE3_MEDIA_TYPE_COPPER) 1206 hclge_parse_copper_link_mode(hdev, speed_ability); 1207 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) 1208 hclge_parse_backplane_link_mode(hdev, speed_ability); 1209 } 1210 1211 static u32 hclge_get_max_speed(u16 speed_ability) 1212 { 1213 if (speed_ability & HCLGE_SUPPORT_200G_BITS) 1214 return HCLGE_MAC_SPEED_200G; 1215 1216 if (speed_ability & HCLGE_SUPPORT_100G_BITS) 1217 return HCLGE_MAC_SPEED_100G; 1218 1219 if (speed_ability & HCLGE_SUPPORT_50G_BITS) 1220 return HCLGE_MAC_SPEED_50G; 1221 1222 if (speed_ability & HCLGE_SUPPORT_40G_BIT) 1223 return HCLGE_MAC_SPEED_40G; 1224 1225 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1226 return HCLGE_MAC_SPEED_25G; 1227 1228 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1229 return HCLGE_MAC_SPEED_10G; 1230 1231 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1232 return HCLGE_MAC_SPEED_1G; 1233 1234 if (speed_ability & HCLGE_SUPPORT_100M_BIT) 1235 return HCLGE_MAC_SPEED_100M; 1236 1237 if (speed_ability & HCLGE_SUPPORT_10M_BIT) 1238 return HCLGE_MAC_SPEED_10M; 1239 1240 return HCLGE_MAC_SPEED_1G; 1241 } 1242 1243 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) 1244 { 1245 #define HCLGE_TX_SPARE_SIZE_UNIT 4096 1246 #define SPEED_ABILITY_EXT_SHIFT 8 1247 1248 struct hclge_cfg_param_cmd *req; 1249 u64 mac_addr_tmp_high; 1250 u16 speed_ability_ext; 1251 u64 mac_addr_tmp; 1252 unsigned int i; 1253 1254 req = (struct hclge_cfg_param_cmd *)desc[0].data; 1255 1256 /* get the configuration */ 1257 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1258 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); 1259 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1260 HCLGE_CFG_TQP_DESC_N_M, 1261 HCLGE_CFG_TQP_DESC_N_S); 1262 1263 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), 
				      HCLGE_CFG_PHY_ADDR_M,
				      HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
	 * power of 2, instead of reading out directly. This would
	 * be more flexible for future changes and expansions.
	 * When the VF max rss size field is HCLGE_CFG_RSS_SIZE_S,
	 * it does not make sense if PF's field is 0. In this case, PF and VF
	 * have the same max rss size field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from the configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
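/* Illustrative sketch (not part of the driver): hclge_parse_cfg() above pulls
 * each configuration item out of the 32-bit parameter words with
 * hnae3_get_field(), which is a plain mask-and-shift helper. A minimal,
 * hypothetical equivalent (EXAMPLE_FIELD_M/S are made-up names):
 *
 *	#define EXAMPLE_FIELD_M		GENMASK(13, 8)
 *	#define EXAMPLE_FIELD_S		8
 *
 *	static u32 example_get_field(u32 word)
 *	{
 *		return (word & EXAMPLE_FIELD_M) >> EXAMPLE_FIELD_S;
 *	}
 *
 * Note also that pf_rss_size_max is carried as a power-of-two exponent, so
 * the decoded value is 1U << field (falling back to vf_rss_size_max when the
 * field is zero), as handled at the end of hclge_parse_cfg().
 */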
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be expressed in units of 4 bytes when sent to
		 * hardware
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM 8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
	ae_dev->dev_specs.tnl_num = req1->tnl_num;
	ae_dev->dev_specs.hilink_version = req1->hilink_version;
}
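/* Illustrative sketch (not part of the driver): hclge_check_dev_specs() below
 * applies a simple "keep the firmware-reported value unless it is zero,
 * otherwise fall back to a driver default" rule to every specification. A
 * hypothetical one-field version of that pattern:
 *
 *	static void example_fixup_spec(u16 *spec, u16 default_val)
 *	{
 *		if (!*spec)
 *			*spec = default_val;
 *	}
 *
 * This keeps older firmware, which may report zero for fields it does not
 * know about, working with conservative defaults.
 */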
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
{
	u32 reg_num = 0;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &reg_num);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
	return 0;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	ret = hclge_query_mac_stats_num(hdev);
	if (ret)
		return ret;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
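/* Illustrative sketch (not part of the driver): hclge_query_dev_specs() above
 * reads a multi-descriptor result by marking every descriptor except the last
 * with the "next" flag before sending the whole array in one call. The shape
 * of that pattern, with a purely illustrative descriptor count of three,
 * is roughly:
 *
 *	struct hclge_desc desc[3];
 *	int ret, i;
 *
 *	for (i = 0; i < 2; i++) {
 *		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
 *					   true);
 *		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
 *	}
 *	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_QUERY_DEV_SPECS, true);
 *	ret = hclge_cmd_send(&hdev->hw, desc, 3);
 *
 * Only identifiers already used in this file appear here.
 */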
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* minimal queue pairs equal to the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}
	hdev->hw.mac.req_speed = hdev->hw.mac.speed;
	hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
	hdev->hw.mac.req_duplex = DUPLEX_FULL;

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
false); 1623 req = (struct hclge_cfg_gro_status_cmd *)desc.data; 1624 1625 req->gro_en = hdev->gro_en ? 1 : 0; 1626 1627 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1628 if (ret) 1629 dev_err(&hdev->pdev->dev, 1630 "GRO hardware config cmd failed, ret = %d\n", ret); 1631 1632 return ret; 1633 } 1634 1635 static int hclge_alloc_tqps(struct hclge_dev *hdev) 1636 { 1637 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1638 struct hclge_comm_tqp *tqp; 1639 int i; 1640 1641 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 1642 sizeof(struct hclge_comm_tqp), GFP_KERNEL); 1643 if (!hdev->htqp) 1644 return -ENOMEM; 1645 1646 tqp = hdev->htqp; 1647 1648 for (i = 0; i < hdev->num_tqps; i++) { 1649 tqp->dev = &hdev->pdev->dev; 1650 tqp->index = i; 1651 1652 tqp->q.ae_algo = &ae_algo; 1653 tqp->q.buf_size = hdev->rx_buf_len; 1654 tqp->q.tx_desc_num = hdev->num_tx_desc; 1655 tqp->q.rx_desc_num = hdev->num_rx_desc; 1656 1657 /* need an extended offset to configure queues >= 1658 * HCLGE_TQP_MAX_SIZE_DEV_V2 1659 */ 1660 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) 1661 tqp->q.io_base = hdev->hw.hw.io_base + 1662 HCLGE_TQP_REG_OFFSET + 1663 i * HCLGE_TQP_REG_SIZE; 1664 else 1665 tqp->q.io_base = hdev->hw.hw.io_base + 1666 HCLGE_TQP_REG_OFFSET + 1667 HCLGE_TQP_EXT_REG_OFFSET + 1668 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * 1669 HCLGE_TQP_REG_SIZE; 1670 1671 /* when device supports tx push and has device memory, 1672 * the queue can execute push mode or doorbell mode on 1673 * device memory. 1674 */ 1675 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) 1676 tqp->q.mem_base = hdev->hw.hw.mem_base + 1677 HCLGE_TQP_MEM_OFFSET(hdev, i); 1678 1679 tqp++; 1680 } 1681 1682 return 0; 1683 } 1684 1685 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, 1686 u16 tqp_pid, u16 tqp_vid, bool is_pf) 1687 { 1688 struct hclge_tqp_map_cmd *req; 1689 struct hclge_desc desc; 1690 int ret; 1691 1692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); 1693 1694 req = (struct hclge_tqp_map_cmd *)desc.data; 1695 req->tqp_id = cpu_to_le16(tqp_pid); 1696 req->tqp_vf = func_id; 1697 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; 1698 if (!is_pf) 1699 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; 1700 req->tqp_vid = cpu_to_le16(tqp_vid); 1701 1702 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1703 if (ret) 1704 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); 1705 1706 return ret; 1707 } 1708 1709 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) 1710 { 1711 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 1712 struct hclge_dev *hdev = vport->back; 1713 int i, alloced; 1714 1715 for (i = 0, alloced = 0; i < hdev->num_tqps && 1716 alloced < num_tqps; i++) { 1717 if (!hdev->htqp[i].alloced) { 1718 hdev->htqp[i].q.handle = &vport->nic; 1719 hdev->htqp[i].q.tqp_index = alloced; 1720 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; 1721 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; 1722 kinfo->tqp[alloced] = &hdev->htqp[i].q; 1723 hdev->htqp[i].alloced = true; 1724 alloced++; 1725 } 1726 } 1727 vport->alloc_tqps = alloced; 1728 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, 1729 vport->alloc_tqps / hdev->tm_info.num_tc); 1730 1731 /* ensure one to one mapping between irq and queue at default */ 1732 kinfo->rss_size = min_t(u16, kinfo->rss_size, 1733 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); 1734 1735 return 0; 1736 } 1737 1738 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, 1739 u16 num_tx_desc, u16 num_rx_desc) 1740 1741 { 1742 
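	/* record ring sizes and buffer lengths in this vport's knic info,
	 * then hand out TQPs to the vport via hclge_assign_tqp() below
	 */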
struct hnae3_handle *nic = &vport->nic; 1743 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1744 struct hclge_dev *hdev = vport->back; 1745 int ret; 1746 1747 kinfo->num_tx_desc = num_tx_desc; 1748 kinfo->num_rx_desc = num_rx_desc; 1749 1750 kinfo->rx_buf_len = hdev->rx_buf_len; 1751 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; 1752 1753 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, 1754 sizeof(struct hnae3_queue *), GFP_KERNEL); 1755 if (!kinfo->tqp) 1756 return -ENOMEM; 1757 1758 ret = hclge_assign_tqp(vport, num_tqps); 1759 if (ret) 1760 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1761 1762 return ret; 1763 } 1764 1765 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, 1766 struct hclge_vport *vport) 1767 { 1768 struct hnae3_handle *nic = &vport->nic; 1769 struct hnae3_knic_private_info *kinfo; 1770 u16 i; 1771 1772 kinfo = &nic->kinfo; 1773 for (i = 0; i < vport->alloc_tqps; i++) { 1774 struct hclge_comm_tqp *q = 1775 container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); 1776 bool is_pf; 1777 int ret; 1778 1779 is_pf = !(vport->vport_id); 1780 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, 1781 i, is_pf); 1782 if (ret) 1783 return ret; 1784 } 1785 1786 return 0; 1787 } 1788 1789 static int hclge_map_tqp(struct hclge_dev *hdev) 1790 { 1791 struct hclge_vport *vport = hdev->vport; 1792 u16 i, num_vport; 1793 1794 num_vport = hdev->num_req_vfs + 1; 1795 for (i = 0; i < num_vport; i++) { 1796 int ret; 1797 1798 ret = hclge_map_tqp_to_vport(hdev, vport); 1799 if (ret) 1800 return ret; 1801 1802 vport++; 1803 } 1804 1805 return 0; 1806 } 1807 1808 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1809 { 1810 struct hnae3_handle *nic = &vport->nic; 1811 struct hclge_dev *hdev = vport->back; 1812 int ret; 1813 1814 nic->pdev = hdev->pdev; 1815 nic->ae_algo = &ae_algo; 1816 bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, 1817 MAX_NUMNODES); 1818 nic->kinfo.io_base = hdev->hw.hw.io_base; 1819 1820 ret = hclge_knic_setup(vport, num_tqps, 1821 hdev->num_tx_desc, hdev->num_rx_desc); 1822 if (ret) 1823 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); 1824 1825 return ret; 1826 } 1827 1828 static int hclge_alloc_vport(struct hclge_dev *hdev) 1829 { 1830 struct pci_dev *pdev = hdev->pdev; 1831 struct hclge_vport *vport; 1832 u32 tqp_main_vport; 1833 u32 tqp_per_vport; 1834 int num_vport, i; 1835 int ret; 1836 1837 /* We need to alloc a vport for main NIC of PF */ 1838 num_vport = hdev->num_req_vfs + 1; 1839 1840 if (hdev->num_tqps < num_vport) { 1841 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", 1842 hdev->num_tqps, num_vport); 1843 return -EINVAL; 1844 } 1845 1846 /* Alloc the same number of TQPs for every vport */ 1847 tqp_per_vport = hdev->num_tqps / num_vport; 1848 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1849 1850 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1851 GFP_KERNEL); 1852 if (!vport) 1853 return -ENOMEM; 1854 1855 hdev->vport = vport; 1856 hdev->num_alloc_vport = num_vport; 1857 1858 if (IS_ENABLED(CONFIG_PCI_IOV)) 1859 hdev->num_alloc_vfs = hdev->num_req_vfs; 1860 1861 for (i = 0; i < num_vport; i++) { 1862 vport->back = hdev; 1863 vport->vport_id = i; 1864 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; 1865 vport->mps = HCLGE_MAC_DEFAULT_FRAME; 1866 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; 1867 vport->port_base_vlan_cfg.tbl_sta = true; 1868 vport->rxvlan_cfg.rx_vlan_offload_en = true; 1869 
vport->req_vlan_fltr_en = true; 1870 INIT_LIST_HEAD(&vport->vlan_list); 1871 INIT_LIST_HEAD(&vport->uc_mac_list); 1872 INIT_LIST_HEAD(&vport->mc_mac_list); 1873 spin_lock_init(&vport->mac_list_lock); 1874 1875 if (i == 0) 1876 ret = hclge_vport_setup(vport, tqp_main_vport); 1877 else 1878 ret = hclge_vport_setup(vport, tqp_per_vport); 1879 if (ret) { 1880 dev_err(&pdev->dev, 1881 "vport setup failed for vport %d, %d\n", 1882 i, ret); 1883 return ret; 1884 } 1885 1886 vport++; 1887 } 1888 1889 return 0; 1890 } 1891 1892 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1893 struct hclge_pkt_buf_alloc *buf_alloc) 1894 { 1895 /* TX buffer size is unit by 128 byte */ 1896 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1897 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1898 struct hclge_tx_buff_alloc_cmd *req; 1899 struct hclge_desc desc; 1900 int ret; 1901 u8 i; 1902 1903 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1904 1905 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1906 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1907 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1908 1909 req->tx_pkt_buff[i] = 1910 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1911 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1912 } 1913 1914 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1915 if (ret) 1916 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1917 ret); 1918 1919 return ret; 1920 } 1921 1922 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1923 struct hclge_pkt_buf_alloc *buf_alloc) 1924 { 1925 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1926 1927 if (ret) 1928 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); 1929 1930 return ret; 1931 } 1932 1933 static u32 hclge_get_tc_num(struct hclge_dev *hdev) 1934 { 1935 unsigned int i; 1936 u32 cnt = 0; 1937 1938 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1939 if (hdev->hw_tc_map & BIT(i)) 1940 cnt++; 1941 return cnt; 1942 } 1943 1944 /* Get the number of pfc enabled TCs, which have private buffer */ 1945 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1946 struct hclge_pkt_buf_alloc *buf_alloc) 1947 { 1948 struct hclge_priv_buf *priv; 1949 unsigned int i; 1950 int cnt = 0; 1951 1952 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1953 priv = &buf_alloc->priv_buf[i]; 1954 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1955 priv->enable) 1956 cnt++; 1957 } 1958 1959 return cnt; 1960 } 1961 1962 /* Get the number of pfc disabled TCs, which have private buffer */ 1963 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1964 struct hclge_pkt_buf_alloc *buf_alloc) 1965 { 1966 struct hclge_priv_buf *priv; 1967 unsigned int i; 1968 int cnt = 0; 1969 1970 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1971 priv = &buf_alloc->priv_buf[i]; 1972 if (hdev->hw_tc_map & BIT(i) && 1973 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1974 priv->enable) 1975 cnt++; 1976 } 1977 1978 return cnt; 1979 } 1980 1981 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1982 { 1983 struct hclge_priv_buf *priv; 1984 u32 rx_priv = 0; 1985 int i; 1986 1987 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1988 priv = &buf_alloc->priv_buf[i]; 1989 if (priv->enable) 1990 rx_priv += priv->buf_size; 1991 } 1992 return rx_priv; 1993 } 1994 1995 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1996 { 1997 u32 i, total_tx_size = 0; 1998 1999 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 2000 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 2001 2002 return total_tx_size; 2003 } 2004 2005 static bool 
hclge_is_rx_buf_ok(struct hclge_dev *hdev, 2006 struct hclge_pkt_buf_alloc *buf_alloc, 2007 u32 rx_all) 2008 { 2009 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 2010 u32 tc_num = hclge_get_tc_num(hdev); 2011 u32 shared_buf, aligned_mps; 2012 u32 rx_priv; 2013 int i; 2014 2015 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); 2016 2017 if (hnae3_dev_dcb_supported(hdev)) 2018 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + 2019 hdev->dv_buf_size; 2020 else 2021 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF 2022 + hdev->dv_buf_size; 2023 2024 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 2025 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), 2026 HCLGE_BUF_SIZE_UNIT); 2027 2028 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); 2029 if (rx_all < rx_priv + shared_std) 2030 return false; 2031 2032 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); 2033 buf_alloc->s_buf.buf_size = shared_buf; 2034 if (hnae3_dev_dcb_supported(hdev)) { 2035 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; 2036 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 2037 - roundup(aligned_mps / HCLGE_BUF_DIV_BY, 2038 HCLGE_BUF_SIZE_UNIT); 2039 } else { 2040 buf_alloc->s_buf.self.high = aligned_mps + 2041 HCLGE_NON_DCB_ADDITIONAL_BUF; 2042 buf_alloc->s_buf.self.low = aligned_mps; 2043 } 2044 2045 if (hnae3_dev_dcb_supported(hdev)) { 2046 hi_thrd = shared_buf - hdev->dv_buf_size; 2047 2048 if (tc_num <= NEED_RESERVE_TC_NUM) 2049 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT 2050 / BUF_MAX_PERCENT; 2051 2052 if (tc_num) 2053 hi_thrd = hi_thrd / tc_num; 2054 2055 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps); 2056 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); 2057 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; 2058 } else { 2059 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; 2060 lo_thrd = aligned_mps; 2061 } 2062 2063 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2064 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 2065 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 2066 } 2067 2068 return true; 2069 } 2070 2071 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, 2072 struct hclge_pkt_buf_alloc *buf_alloc) 2073 { 2074 u32 i, total_size; 2075 2076 total_size = hdev->pkt_buf_size; 2077 2078 /* alloc tx buffer for all enabled tc */ 2079 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2080 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2081 2082 if (hdev->hw_tc_map & BIT(i)) { 2083 if (total_size < hdev->tx_buf_size) 2084 return -ENOMEM; 2085 2086 priv->tx_buf_size = hdev->tx_buf_size; 2087 } else { 2088 priv->tx_buf_size = 0; 2089 } 2090 2091 total_size -= priv->tx_buf_size; 2092 } 2093 2094 return 0; 2095 } 2096 2097 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, 2098 struct hclge_pkt_buf_alloc *buf_alloc) 2099 { 2100 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2101 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); 2102 unsigned int i; 2103 2104 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2105 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2106 2107 priv->enable = 0; 2108 priv->wl.low = 0; 2109 priv->wl.high = 0; 2110 priv->buf_size = 0; 2111 2112 if (!(hdev->hw_tc_map & BIT(i))) 2113 continue; 2114 2115 priv->enable = 1; 2116 2117 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 2118 priv->wl.low = max ? 
aligned_mps : HCLGE_BUF_SIZE_UNIT; 2119 priv->wl.high = roundup(priv->wl.low + aligned_mps, 2120 HCLGE_BUF_SIZE_UNIT); 2121 } else { 2122 priv->wl.low = 0; 2123 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) : 2124 aligned_mps; 2125 } 2126 2127 priv->buf_size = priv->wl.high + hdev->dv_buf_size; 2128 } 2129 2130 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2131 } 2132 2133 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, 2134 struct hclge_pkt_buf_alloc *buf_alloc) 2135 { 2136 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2137 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 2138 int i; 2139 2140 /* let the last to be cleared first */ 2141 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 2142 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2143 unsigned int mask = BIT((unsigned int)i); 2144 2145 if (hdev->hw_tc_map & mask && 2146 !(hdev->tm_info.hw_pfc_map & mask)) { 2147 /* Clear the no pfc TC private buffer */ 2148 priv->wl.low = 0; 2149 priv->wl.high = 0; 2150 priv->buf_size = 0; 2151 priv->enable = 0; 2152 no_pfc_priv_num--; 2153 } 2154 2155 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 2156 no_pfc_priv_num == 0) 2157 break; 2158 } 2159 2160 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2161 } 2162 2163 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, 2164 struct hclge_pkt_buf_alloc *buf_alloc) 2165 { 2166 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2167 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 2168 int i; 2169 2170 /* let the last to be cleared first */ 2171 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 2172 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2173 unsigned int mask = BIT((unsigned int)i); 2174 2175 if (hdev->hw_tc_map & mask && 2176 hdev->tm_info.hw_pfc_map & mask) { 2177 /* Reduce the number of pfc TC with private buffer */ 2178 priv->wl.low = 0; 2179 priv->enable = 0; 2180 priv->wl.high = 0; 2181 priv->buf_size = 0; 2182 pfc_priv_num--; 2183 } 2184 2185 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 2186 pfc_priv_num == 0) 2187 break; 2188 } 2189 2190 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2191 } 2192 2193 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, 2194 struct hclge_pkt_buf_alloc *buf_alloc) 2195 { 2196 #define COMPENSATE_BUFFER 0x3C00 2197 #define COMPENSATE_HALF_MPS_NUM 5 2198 #define PRIV_WL_GAP 0x1800 2199 2200 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2201 u32 tc_num = hclge_get_tc_num(hdev); 2202 u32 half_mps = hdev->mps >> 1; 2203 u32 min_rx_priv; 2204 unsigned int i; 2205 2206 if (tc_num) 2207 rx_priv = rx_priv / tc_num; 2208 2209 if (tc_num <= NEED_RESERVE_TC_NUM) 2210 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 2211 2212 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + 2213 COMPENSATE_HALF_MPS_NUM * half_mps; 2214 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); 2215 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); 2216 if (rx_priv < min_rx_priv) 2217 return false; 2218 2219 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2220 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2221 2222 priv->enable = 0; 2223 priv->wl.low = 0; 2224 priv->wl.high = 0; 2225 priv->buf_size = 0; 2226 2227 if (!(hdev->hw_tc_map & BIT(i))) 2228 continue; 2229 2230 priv->enable = 1; 2231 priv->buf_size = rx_priv; 2232 priv->wl.high = rx_priv - hdev->dv_buf_size; 2233 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 2234 } 2235 2236 
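	/* every enabled TC was given the same private buffer size above,
	 * so the shared buffer is not used in this mode
	 */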
buf_alloc->s_buf.buf_size = 0; 2237 2238 return true; 2239 } 2240 2241 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs 2242 * @hdev: pointer to struct hclge_dev 2243 * @buf_alloc: pointer to buffer calculation data 2244 * @return: 0: calculate successful, negative: fail 2245 */ 2246 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, 2247 struct hclge_pkt_buf_alloc *buf_alloc) 2248 { 2249 /* When DCB is not supported, rx private buffer is not allocated. */ 2250 if (!hnae3_dev_dcb_supported(hdev)) { 2251 u32 rx_all = hdev->pkt_buf_size; 2252 2253 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); 2254 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 2255 return -ENOMEM; 2256 2257 return 0; 2258 } 2259 2260 if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) 2261 return 0; 2262 2263 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) 2264 return 0; 2265 2266 /* try to decrease the buffer size */ 2267 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) 2268 return 0; 2269 2270 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) 2271 return 0; 2272 2273 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) 2274 return 0; 2275 2276 return -ENOMEM; 2277 } 2278 2279 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 2280 struct hclge_pkt_buf_alloc *buf_alloc) 2281 { 2282 struct hclge_rx_priv_buff_cmd *req; 2283 struct hclge_desc desc; 2284 int ret; 2285 int i; 2286 2287 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 2288 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 2289 2290 /* Alloc private buffer TCs */ 2291 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2292 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2293 2294 req->buf_num[i] = 2295 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 2296 req->buf_num[i] |= 2297 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 2298 } 2299 2300 req->shared_buf = 2301 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 2302 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 2303 2304 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2305 if (ret) 2306 dev_err(&hdev->pdev->dev, 2307 "rx private buffer alloc cmd failed %d\n", ret); 2308 2309 return ret; 2310 } 2311 2312 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 2313 struct hclge_pkt_buf_alloc *buf_alloc) 2314 { 2315 struct hclge_rx_priv_wl_buf *req; 2316 struct hclge_priv_buf *priv; 2317 struct hclge_desc desc[2]; 2318 int i, j; 2319 int ret; 2320 2321 for (i = 0; i < 2; i++) { 2322 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 2323 false); 2324 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 2325 2326 /* The first descriptor set the NEXT bit to 1 */ 2327 if (i == 0) 2328 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2329 else 2330 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2331 2332 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2333 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 2334 2335 priv = &buf_alloc->priv_buf[idx]; 2336 req->tc_wl[j].high = 2337 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 2338 req->tc_wl[j].high |= 2339 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2340 req->tc_wl[j].low = 2341 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 2342 req->tc_wl[j].low |= 2343 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2344 } 2345 } 2346 2347 /* Send 2 descriptor at one time */ 2348 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2349 if (ret) 2350 dev_err(&hdev->pdev->dev, 2351 "rx private waterline config cmd failed %d\n", 2352 ret); 2353 return ret; 2354 } 2355 2356 static int hclge_common_thrd_config(struct hclge_dev *hdev, 2357 
struct hclge_pkt_buf_alloc *buf_alloc) 2358 { 2359 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 2360 struct hclge_rx_com_thrd *req; 2361 struct hclge_desc desc[2]; 2362 struct hclge_tc_thrd *tc; 2363 int i, j; 2364 int ret; 2365 2366 for (i = 0; i < 2; i++) { 2367 hclge_cmd_setup_basic_desc(&desc[i], 2368 HCLGE_OPC_RX_COM_THRD_ALLOC, false); 2369 req = (struct hclge_rx_com_thrd *)&desc[i].data; 2370 2371 /* The first descriptor set the NEXT bit to 1 */ 2372 if (i == 0) 2373 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2374 else 2375 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2376 2377 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2378 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 2379 2380 req->com_thrd[j].high = 2381 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 2382 req->com_thrd[j].high |= 2383 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2384 req->com_thrd[j].low = 2385 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 2386 req->com_thrd[j].low |= 2387 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2388 } 2389 } 2390 2391 /* Send 2 descriptors at one time */ 2392 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2393 if (ret) 2394 dev_err(&hdev->pdev->dev, 2395 "common threshold config cmd failed %d\n", ret); 2396 return ret; 2397 } 2398 2399 static int hclge_common_wl_config(struct hclge_dev *hdev, 2400 struct hclge_pkt_buf_alloc *buf_alloc) 2401 { 2402 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 2403 struct hclge_rx_com_wl *req; 2404 struct hclge_desc desc; 2405 int ret; 2406 2407 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 2408 2409 req = (struct hclge_rx_com_wl *)desc.data; 2410 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 2411 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2412 2413 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 2414 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2415 2416 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2417 if (ret) 2418 dev_err(&hdev->pdev->dev, 2419 "common waterline config cmd failed %d\n", ret); 2420 2421 return ret; 2422 } 2423 2424 int hclge_buffer_alloc(struct hclge_dev *hdev) 2425 { 2426 struct hclge_pkt_buf_alloc *pkt_buf; 2427 int ret; 2428 2429 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 2430 if (!pkt_buf) 2431 return -ENOMEM; 2432 2433 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 2434 if (ret) { 2435 dev_err(&hdev->pdev->dev, 2436 "could not calc tx buffer size for all TCs %d\n", ret); 2437 goto out; 2438 } 2439 2440 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 2441 if (ret) { 2442 dev_err(&hdev->pdev->dev, 2443 "could not alloc tx buffers %d\n", ret); 2444 goto out; 2445 } 2446 2447 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 2448 if (ret) { 2449 dev_err(&hdev->pdev->dev, 2450 "could not calc rx priv buffer size for all TCs %d\n", 2451 ret); 2452 goto out; 2453 } 2454 2455 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 2456 if (ret) { 2457 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 2458 ret); 2459 goto out; 2460 } 2461 2462 if (hnae3_dev_dcb_supported(hdev)) { 2463 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2464 if (ret) { 2465 dev_err(&hdev->pdev->dev, 2466 "could not configure rx private waterline %d\n", 2467 ret); 2468 goto out; 2469 } 2470 2471 ret = hclge_common_thrd_config(hdev, pkt_buf); 2472 if (ret) { 2473 dev_err(&hdev->pdev->dev, 2474 "could not configure common threshold %d\n", 2475 ret); 2476 goto out; 2477 } 2478 } 2479 2480 ret = hclge_common_wl_config(hdev, pkt_buf); 2481 if (ret) 2482 
dev_err(&hdev->pdev->dev, 2483 "could not configure common waterline %d\n", ret); 2484 2485 out: 2486 kfree(pkt_buf); 2487 return ret; 2488 } 2489 2490 static int hclge_init_roce_base_info(struct hclge_vport *vport) 2491 { 2492 struct hnae3_handle *roce = &vport->roce; 2493 struct hnae3_handle *nic = &vport->nic; 2494 struct hclge_dev *hdev = vport->back; 2495 2496 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2497 2498 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) 2499 return -EINVAL; 2500 2501 roce->rinfo.base_vector = hdev->num_nic_msi; 2502 2503 roce->rinfo.netdev = nic->kinfo.netdev; 2504 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2505 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2506 2507 roce->pdev = nic->pdev; 2508 roce->ae_algo = nic->ae_algo; 2509 bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits, 2510 MAX_NUMNODES); 2511 2512 return 0; 2513 } 2514 2515 static int hclge_init_msi(struct hclge_dev *hdev) 2516 { 2517 struct pci_dev *pdev = hdev->pdev; 2518 int vectors; 2519 int i; 2520 2521 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2522 hdev->num_msi, 2523 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2524 if (vectors < 0) { 2525 dev_err(&pdev->dev, 2526 "failed(%d) to allocate MSI/MSI-X vectors\n", 2527 vectors); 2528 return vectors; 2529 } 2530 if (vectors < hdev->num_msi) 2531 dev_warn(&hdev->pdev->dev, 2532 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2533 hdev->num_msi, vectors); 2534 2535 hdev->num_msi = vectors; 2536 hdev->num_msi_left = vectors; 2537 2538 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2539 sizeof(u16), GFP_KERNEL); 2540 if (!hdev->vector_status) { 2541 pci_free_irq_vectors(pdev); 2542 return -ENOMEM; 2543 } 2544 2545 for (i = 0; i < hdev->num_msi; i++) 2546 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2547 2548 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2549 sizeof(int), GFP_KERNEL); 2550 if (!hdev->vector_irq) { 2551 pci_free_irq_vectors(pdev); 2552 return -ENOMEM; 2553 } 2554 2555 return 0; 2556 } 2557 2558 static u8 hclge_check_speed_dup(u8 duplex, int speed) 2559 { 2560 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) 2561 duplex = HCLGE_MAC_FULL; 2562 2563 return duplex; 2564 } 2565 2566 static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = { 2567 {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M}, 2568 {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M}, 2569 {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G}, 2570 {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G}, 2571 {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G}, 2572 {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G}, 2573 {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G}, 2574 {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G}, 2575 {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G}, 2576 }; 2577 2578 static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw) 2579 { 2580 u16 i; 2581 2582 for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) { 2583 if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) { 2584 *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw; 2585 return 0; 2586 } 2587 } 2588 2589 return -EINVAL; 2590 } 2591 2592 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, 2593 u8 duplex, u8 lane_num) 2594 { 2595 struct hclge_config_mac_speed_dup_cmd *req; 2596 struct hclge_desc desc; 2597 u32 speed_fw; 2598 int ret; 2599 2600 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2601 2602 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2603 
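	/* the duplex bit is set below only when full duplex (non-zero
	 * duplex) is requested, see hclge_check_speed_dup()
	 */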
2604 if (duplex) 2605 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); 2606 2607 ret = hclge_convert_to_fw_speed(speed, &speed_fw); 2608 if (ret) { 2609 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2610 return ret; 2611 } 2612 2613 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, 2614 speed_fw); 2615 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2616 1); 2617 req->lane_num = lane_num; 2618 2619 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2620 if (ret) { 2621 dev_err(&hdev->pdev->dev, 2622 "mac speed/duplex config cmd failed %d.\n", ret); 2623 return ret; 2624 } 2625 2626 return 0; 2627 } 2628 2629 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) 2630 { 2631 struct hclge_mac *mac = &hdev->hw.mac; 2632 int ret; 2633 2634 duplex = hclge_check_speed_dup(duplex, speed); 2635 if (!mac->support_autoneg && mac->speed == speed && 2636 mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0)) 2637 return 0; 2638 2639 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); 2640 if (ret) 2641 return ret; 2642 2643 hdev->hw.mac.speed = speed; 2644 hdev->hw.mac.duplex = duplex; 2645 if (!lane_num) 2646 hdev->hw.mac.lane_num = lane_num; 2647 2648 return 0; 2649 } 2650 2651 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2652 u8 duplex, u8 lane_num) 2653 { 2654 struct hclge_vport *vport = hclge_get_vport(handle); 2655 struct hclge_dev *hdev = vport->back; 2656 2657 return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); 2658 } 2659 2660 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2661 { 2662 struct hclge_config_auto_neg_cmd *req; 2663 struct hclge_desc desc; 2664 u32 flag = 0; 2665 int ret; 2666 2667 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2668 2669 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2670 if (enable) 2671 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U); 2672 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2673 2674 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2675 if (ret) 2676 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2677 ret); 2678 2679 return ret; 2680 } 2681 2682 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2683 { 2684 struct hclge_vport *vport = hclge_get_vport(handle); 2685 struct hclge_dev *hdev = vport->back; 2686 2687 if (!hdev->hw.mac.support_autoneg) { 2688 if (enable) { 2689 dev_err(&hdev->pdev->dev, 2690 "autoneg is not supported by current port\n"); 2691 return -EOPNOTSUPP; 2692 } else { 2693 return 0; 2694 } 2695 } 2696 2697 return hclge_set_autoneg_en(hdev, enable); 2698 } 2699 2700 static int hclge_get_autoneg(struct hnae3_handle *handle) 2701 { 2702 struct hclge_vport *vport = hclge_get_vport(handle); 2703 struct hclge_dev *hdev = vport->back; 2704 struct phy_device *phydev = hdev->hw.mac.phydev; 2705 2706 if (phydev) 2707 return phydev->autoneg; 2708 2709 return hdev->hw.mac.autoneg; 2710 } 2711 2712 static int hclge_restart_autoneg(struct hnae3_handle *handle) 2713 { 2714 struct hclge_vport *vport = hclge_get_vport(handle); 2715 struct hclge_dev *hdev = vport->back; 2716 int ret; 2717 2718 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); 2719 2720 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2721 if (ret) 2722 return ret; 2723 return hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2724 } 2725 2726 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt) 2727 { 2728 struct hclge_vport *vport = 
hclge_get_vport(handle); 2729 struct hclge_dev *hdev = vport->back; 2730 2731 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) 2732 return hclge_set_autoneg_en(hdev, !halt); 2733 2734 return 0; 2735 } 2736 2737 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, 2738 struct hclge_desc *desc, u32 desc_len) 2739 { 2740 u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2; 2741 u32 desc_index = 0; 2742 u32 data_index = 0; 2743 u32 i; 2744 2745 for (i = 0; i < lane_size; i++) { 2746 if (data_index >= HCLGE_DESC_DATA_LEN) { 2747 desc_index++; 2748 data_index = 0; 2749 } 2750 2751 if (desc_index >= desc_len) 2752 return; 2753 2754 hdev->fec_stats.per_lanes[i] += 2755 le32_to_cpu(desc[desc_index].data[data_index]); 2756 data_index++; 2757 } 2758 } 2759 2760 static void hclge_parse_fec_stats(struct hclge_dev *hdev, 2761 struct hclge_desc *desc, u32 desc_len) 2762 { 2763 struct hclge_query_fec_stats_cmd *req; 2764 2765 req = (struct hclge_query_fec_stats_cmd *)desc[0].data; 2766 2767 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; 2768 hdev->fec_stats.rs_corr_blocks += 2769 le32_to_cpu(req->rs_fec_corr_blocks); 2770 hdev->fec_stats.rs_uncorr_blocks += 2771 le32_to_cpu(req->rs_fec_uncorr_blocks); 2772 hdev->fec_stats.rs_error_blocks += 2773 le32_to_cpu(req->rs_fec_error_blocks); 2774 hdev->fec_stats.base_r_corr_blocks += 2775 le32_to_cpu(req->base_r_fec_corr_blocks); 2776 hdev->fec_stats.base_r_uncorr_blocks += 2777 le32_to_cpu(req->base_r_fec_uncorr_blocks); 2778 2779 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); 2780 } 2781 2782 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) 2783 { 2784 struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM]; 2785 int ret; 2786 u32 i; 2787 2788 for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) { 2789 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS, 2790 true); 2791 if (i != (HCLGE_FEC_STATS_CMD_NUM - 1)) 2792 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2793 } 2794 2795 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); 2796 if (ret) 2797 return ret; 2798 2799 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); 2800 2801 return 0; 2802 } 2803 2804 static void hclge_update_fec_stats(struct hclge_dev *hdev) 2805 { 2806 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2807 int ret; 2808 2809 if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || 2810 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) 2811 return; 2812 2813 ret = hclge_update_fec_stats_hw(hdev); 2814 if (ret) 2815 dev_err(&hdev->pdev->dev, 2816 "failed to update fec stats, ret = %d\n", ret); 2817 2818 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); 2819 } 2820 2821 static void hclge_get_fec_stats_total(struct hclge_dev *hdev, 2822 struct ethtool_fec_stats *fec_stats) 2823 { 2824 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; 2825 fec_stats->uncorrectable_blocks.total = 2826 hdev->fec_stats.rs_uncorr_blocks; 2827 } 2828 2829 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, 2830 struct ethtool_fec_stats *fec_stats) 2831 { 2832 u32 i; 2833 2834 if (hdev->fec_stats.base_r_lane_num == 0 || 2835 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { 2836 dev_err(&hdev->pdev->dev, 2837 "fec stats lane number(%llu) is invalid\n", 2838 hdev->fec_stats.base_r_lane_num); 2839 return; 2840 } 2841 2842 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { 2843 fec_stats->corrected_blocks.lanes[i] = 2844 hdev->fec_stats.base_r_corr_per_lanes[i]; 2845 
fec_stats->uncorrectable_blocks.lanes[i] = 2846 hdev->fec_stats.base_r_uncorr_per_lanes[i]; 2847 } 2848 } 2849 2850 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, 2851 struct ethtool_fec_stats *fec_stats) 2852 { 2853 u32 fec_mode = hdev->hw.mac.fec_mode; 2854 2855 switch (fec_mode) { 2856 case BIT(HNAE3_FEC_RS): 2857 case BIT(HNAE3_FEC_LLRS): 2858 hclge_get_fec_stats_total(hdev, fec_stats); 2859 break; 2860 case BIT(HNAE3_FEC_BASER): 2861 hclge_get_fec_stats_lanes(hdev, fec_stats); 2862 break; 2863 default: 2864 dev_err(&hdev->pdev->dev, 2865 "fec stats is not supported by current fec mode(0x%x)\n", 2866 fec_mode); 2867 break; 2868 } 2869 } 2870 2871 static void hclge_get_fec_stats(struct hnae3_handle *handle, 2872 struct ethtool_fec_stats *fec_stats) 2873 { 2874 struct hclge_vport *vport = hclge_get_vport(handle); 2875 struct hclge_dev *hdev = vport->back; 2876 u32 fec_mode = hdev->hw.mac.fec_mode; 2877 2878 if (fec_mode == BIT(HNAE3_FEC_NONE) || 2879 fec_mode == BIT(HNAE3_FEC_AUTO) || 2880 fec_mode == BIT(HNAE3_FEC_USER_DEF)) 2881 return; 2882 2883 hclge_update_fec_stats(hdev); 2884 2885 hclge_comm_get_fec_stats(hdev, fec_stats); 2886 } 2887 2888 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) 2889 { 2890 struct hclge_config_fec_cmd *req; 2891 struct hclge_desc desc; 2892 int ret; 2893 2894 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); 2895 2896 req = (struct hclge_config_fec_cmd *)desc.data; 2897 if (fec_mode & BIT(HNAE3_FEC_AUTO)) 2898 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); 2899 if (fec_mode & BIT(HNAE3_FEC_RS)) 2900 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2901 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); 2902 if (fec_mode & BIT(HNAE3_FEC_LLRS)) 2903 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2904 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS); 2905 if (fec_mode & BIT(HNAE3_FEC_BASER)) 2906 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2907 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); 2908 2909 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2910 if (ret) 2911 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); 2912 2913 return ret; 2914 } 2915 2916 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) 2917 { 2918 struct hclge_vport *vport = hclge_get_vport(handle); 2919 struct hclge_dev *hdev = vport->back; 2920 struct hclge_mac *mac = &hdev->hw.mac; 2921 int ret; 2922 2923 if (fec_mode && !(mac->fec_ability & fec_mode)) { 2924 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); 2925 return -EINVAL; 2926 } 2927 2928 ret = hclge_set_fec_hw(hdev, fec_mode); 2929 if (ret) 2930 return ret; 2931 2932 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); 2933 return 0; 2934 } 2935 2936 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, 2937 u8 *fec_mode) 2938 { 2939 struct hclge_vport *vport = hclge_get_vport(handle); 2940 struct hclge_dev *hdev = vport->back; 2941 struct hclge_mac *mac = &hdev->hw.mac; 2942 2943 if (fec_ability) 2944 *fec_ability = mac->fec_ability; 2945 if (fec_mode) 2946 *fec_mode = mac->fec_mode; 2947 } 2948 2949 static int hclge_mac_init(struct hclge_dev *hdev) 2950 { 2951 struct hclge_mac *mac = &hdev->hw.mac; 2952 int ret; 2953 2954 hdev->support_sfp_query = true; 2955 2956 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 2957 hdev->hw.mac.duplex = HCLGE_MAC_FULL; 2958 2959 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, 2960 hdev->hw.mac.duplex, hdev->hw.mac.lane_num); 2961 if (ret) 2962 
return ret; 2963 2964 if (hdev->hw.mac.support_autoneg) { 2965 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); 2966 if (ret) 2967 return ret; 2968 } 2969 2970 mac->link = 0; 2971 2972 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { 2973 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); 2974 if (ret) 2975 return ret; 2976 } 2977 2978 ret = hclge_set_mac_mtu(hdev, hdev->mps); 2979 if (ret) { 2980 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); 2981 return ret; 2982 } 2983 2984 ret = hclge_set_default_loopback(hdev); 2985 if (ret) 2986 return ret; 2987 2988 ret = hclge_buffer_alloc(hdev); 2989 if (ret) 2990 dev_err(&hdev->pdev->dev, 2991 "allocate buffer fail, ret=%d\n", ret); 2992 2993 return ret; 2994 } 2995 2996 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2997 { 2998 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2999 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { 3000 hdev->last_mbx_scheduled = jiffies; 3001 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 3002 } 3003 } 3004 3005 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 3006 { 3007 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 3008 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && 3009 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { 3010 hdev->last_rst_scheduled = jiffies; 3011 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 3012 } 3013 } 3014 3015 static void hclge_errhand_task_schedule(struct hclge_dev *hdev) 3016 { 3017 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 3018 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) 3019 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 3020 } 3021 3022 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) 3023 { 3024 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 3025 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) 3026 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); 3027 } 3028 3029 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) 3030 { 3031 struct hclge_link_status_cmd *req; 3032 struct hclge_desc desc; 3033 int ret; 3034 3035 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 3036 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3037 if (ret) { 3038 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 3039 ret); 3040 return ret; 3041 } 3042 3043 req = (struct hclge_link_status_cmd *)desc.data; 3044 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? 
3045 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 3046 3047 return 0; 3048 } 3049 3050 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) 3051 { 3052 struct phy_device *phydev = hdev->hw.mac.phydev; 3053 3054 *link_status = HCLGE_LINK_STATUS_DOWN; 3055 3056 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) 3057 return 0; 3058 3059 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) 3060 return 0; 3061 3062 return hclge_get_mac_link_status(hdev, link_status); 3063 } 3064 3065 static void hclge_push_link_status(struct hclge_dev *hdev) 3066 { 3067 struct hclge_vport *vport; 3068 int ret; 3069 u16 i; 3070 3071 for (i = 0; i < pci_num_vf(hdev->pdev); i++) { 3072 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; 3073 3074 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || 3075 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) 3076 continue; 3077 3078 ret = hclge_push_vf_link_status(vport); 3079 if (ret) { 3080 dev_err(&hdev->pdev->dev, 3081 "failed to push link status to vf%u, ret = %d\n", 3082 i, ret); 3083 } 3084 } 3085 } 3086 3087 static void hclge_update_link_status(struct hclge_dev *hdev) 3088 { 3089 struct hnae3_handle *rhandle = &hdev->vport[0].roce; 3090 struct hnae3_handle *handle = &hdev->vport[0].nic; 3091 struct hnae3_client *rclient = hdev->roce_client; 3092 struct hnae3_client *client = hdev->nic_client; 3093 int state; 3094 int ret; 3095 3096 if (!client) 3097 return; 3098 3099 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) 3100 return; 3101 3102 ret = hclge_get_mac_phy_link(hdev, &state); 3103 if (ret) { 3104 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 3105 return; 3106 } 3107 3108 if (state != hdev->hw.mac.link) { 3109 hdev->hw.mac.link = state; 3110 if (state == HCLGE_LINK_STATUS_UP) 3111 hclge_update_port_info(hdev); 3112 3113 client->ops->link_status_change(handle, state); 3114 hclge_config_mac_tnl_int(hdev, state); 3115 if (rclient && rclient->ops->link_status_change) 3116 rclient->ops->link_status_change(rhandle, state); 3117 3118 hclge_push_link_status(hdev); 3119 } 3120 3121 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 3122 } 3123 3124 static void hclge_update_speed_advertising(struct hclge_mac *mac) 3125 { 3126 u32 speed_ability; 3127 3128 if (hclge_get_speed_bit(mac->speed, &speed_ability)) 3129 return; 3130 3131 switch (mac->module_type) { 3132 case HNAE3_MODULE_TYPE_FIBRE_LR: 3133 hclge_convert_setting_lr(speed_ability, mac->advertising); 3134 break; 3135 case HNAE3_MODULE_TYPE_FIBRE_SR: 3136 case HNAE3_MODULE_TYPE_AOC: 3137 hclge_convert_setting_sr(speed_ability, mac->advertising); 3138 break; 3139 case HNAE3_MODULE_TYPE_CR: 3140 hclge_convert_setting_cr(speed_ability, mac->advertising); 3141 break; 3142 case HNAE3_MODULE_TYPE_KR: 3143 hclge_convert_setting_kr(speed_ability, mac->advertising); 3144 break; 3145 default: 3146 break; 3147 } 3148 } 3149 3150 static void hclge_update_fec_advertising(struct hclge_mac *mac) 3151 { 3152 if (mac->fec_mode & BIT(HNAE3_FEC_RS)) 3153 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 3154 mac->advertising); 3155 else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS)) 3156 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 3157 mac->advertising); 3158 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) 3159 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 3160 mac->advertising); 3161 else 3162 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 3163 mac->advertising); 3164 } 3165 3166 static void hclge_update_pause_advertising(struct hclge_dev *hdev) 3167 { 3168 struct 
hclge_mac *mac = &hdev->hw.mac; 3169 bool rx_en, tx_en; 3170 3171 switch (hdev->fc_mode_last_time) { 3172 case HCLGE_FC_RX_PAUSE: 3173 rx_en = true; 3174 tx_en = false; 3175 break; 3176 case HCLGE_FC_TX_PAUSE: 3177 rx_en = false; 3178 tx_en = true; 3179 break; 3180 case HCLGE_FC_FULL: 3181 rx_en = true; 3182 tx_en = true; 3183 break; 3184 default: 3185 rx_en = false; 3186 tx_en = false; 3187 break; 3188 } 3189 3190 linkmode_set_pause(mac->advertising, tx_en, rx_en); 3191 } 3192 3193 static void hclge_update_advertising(struct hclge_dev *hdev) 3194 { 3195 struct hclge_mac *mac = &hdev->hw.mac; 3196 3197 linkmode_zero(mac->advertising); 3198 hclge_update_speed_advertising(mac); 3199 hclge_update_fec_advertising(mac); 3200 hclge_update_pause_advertising(hdev); 3201 } 3202 3203 static void hclge_update_port_capability(struct hclge_dev *hdev, 3204 struct hclge_mac *mac) 3205 { 3206 if (hnae3_dev_fec_supported(hdev)) 3207 hclge_convert_setting_fec(mac); 3208 3209 /* firmware can not identify back plane type, the media type 3210 * read from configuration can help deal it 3211 */ 3212 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && 3213 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) 3214 mac->module_type = HNAE3_MODULE_TYPE_KR; 3215 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 3216 mac->module_type = HNAE3_MODULE_TYPE_TP; 3217 3218 if (mac->support_autoneg) { 3219 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); 3220 linkmode_copy(mac->advertising, mac->supported); 3221 } else { 3222 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 3223 mac->supported); 3224 hclge_update_advertising(hdev); 3225 } 3226 } 3227 3228 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) 3229 { 3230 struct hclge_sfp_info_cmd *resp; 3231 struct hclge_desc desc; 3232 int ret; 3233 3234 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 3235 resp = (struct hclge_sfp_info_cmd *)desc.data; 3236 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3237 if (ret == -EOPNOTSUPP) { 3238 dev_warn(&hdev->pdev->dev, 3239 "IMP do not support get SFP speed %d\n", ret); 3240 return ret; 3241 } else if (ret) { 3242 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); 3243 return ret; 3244 } 3245 3246 *speed = le32_to_cpu(resp->speed); 3247 3248 return 0; 3249 } 3250 3251 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) 3252 { 3253 struct hclge_sfp_info_cmd *resp; 3254 struct hclge_desc desc; 3255 int ret; 3256 3257 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 3258 resp = (struct hclge_sfp_info_cmd *)desc.data; 3259 3260 resp->query_type = QUERY_ACTIVE_SPEED; 3261 3262 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3263 if (ret == -EOPNOTSUPP) { 3264 dev_warn(&hdev->pdev->dev, 3265 "IMP does not support get SFP info %d\n", ret); 3266 return ret; 3267 } else if (ret) { 3268 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); 3269 return ret; 3270 } 3271 3272 /* In some case, mac speed get from IMP may be 0, it shouldn't be 3273 * set to mac->speed. 
3274 */ 3275 if (!le32_to_cpu(resp->speed)) 3276 return 0; 3277 3278 mac->speed = le32_to_cpu(resp->speed); 3279 /* if resp->speed_ability is 0, it means it's an old version 3280 * firmware, do not update these params 3281 */ 3282 if (resp->speed_ability) { 3283 mac->module_type = le32_to_cpu(resp->module_type); 3284 mac->speed_ability = le32_to_cpu(resp->speed_ability); 3285 mac->autoneg = resp->autoneg; 3286 mac->support_autoneg = resp->autoneg_ability; 3287 mac->speed_type = QUERY_ACTIVE_SPEED; 3288 mac->lane_num = resp->lane_num; 3289 if (!resp->active_fec) 3290 mac->fec_mode = 0; 3291 else 3292 mac->fec_mode = BIT(resp->active_fec); 3293 mac->fec_ability = resp->fec_ability; 3294 } else { 3295 mac->speed_type = QUERY_SFP_SPEED; 3296 } 3297 3298 return 0; 3299 } 3300 3301 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, 3302 struct ethtool_link_ksettings *cmd) 3303 { 3304 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; 3305 struct hclge_vport *vport = hclge_get_vport(handle); 3306 struct hclge_phy_link_ksetting_0_cmd *req0; 3307 struct hclge_phy_link_ksetting_1_cmd *req1; 3308 u32 supported, advertising, lp_advertising; 3309 struct hclge_dev *hdev = vport->back; 3310 int ret; 3311 3312 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, 3313 true); 3314 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3315 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, 3316 true); 3317 3318 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); 3319 if (ret) { 3320 dev_err(&hdev->pdev->dev, 3321 "failed to get phy link ksetting, ret = %d.\n", ret); 3322 return ret; 3323 } 3324 3325 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; 3326 cmd->base.autoneg = req0->autoneg; 3327 cmd->base.speed = le32_to_cpu(req0->speed); 3328 cmd->base.duplex = req0->duplex; 3329 cmd->base.port = req0->port; 3330 cmd->base.transceiver = req0->transceiver; 3331 cmd->base.phy_address = req0->phy_address; 3332 cmd->base.eth_tp_mdix = req0->eth_tp_mdix; 3333 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; 3334 supported = le32_to_cpu(req0->supported); 3335 advertising = le32_to_cpu(req0->advertising); 3336 lp_advertising = le32_to_cpu(req0->lp_advertising); 3337 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 3338 supported); 3339 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 3340 advertising); 3341 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, 3342 lp_advertising); 3343 3344 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; 3345 cmd->base.master_slave_cfg = req1->master_slave_cfg; 3346 cmd->base.master_slave_state = req1->master_slave_state; 3347 3348 return 0; 3349 } 3350 3351 static int 3352 hclge_set_phy_link_ksettings(struct hnae3_handle *handle, 3353 const struct ethtool_link_ksettings *cmd) 3354 { 3355 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; 3356 struct hclge_vport *vport = hclge_get_vport(handle); 3357 struct hclge_phy_link_ksetting_0_cmd *req0; 3358 struct hclge_phy_link_ksetting_1_cmd *req1; 3359 struct hclge_dev *hdev = vport->back; 3360 u32 advertising; 3361 int ret; 3362 3363 if (cmd->base.autoneg == AUTONEG_DISABLE && 3364 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || 3365 (cmd->base.duplex != DUPLEX_HALF && 3366 cmd->base.duplex != DUPLEX_FULL))) 3367 return -EINVAL; 3368 3369 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, 3370 false); 3371 desc[0].flag |= 
cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3372 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, 3373 false); 3374 3375 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; 3376 req0->autoneg = cmd->base.autoneg; 3377 req0->speed = cpu_to_le32(cmd->base.speed); 3378 req0->duplex = cmd->base.duplex; 3379 ethtool_convert_link_mode_to_legacy_u32(&advertising, 3380 cmd->link_modes.advertising); 3381 req0->advertising = cpu_to_le32(advertising); 3382 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; 3383 3384 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; 3385 req1->master_slave_cfg = cmd->base.master_slave_cfg; 3386 3387 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); 3388 if (ret) { 3389 dev_err(&hdev->pdev->dev, 3390 "failed to set phy link ksettings, ret = %d.\n", ret); 3391 return ret; 3392 } 3393 3394 hdev->hw.mac.req_autoneg = cmd->base.autoneg; 3395 hdev->hw.mac.req_speed = cmd->base.speed; 3396 hdev->hw.mac.req_duplex = cmd->base.duplex; 3397 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); 3398 3399 return 0; 3400 } 3401 3402 static int hclge_update_tp_port_info(struct hclge_dev *hdev) 3403 { 3404 struct ethtool_link_ksettings cmd; 3405 int ret; 3406 3407 if (!hnae3_dev_phy_imp_supported(hdev)) 3408 return 0; 3409 3410 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); 3411 if (ret) 3412 return ret; 3413 3414 hdev->hw.mac.autoneg = cmd.base.autoneg; 3415 hdev->hw.mac.speed = cmd.base.speed; 3416 hdev->hw.mac.duplex = cmd.base.duplex; 3417 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); 3418 3419 return 0; 3420 } 3421 3422 static int hclge_tp_port_init(struct hclge_dev *hdev) 3423 { 3424 struct ethtool_link_ksettings cmd; 3425 3426 if (!hnae3_dev_phy_imp_supported(hdev)) 3427 return 0; 3428 3429 cmd.base.autoneg = hdev->hw.mac.req_autoneg; 3430 cmd.base.speed = hdev->hw.mac.req_speed; 3431 cmd.base.duplex = hdev->hw.mac.req_duplex; 3432 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); 3433 3434 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); 3435 } 3436 3437 static int hclge_update_port_info(struct hclge_dev *hdev) 3438 { 3439 struct hclge_mac *mac = &hdev->hw.mac; 3440 int speed; 3441 int ret; 3442 3443 /* get the port info from SFP cmd if not copper port */ 3444 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 3445 return hclge_update_tp_port_info(hdev); 3446 3447 /* if IMP does not support get SFP/qSFP info, return directly */ 3448 if (!hdev->support_sfp_query) 3449 return 0; 3450 3451 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3452 speed = mac->speed; 3453 ret = hclge_get_sfp_info(hdev, mac); 3454 } else { 3455 speed = HCLGE_MAC_SPEED_UNKNOWN; 3456 ret = hclge_get_sfp_speed(hdev, &speed); 3457 } 3458 3459 if (ret == -EOPNOTSUPP) { 3460 hdev->support_sfp_query = false; 3461 return ret; 3462 } else if (ret) { 3463 return ret; 3464 } 3465 3466 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3467 if (mac->speed_type == QUERY_ACTIVE_SPEED) { 3468 hclge_update_port_capability(hdev, mac); 3469 if (mac->speed != speed) 3470 (void)hclge_tm_port_shaper_cfg(hdev); 3471 return 0; 3472 } 3473 return hclge_cfg_mac_speed_dup(hdev, mac->speed, 3474 HCLGE_MAC_FULL, mac->lane_num); 3475 } else { 3476 if (speed == HCLGE_MAC_SPEED_UNKNOWN) 3477 return 0; /* do nothing if no SFP */ 3478 3479 /* must config full duplex for SFP */ 3480 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0); 3481 } 3482 } 3483 3484 static int 
hclge_get_status(struct hnae3_handle *handle) 3485 { 3486 struct hclge_vport *vport = hclge_get_vport(handle); 3487 struct hclge_dev *hdev = vport->back; 3488 3489 hclge_update_link_status(hdev); 3490 3491 return hdev->hw.mac.link; 3492 } 3493 3494 struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) 3495 { 3496 if (!pci_num_vf(hdev->pdev)) { 3497 dev_err(&hdev->pdev->dev, 3498 "SRIOV is disabled, can not get vport(%d) info.\n", vf); 3499 return NULL; 3500 } 3501 3502 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { 3503 dev_err(&hdev->pdev->dev, 3504 "vf id(%d) is out of range(0 <= vfid < %d)\n", 3505 vf, pci_num_vf(hdev->pdev)); 3506 return NULL; 3507 } 3508 3509 /* VF start from 1 in vport */ 3510 vf += HCLGE_VF_VPORT_START_NUM; 3511 return &hdev->vport[vf]; 3512 } 3513 3514 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf, 3515 struct ifla_vf_info *ivf) 3516 { 3517 struct hclge_vport *vport = hclge_get_vport(handle); 3518 struct hclge_dev *hdev = vport->back; 3519 3520 vport = hclge_get_vf_vport(hdev, vf); 3521 if (!vport) 3522 return -EINVAL; 3523 3524 ivf->vf = vf; 3525 ivf->linkstate = vport->vf_info.link_state; 3526 ivf->spoofchk = vport->vf_info.spoofchk; 3527 ivf->trusted = vport->vf_info.trusted; 3528 ivf->min_tx_rate = 0; 3529 ivf->max_tx_rate = vport->vf_info.max_tx_rate; 3530 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; 3531 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); 3532 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; 3533 ether_addr_copy(ivf->mac, vport->vf_info.mac); 3534 3535 return 0; 3536 } 3537 3538 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, 3539 int link_state) 3540 { 3541 struct hclge_vport *vport = hclge_get_vport(handle); 3542 struct hclge_dev *hdev = vport->back; 3543 int link_state_old; 3544 int ret; 3545 3546 vport = hclge_get_vf_vport(hdev, vf); 3547 if (!vport) 3548 return -EINVAL; 3549 3550 link_state_old = vport->vf_info.link_state; 3551 vport->vf_info.link_state = link_state; 3552 3553 /* return success directly if the VF is unalive, VF will 3554 * query link state itself when it starts work. 3555 */ 3556 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 3557 return 0; 3558 3559 ret = hclge_push_vf_link_status(vport); 3560 if (ret) { 3561 vport->vf_info.link_state = link_state_old; 3562 dev_err(&hdev->pdev->dev, 3563 "failed to push vf%d link status, ret = %d\n", vf, ret); 3564 } 3565 3566 return ret; 3567 } 3568 3569 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 3570 { 3571 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg; 3572 3573 /* fetch the events from their corresponding regs */ 3574 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 3575 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); 3576 hw_err_src_reg = hclge_read_dev(&hdev->hw, 3577 HCLGE_RAS_PF_OTHER_INT_STS_REG); 3578 3579 /* Assumption: If by any chance reset and mailbox events are reported 3580 * together then we will only process reset event in this go and will 3581 * defer the processing of the mailbox events. Since, we would have not 3582 * cleared RX CMDQ event this time we would receive again another 3583 * interrupt from H/W just for the mailbox. 
3584 * 3585 * check for vector0 reset event sources 3586 */ 3587 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { 3588 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); 3589 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 3590 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3591 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 3592 hdev->rst_stats.imp_rst_cnt++; 3593 return HCLGE_VECTOR0_EVENT_RST; 3594 } 3595 3596 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { 3597 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); 3598 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3599 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 3600 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 3601 hdev->rst_stats.global_rst_cnt++; 3602 return HCLGE_VECTOR0_EVENT_RST; 3603 } 3604 3605 /* check for vector0 msix event and hardware error event source */ 3606 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK || 3607 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK) 3608 return HCLGE_VECTOR0_EVENT_ERR; 3609 3610 /* check for vector0 ptp event source */ 3611 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) { 3612 *clearval = msix_src_reg; 3613 return HCLGE_VECTOR0_EVENT_PTP; 3614 } 3615 3616 /* check for vector0 mailbox(=CMDQ RX) event source */ 3617 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 3618 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 3619 *clearval = cmdq_src_reg; 3620 return HCLGE_VECTOR0_EVENT_MBX; 3621 } 3622 3623 /* print other vector0 event source */ 3624 dev_info(&hdev->pdev->dev, 3625 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n", 3626 cmdq_src_reg, hw_err_src_reg, msix_src_reg); 3627 3628 return HCLGE_VECTOR0_EVENT_OTHER; 3629 } 3630 3631 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 3632 u32 regclr) 3633 { 3634 #define HCLGE_IMP_RESET_DELAY 5 3635 3636 switch (event_type) { 3637 case HCLGE_VECTOR0_EVENT_PTP: 3638 case HCLGE_VECTOR0_EVENT_RST: 3639 if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) 3640 mdelay(HCLGE_IMP_RESET_DELAY); 3641 3642 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 3643 break; 3644 case HCLGE_VECTOR0_EVENT_MBX: 3645 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 3646 break; 3647 default: 3648 break; 3649 } 3650 } 3651 3652 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) 3653 { 3654 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, 3655 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | 3656 BIT(HCLGE_VECTOR0_CORERESET_INT_B) | 3657 BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); 3658 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); 3659 } 3660 3661 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 3662 { 3663 writel(enable ? 1 : 0, vector->addr); 3664 } 3665 3666 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 3667 { 3668 struct hclge_dev *hdev = data; 3669 unsigned long flags; 3670 u32 clearval = 0; 3671 u32 event_cause; 3672 3673 hclge_enable_vector(&hdev->misc_vector, false); 3674 event_cause = hclge_check_event_cause(hdev, &clearval); 3675 3676 /* vector 0 interrupt is shared with reset and mailbox source events. 
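	 * The vector was disabled at the top of this handler; it is
	 * re-enabled below only for the PTP, mailbox and "other" causes.
	 * For reset and error events it is re-enabled later, once their
	 * handling has completed.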
*/ 3677 switch (event_cause) { 3678 case HCLGE_VECTOR0_EVENT_ERR: 3679 hclge_errhand_task_schedule(hdev); 3680 break; 3681 case HCLGE_VECTOR0_EVENT_RST: 3682 hclge_reset_task_schedule(hdev); 3683 break; 3684 case HCLGE_VECTOR0_EVENT_PTP: 3685 spin_lock_irqsave(&hdev->ptp->lock, flags); 3686 hclge_ptp_clean_tx_hwts(hdev); 3687 spin_unlock_irqrestore(&hdev->ptp->lock, flags); 3688 break; 3689 case HCLGE_VECTOR0_EVENT_MBX: 3690 /* If we are here then, 3691 * 1. Either we are not handling any mbx task and we are not 3692 * scheduled as well 3693 * OR 3694 * 2. We could be handling a mbx task but nothing more is 3695 * scheduled. 3696 * In both cases, we should schedule mbx task as there are more 3697 * mbx messages reported by this interrupt. 3698 */ 3699 hclge_mbx_task_schedule(hdev); 3700 break; 3701 default: 3702 dev_warn(&hdev->pdev->dev, 3703 "received unknown or unhandled event of vector0\n"); 3704 break; 3705 } 3706 3707 hclge_clear_event_cause(hdev, event_cause, clearval); 3708 3709 /* Enable interrupt if it is not caused by reset event or error event */ 3710 if (event_cause == HCLGE_VECTOR0_EVENT_PTP || 3711 event_cause == HCLGE_VECTOR0_EVENT_MBX || 3712 event_cause == HCLGE_VECTOR0_EVENT_OTHER) 3713 hclge_enable_vector(&hdev->misc_vector, true); 3714 3715 return IRQ_HANDLED; 3716 } 3717 3718 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 3719 { 3720 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { 3721 dev_warn(&hdev->pdev->dev, 3722 "vector(vector_id %d) has been freed.\n", vector_id); 3723 return; 3724 } 3725 3726 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 3727 hdev->num_msi_left += 1; 3728 hdev->num_msi_used -= 1; 3729 } 3730 3731 static void hclge_get_misc_vector(struct hclge_dev *hdev) 3732 { 3733 struct hclge_misc_vector *vector = &hdev->misc_vector; 3734 3735 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 3736 3737 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 3738 hdev->vector_status[0] = 0; 3739 3740 hdev->num_msi_left -= 1; 3741 hdev->num_msi_used += 1; 3742 } 3743 3744 static int hclge_misc_irq_init(struct hclge_dev *hdev) 3745 { 3746 int ret; 3747 3748 hclge_get_misc_vector(hdev); 3749 3750 /* this would be explicitly freed in the end */ 3751 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 3752 HCLGE_NAME, pci_name(hdev->pdev)); 3753 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 3754 0, hdev->misc_vector.name, hdev); 3755 if (ret) { 3756 hclge_free_vector(hdev, 0); 3757 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 3758 hdev->misc_vector.vector_irq); 3759 } 3760 3761 return ret; 3762 } 3763 3764 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 3765 { 3766 free_irq(hdev->misc_vector.vector_irq, hdev); 3767 hclge_free_vector(hdev, 0); 3768 } 3769 3770 int hclge_notify_client(struct hclge_dev *hdev, 3771 enum hnae3_reset_notify_type type) 3772 { 3773 struct hnae3_handle *handle = &hdev->vport[0].nic; 3774 struct hnae3_client *client = hdev->nic_client; 3775 int ret; 3776 3777 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) 3778 return 0; 3779 3780 if (!client->ops->reset_notify) 3781 return -EOPNOTSUPP; 3782 3783 ret = client->ops->reset_notify(handle, type); 3784 if (ret) 3785 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 3786 type, ret); 3787 3788 return ret; 3789 } 3790 3791 static int hclge_notify_roce_client(struct hclge_dev *hdev, 3792 enum hnae3_reset_notify_type type) 3793 { 3794 struct hnae3_handle 
*handle = &hdev->vport[0].roce; 3795 struct hnae3_client *client = hdev->roce_client; 3796 int ret; 3797 3798 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) 3799 return 0; 3800 3801 if (!client->ops->reset_notify) 3802 return -EOPNOTSUPP; 3803 3804 ret = client->ops->reset_notify(handle, type); 3805 if (ret) 3806 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 3807 type, ret); 3808 3809 return ret; 3810 } 3811 3812 static int hclge_reset_wait(struct hclge_dev *hdev) 3813 { 3814 #define HCLGE_RESET_WATI_MS 100 3815 #define HCLGE_RESET_WAIT_CNT 350 3816 3817 u32 val, reg, reg_bit; 3818 u32 cnt = 0; 3819 3820 switch (hdev->reset_type) { 3821 case HNAE3_IMP_RESET: 3822 reg = HCLGE_GLOBAL_RESET_REG; 3823 reg_bit = HCLGE_IMP_RESET_BIT; 3824 break; 3825 case HNAE3_GLOBAL_RESET: 3826 reg = HCLGE_GLOBAL_RESET_REG; 3827 reg_bit = HCLGE_GLOBAL_RESET_BIT; 3828 break; 3829 case HNAE3_FUNC_RESET: 3830 reg = HCLGE_FUN_RST_ING; 3831 reg_bit = HCLGE_FUN_RST_ING_B; 3832 break; 3833 default: 3834 dev_err(&hdev->pdev->dev, 3835 "Wait for unsupported reset type: %d\n", 3836 hdev->reset_type); 3837 return -EINVAL; 3838 } 3839 3840 val = hclge_read_dev(&hdev->hw, reg); 3841 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 3842 msleep(HCLGE_RESET_WATI_MS); 3843 val = hclge_read_dev(&hdev->hw, reg); 3844 cnt++; 3845 } 3846 3847 if (cnt >= HCLGE_RESET_WAIT_CNT) { 3848 dev_warn(&hdev->pdev->dev, 3849 "Wait for reset timeout: %d\n", hdev->reset_type); 3850 return -EBUSY; 3851 } 3852 3853 return 0; 3854 } 3855 3856 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) 3857 { 3858 struct hclge_vf_rst_cmd *req; 3859 struct hclge_desc desc; 3860 3861 req = (struct hclge_vf_rst_cmd *)desc.data; 3862 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); 3863 req->dest_vfid = func_id; 3864 3865 if (reset) 3866 req->vf_rst = 0x1; 3867 3868 return hclge_cmd_send(&hdev->hw, &desc, 1); 3869 } 3870 3871 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) 3872 { 3873 int i; 3874 3875 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { 3876 struct hclge_vport *vport = &hdev->vport[i]; 3877 int ret; 3878 3879 /* Send cmd to set/clear VF's FUNC_RST_ING */ 3880 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); 3881 if (ret) { 3882 dev_err(&hdev->pdev->dev, 3883 "set vf(%u) rst failed %d!\n", 3884 vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3885 ret); 3886 return ret; 3887 } 3888 3889 if (!reset || 3890 !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) 3891 continue; 3892 3893 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && 3894 hdev->reset_type == HNAE3_FUNC_RESET) { 3895 set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, 3896 &vport->need_notify); 3897 continue; 3898 } 3899 3900 /* Inform VF to process the reset. 3901 * hclge_inform_reset_assert_to_vf may fail if VF 3902 * driver is not loaded. 
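		 * The failure is treated as non-fatal below: only a warning
		 * is printed and the loop continues, so an unloaded VF driver
		 * does not block the PF reset.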
3903 */ 3904 ret = hclge_inform_reset_assert_to_vf(vport); 3905 if (ret) 3906 dev_warn(&hdev->pdev->dev, 3907 "inform reset to vf(%u) failed %d!\n", 3908 vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3909 ret); 3910 } 3911 3912 return 0; 3913 } 3914 3915 static void hclge_mailbox_service_task(struct hclge_dev *hdev) 3916 { 3917 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || 3918 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || 3919 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 3920 return; 3921 3922 if (time_is_before_jiffies(hdev->last_mbx_scheduled + 3923 HCLGE_MBX_SCHED_TIMEOUT)) 3924 dev_warn(&hdev->pdev->dev, 3925 "mbx service task is scheduled after %ums on cpu%u!\n", 3926 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), 3927 smp_processor_id()); 3928 3929 hclge_mbx_handler(hdev); 3930 3931 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 3932 } 3933 3934 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) 3935 { 3936 struct hclge_pf_rst_sync_cmd *req; 3937 struct hclge_desc desc; 3938 int cnt = 0; 3939 int ret; 3940 3941 req = (struct hclge_pf_rst_sync_cmd *)desc.data; 3942 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); 3943 3944 do { 3945 /* vf need to down netdev by mbx during PF or FLR reset */ 3946 hclge_mailbox_service_task(hdev); 3947 3948 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3949 /* for compatible with old firmware, wait 3950 * 100 ms for VF to stop IO 3951 */ 3952 if (ret == -EOPNOTSUPP) { 3953 msleep(HCLGE_RESET_SYNC_TIME); 3954 return; 3955 } else if (ret) { 3956 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", 3957 ret); 3958 return; 3959 } else if (req->all_vf_ready) { 3960 return; 3961 } 3962 msleep(HCLGE_PF_RESET_SYNC_TIME); 3963 hclge_comm_cmd_reuse_desc(&desc, true); 3964 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); 3965 3966 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); 3967 } 3968 3969 void hclge_report_hw_error(struct hclge_dev *hdev, 3970 enum hnae3_hw_error_type type) 3971 { 3972 struct hnae3_client *client = hdev->nic_client; 3973 3974 if (!client || !client->ops->process_hw_error || 3975 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) 3976 return; 3977 3978 client->ops->process_hw_error(&hdev->vport[0].nic, type); 3979 } 3980 3981 static void hclge_handle_imp_error(struct hclge_dev *hdev) 3982 { 3983 u32 reg_val; 3984 3985 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 3986 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { 3987 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); 3988 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); 3989 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 3990 } 3991 3992 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { 3993 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); 3994 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); 3995 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 3996 } 3997 } 3998 3999 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 4000 { 4001 struct hclge_desc desc; 4002 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 4003 int ret; 4004 4005 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 4006 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 4007 req->fun_reset_vfid = func_id; 4008 4009 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4010 if (ret) 4011 dev_err(&hdev->pdev->dev, 4012 "send function reset cmd fail, status =%d\n", ret); 4013 4014 return ret; 4015 } 4016 4017 static void 
hclge_do_reset(struct hclge_dev *hdev) 4018 { 4019 struct hnae3_handle *handle = &hdev->vport[0].nic; 4020 struct pci_dev *pdev = hdev->pdev; 4021 u32 val; 4022 4023 if (hclge_get_hw_reset_stat(handle)) { 4024 dev_info(&pdev->dev, "hardware reset not finish\n"); 4025 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", 4026 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), 4027 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); 4028 return; 4029 } 4030 4031 switch (hdev->reset_type) { 4032 case HNAE3_IMP_RESET: 4033 dev_info(&pdev->dev, "IMP reset requested\n"); 4034 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 4035 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); 4036 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); 4037 break; 4038 case HNAE3_GLOBAL_RESET: 4039 dev_info(&pdev->dev, "global reset requested\n"); 4040 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 4041 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 4042 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 4043 break; 4044 case HNAE3_FUNC_RESET: 4045 dev_info(&pdev->dev, "PF reset requested\n"); 4046 /* schedule again to check later */ 4047 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 4048 hclge_reset_task_schedule(hdev); 4049 break; 4050 default: 4051 dev_warn(&pdev->dev, 4052 "unsupported reset type: %d\n", hdev->reset_type); 4053 break; 4054 } 4055 } 4056 4057 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, 4058 unsigned long *addr) 4059 { 4060 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 4061 struct hclge_dev *hdev = ae_dev->priv; 4062 4063 /* return the highest priority reset level amongst all */ 4064 if (test_bit(HNAE3_IMP_RESET, addr)) { 4065 rst_level = HNAE3_IMP_RESET; 4066 clear_bit(HNAE3_IMP_RESET, addr); 4067 clear_bit(HNAE3_GLOBAL_RESET, addr); 4068 clear_bit(HNAE3_FUNC_RESET, addr); 4069 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { 4070 rst_level = HNAE3_GLOBAL_RESET; 4071 clear_bit(HNAE3_GLOBAL_RESET, addr); 4072 clear_bit(HNAE3_FUNC_RESET, addr); 4073 } else if (test_bit(HNAE3_FUNC_RESET, addr)) { 4074 rst_level = HNAE3_FUNC_RESET; 4075 clear_bit(HNAE3_FUNC_RESET, addr); 4076 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 4077 rst_level = HNAE3_FLR_RESET; 4078 clear_bit(HNAE3_FLR_RESET, addr); 4079 } 4080 4081 if (hdev->reset_type != HNAE3_NONE_RESET && 4082 rst_level < hdev->reset_type) 4083 return HNAE3_NONE_RESET; 4084 4085 return rst_level; 4086 } 4087 4088 static void hclge_clear_reset_cause(struct hclge_dev *hdev) 4089 { 4090 u32 clearval = 0; 4091 4092 switch (hdev->reset_type) { 4093 case HNAE3_IMP_RESET: 4094 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 4095 break; 4096 case HNAE3_GLOBAL_RESET: 4097 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 4098 break; 4099 default: 4100 break; 4101 } 4102 4103 if (!clearval) 4104 return; 4105 4106 /* For revision 0x20, the reset interrupt source 4107 * can only be cleared after hardware reset done 4108 */ 4109 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 4110 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, 4111 clearval); 4112 4113 hclge_enable_vector(&hdev->misc_vector, true); 4114 } 4115 4116 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) 4117 { 4118 u32 reg_val; 4119 4120 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); 4121 if (enable) 4122 reg_val |= HCLGE_COMM_NIC_SW_RST_RDY; 4123 else 4124 reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY; 4125 4126 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); 
4127 } 4128 4129 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) 4130 { 4131 int ret; 4132 4133 ret = hclge_set_all_vf_rst(hdev, true); 4134 if (ret) 4135 return ret; 4136 4137 hclge_func_reset_sync_vf(hdev); 4138 4139 return 0; 4140 } 4141 4142 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) 4143 { 4144 u32 reg_val; 4145 int ret = 0; 4146 4147 switch (hdev->reset_type) { 4148 case HNAE3_FUNC_RESET: 4149 ret = hclge_func_reset_notify_vf(hdev); 4150 if (ret) 4151 return ret; 4152 4153 ret = hclge_func_reset_cmd(hdev, 0); 4154 if (ret) { 4155 dev_err(&hdev->pdev->dev, 4156 "asserting function reset fail %d!\n", ret); 4157 return ret; 4158 } 4159 4160 /* After performaning pf reset, it is not necessary to do the 4161 * mailbox handling or send any command to firmware, because 4162 * any mailbox handling or command to firmware is only valid 4163 * after hclge_comm_cmd_init is called. 4164 */ 4165 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 4166 hdev->rst_stats.pf_rst_cnt++; 4167 break; 4168 case HNAE3_FLR_RESET: 4169 ret = hclge_func_reset_notify_vf(hdev); 4170 if (ret) 4171 return ret; 4172 break; 4173 case HNAE3_IMP_RESET: 4174 hclge_handle_imp_error(hdev); 4175 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 4176 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, 4177 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); 4178 break; 4179 default: 4180 break; 4181 } 4182 4183 /* inform hardware that preparatory work is done */ 4184 msleep(HCLGE_RESET_SYNC_TIME); 4185 hclge_reset_handshake(hdev, true); 4186 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); 4187 4188 return ret; 4189 } 4190 4191 static void hclge_show_rst_info(struct hclge_dev *hdev) 4192 { 4193 char *buf; 4194 4195 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL); 4196 if (!buf) 4197 return; 4198 4199 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); 4200 4201 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); 4202 4203 kfree(buf); 4204 } 4205 4206 static bool hclge_reset_err_handle(struct hclge_dev *hdev) 4207 { 4208 #define MAX_RESET_FAIL_CNT 5 4209 4210 if (hdev->reset_pending) { 4211 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", 4212 hdev->reset_pending); 4213 return true; 4214 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & 4215 HCLGE_RESET_INT_M) { 4216 dev_info(&hdev->pdev->dev, 4217 "reset failed because new reset interrupt\n"); 4218 hclge_clear_reset_cause(hdev); 4219 return false; 4220 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { 4221 hdev->rst_stats.reset_fail_cnt++; 4222 set_bit(hdev->reset_type, &hdev->reset_pending); 4223 dev_info(&hdev->pdev->dev, 4224 "re-schedule reset task(%u)\n", 4225 hdev->rst_stats.reset_fail_cnt); 4226 return true; 4227 } 4228 4229 hclge_clear_reset_cause(hdev); 4230 4231 /* recover the handshake status when reset fail */ 4232 hclge_reset_handshake(hdev, true); 4233 4234 dev_err(&hdev->pdev->dev, "Reset fail!\n"); 4235 4236 hclge_show_rst_info(hdev); 4237 4238 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 4239 4240 return false; 4241 } 4242 4243 static void hclge_update_reset_level(struct hclge_dev *hdev) 4244 { 4245 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4246 enum hnae3_reset_type reset_level; 4247 4248 /* reset request will not be set during reset, so clear 4249 * pending reset request to avoid unnecessary reset 4250 * caused by the same reason. 
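	 * Note that hclge_get_reset_level() clears the bits it reads from
	 * the mask, so calling it here simply discards those stale
	 * requests.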
4251 */ 4252 hclge_get_reset_level(ae_dev, &hdev->reset_request); 4253 4254 /* if default_reset_request has a higher level reset request, 4255 * it should be handled as soon as possible. since some errors 4256 * need this kind of reset to fix. 4257 */ 4258 reset_level = hclge_get_reset_level(ae_dev, 4259 &hdev->default_reset_request); 4260 if (reset_level != HNAE3_NONE_RESET) 4261 set_bit(reset_level, &hdev->reset_request); 4262 } 4263 4264 static int hclge_set_rst_done(struct hclge_dev *hdev) 4265 { 4266 struct hclge_pf_rst_done_cmd *req; 4267 struct hclge_desc desc; 4268 int ret; 4269 4270 req = (struct hclge_pf_rst_done_cmd *)desc.data; 4271 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); 4272 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; 4273 4274 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4275 /* To be compatible with the old firmware, which does not support 4276 * command HCLGE_OPC_PF_RST_DONE, just print a warning and 4277 * return success 4278 */ 4279 if (ret == -EOPNOTSUPP) { 4280 dev_warn(&hdev->pdev->dev, 4281 "current firmware does not support command(0x%x)!\n", 4282 HCLGE_OPC_PF_RST_DONE); 4283 return 0; 4284 } else if (ret) { 4285 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", 4286 ret); 4287 } 4288 4289 return ret; 4290 } 4291 4292 static int hclge_reset_prepare_up(struct hclge_dev *hdev) 4293 { 4294 int ret = 0; 4295 4296 switch (hdev->reset_type) { 4297 case HNAE3_FUNC_RESET: 4298 case HNAE3_FLR_RESET: 4299 ret = hclge_set_all_vf_rst(hdev, false); 4300 break; 4301 case HNAE3_GLOBAL_RESET: 4302 case HNAE3_IMP_RESET: 4303 ret = hclge_set_rst_done(hdev); 4304 break; 4305 default: 4306 break; 4307 } 4308 4309 /* clear up the handshake status after re-initialize done */ 4310 hclge_reset_handshake(hdev, false); 4311 4312 return ret; 4313 } 4314 4315 static int hclge_reset_stack(struct hclge_dev *hdev) 4316 { 4317 int ret; 4318 4319 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 4320 if (ret) 4321 return ret; 4322 4323 ret = hclge_reset_ae_dev(hdev->ae_dev); 4324 if (ret) 4325 return ret; 4326 4327 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 4328 } 4329 4330 static int hclge_reset_prepare(struct hclge_dev *hdev) 4331 { 4332 int ret; 4333 4334 hdev->rst_stats.reset_cnt++; 4335 /* perform reset of the stack & ae device for a client */ 4336 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 4337 if (ret) 4338 return ret; 4339 4340 rtnl_lock(); 4341 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 4342 rtnl_unlock(); 4343 if (ret) 4344 return ret; 4345 4346 return hclge_reset_prepare_wait(hdev); 4347 } 4348 4349 static int hclge_reset_rebuild(struct hclge_dev *hdev) 4350 { 4351 int ret; 4352 4353 hdev->rst_stats.hw_reset_done_cnt++; 4354 4355 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 4356 if (ret) 4357 return ret; 4358 4359 rtnl_lock(); 4360 ret = hclge_reset_stack(hdev); 4361 rtnl_unlock(); 4362 if (ret) 4363 return ret; 4364 4365 hclge_clear_reset_cause(hdev); 4366 4367 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 4368 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 4369 * times 4370 */ 4371 if (ret && 4372 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) 4373 return ret; 4374 4375 ret = hclge_reset_prepare_up(hdev); 4376 if (ret) 4377 return ret; 4378 4379 rtnl_lock(); 4380 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); 4381 rtnl_unlock(); 4382 if (ret) 4383 return ret; 4384 4385 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); 4386 if (ret) 4387 return 
ret; 4388 4389 hdev->last_reset_time = jiffies; 4390 hdev->rst_stats.reset_fail_cnt = 0; 4391 hdev->rst_stats.reset_done_cnt++; 4392 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 4393 4394 hclge_update_reset_level(hdev); 4395 4396 return 0; 4397 } 4398 4399 static void hclge_reset(struct hclge_dev *hdev) 4400 { 4401 if (hclge_reset_prepare(hdev)) 4402 goto err_reset; 4403 4404 if (hclge_reset_wait(hdev)) 4405 goto err_reset; 4406 4407 if (hclge_reset_rebuild(hdev)) 4408 goto err_reset; 4409 4410 return; 4411 4412 err_reset: 4413 if (hclge_reset_err_handle(hdev)) 4414 hclge_reset_task_schedule(hdev); 4415 } 4416 4417 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) 4418 { 4419 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 4420 struct hclge_dev *hdev = ae_dev->priv; 4421 4422 /* We might end up getting called broadly because of 2 below cases: 4423 * 1. Recoverable error was conveyed through APEI and only way to bring 4424 * normalcy is to reset. 4425 * 2. A new reset request from the stack due to timeout 4426 * 4427 * check if this is a new reset request and we are not here just because 4428 * last reset attempt did not succeed and watchdog hit us again. We will 4429 * know this if last reset request did not occur very recently (watchdog 4430 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz) 4431 * In case of new request we reset the "reset level" to PF reset. 4432 * And if it is a repeat reset request of the most recent one then we 4433 * want to make sure we throttle the reset request. Therefore, we will 4434 * not allow it again before 3*HZ times. 4435 */ 4436 4437 if (time_before(jiffies, (hdev->last_reset_time + 4438 HCLGE_RESET_INTERVAL))) { 4439 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); 4440 return; 4441 } 4442 4443 if (hdev->default_reset_request) { 4444 hdev->reset_level = 4445 hclge_get_reset_level(ae_dev, 4446 &hdev->default_reset_request); 4447 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { 4448 hdev->reset_level = HNAE3_FUNC_RESET; 4449 } 4450 4451 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", 4452 hdev->reset_level); 4453 4454 /* request reset & schedule reset task */ 4455 set_bit(hdev->reset_level, &hdev->reset_request); 4456 hclge_reset_task_schedule(hdev); 4457 4458 if (hdev->reset_level < HNAE3_GLOBAL_RESET) 4459 hdev->reset_level++; 4460 } 4461 4462 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 4463 enum hnae3_reset_type rst_type) 4464 { 4465 struct hclge_dev *hdev = ae_dev->priv; 4466 4467 set_bit(rst_type, &hdev->default_reset_request); 4468 } 4469 4470 static void hclge_reset_timer(struct timer_list *t) 4471 { 4472 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); 4473 4474 /* if default_reset_request has no value, it means that this reset 4475 * request has already be handled, so just return here 4476 */ 4477 if (!hdev->default_reset_request) 4478 return; 4479 4480 dev_info(&hdev->pdev->dev, 4481 "triggering reset in reset timer\n"); 4482 hclge_reset_event(hdev->pdev, NULL); 4483 } 4484 4485 static void hclge_reset_subtask(struct hclge_dev *hdev) 4486 { 4487 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4488 4489 /* check if there is any ongoing reset in the hardware. This status can 4490 * be checked from reset_pending. If there is then, we need to wait for 4491 * hardware to complete reset. 4492 * a. 
If we are able to figure out in reasonable time that the hardware
	 *       has been fully reset, then we can proceed with the driver
	 *       and client reset.
	 *    b. else, we can come back later to check this status, so
	 *       re-schedule now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_type;

	if (ae_dev->hw_err_reset_req) {
		reset_type = hclge_get_reset_level(ae_dev,
						   &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_type);
	}

	if (hdev->default_reset_request && ae_dev->ops->reset_event)
		ae_dev->ops->reset_event(hdev->pdev, NULL);

	/* enable interrupt after error handling complete */
	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_handle_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->hw_err_reset_req = 0;

	if (hclge_find_error_source(hdev)) {
		hclge_handle_error_info_log(ae_dev);
		hclge_handle_mac_tnl(hdev);
		hclge_handle_vf_queue_err_ras(hdev);
	}

	hclge_handle_err_reset_request(hdev);
}

static void hclge_misc_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct device *dev = &hdev->pdev->dev;
	u32 msix_sts_reg;

	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		if (hclge_handle_hw_msix_error
				(hdev, &hdev->default_reset_request))
			dev_info(dev, "received msix interrupt 0x%x\n",
				 msix_sts_reg);
	}

	hclge_handle_hw_ras_error(ae_dev);

	hclge_handle_err_reset_request(hdev);
}

static void hclge_errhand_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		return;

	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_err_recovery(hdev);
	else
		hclge_misc_err_recovery(hdev);
}

static void hclge_reset_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_rst_scheduled +
				   HCLGE_RESET_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "reset service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
			 smp_processor_id());

	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
#define HCLGE_ALIVE_SECONDS_NORMAL		8

	unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
	int i;

	/* start from vport 1 because the PF is always alive */
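	/* A VF that has not sent a keepalive within alive_time (8 seconds)
	 * is marked as not alive and a heartbeat timeout warning is logged.
	 */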
4603 for (i = 1; i < hdev->num_alloc_vport; i++) { 4604 struct hclge_vport *vport = &hdev->vport[i]; 4605 4606 if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || 4607 !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 4608 continue; 4609 if (time_after(jiffies, vport->last_active_jiffies + 4610 alive_time)) { 4611 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 4612 dev_warn(&hdev->pdev->dev, 4613 "VF %u heartbeat timeout\n", 4614 i - HCLGE_VF_VPORT_START_NUM); 4615 } 4616 } 4617 } 4618 4619 static void hclge_periodic_service_task(struct hclge_dev *hdev) 4620 { 4621 unsigned long delta = round_jiffies_relative(HZ); 4622 4623 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) 4624 return; 4625 4626 /* Always handle the link updating to make sure link state is 4627 * updated when it is triggered by mbx. 4628 */ 4629 hclge_update_link_status(hdev); 4630 hclge_sync_mac_table(hdev); 4631 hclge_sync_promisc_mode(hdev); 4632 hclge_sync_fd_table(hdev); 4633 4634 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 4635 delta = jiffies - hdev->last_serv_processed; 4636 4637 if (delta < round_jiffies_relative(HZ)) { 4638 delta = round_jiffies_relative(HZ) - delta; 4639 goto out; 4640 } 4641 } 4642 4643 hdev->serv_processed_cnt++; 4644 hclge_update_vport_alive(hdev); 4645 4646 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { 4647 hdev->last_serv_processed = jiffies; 4648 goto out; 4649 } 4650 4651 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) 4652 hclge_update_stats_for_all(hdev); 4653 4654 hclge_update_port_info(hdev); 4655 hclge_sync_vlan_filter(hdev); 4656 4657 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) 4658 hclge_rfs_filter_expire(hdev); 4659 4660 hdev->last_serv_processed = jiffies; 4661 4662 out: 4663 hclge_task_schedule(hdev, delta); 4664 } 4665 4666 static void hclge_ptp_service_task(struct hclge_dev *hdev) 4667 { 4668 unsigned long flags; 4669 4670 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || 4671 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || 4672 !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) 4673 return; 4674 4675 /* to prevent concurrence with the irq handler */ 4676 spin_lock_irqsave(&hdev->ptp->lock, flags); 4677 4678 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq 4679 * handler may handle it just before spin_lock_irqsave(). 4680 */ 4681 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) 4682 hclge_ptp_clean_tx_hwts(hdev); 4683 4684 spin_unlock_irqrestore(&hdev->ptp->lock, flags); 4685 } 4686 4687 static void hclge_service_task(struct work_struct *work) 4688 { 4689 struct hclge_dev *hdev = 4690 container_of(work, struct hclge_dev, service_task.work); 4691 4692 hclge_errhand_service_task(hdev); 4693 hclge_reset_service_task(hdev); 4694 hclge_ptp_service_task(hdev); 4695 hclge_mailbox_service_task(hdev); 4696 hclge_periodic_service_task(hdev); 4697 4698 /* Handle error recovery, reset and mbx again in case periodical task 4699 * delays the handling by calling hclge_task_schedule() in 4700 * hclge_periodic_service_task(). 
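	 * Running them once more here keeps error, reset and mailbox
	 * handling from having to wait for the next service cycle.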
4701 */ 4702 hclge_errhand_service_task(hdev); 4703 hclge_reset_service_task(hdev); 4704 hclge_mailbox_service_task(hdev); 4705 } 4706 4707 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 4708 { 4709 /* VF handle has no client */ 4710 if (!handle->client) 4711 return container_of(handle, struct hclge_vport, nic); 4712 else if (handle->client->type == HNAE3_CLIENT_ROCE) 4713 return container_of(handle, struct hclge_vport, roce); 4714 else 4715 return container_of(handle, struct hclge_vport, nic); 4716 } 4717 4718 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, 4719 struct hnae3_vector_info *vector_info) 4720 { 4721 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64 4722 4723 vector_info->vector = pci_irq_vector(hdev->pdev, idx); 4724 4725 /* need an extend offset to config vector >= 64 */ 4726 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) 4727 vector_info->io_addr = hdev->hw.hw.io_base + 4728 HCLGE_VECTOR_REG_BASE + 4729 (idx - 1) * HCLGE_VECTOR_REG_OFFSET; 4730 else 4731 vector_info->io_addr = hdev->hw.hw.io_base + 4732 HCLGE_VECTOR_EXT_REG_BASE + 4733 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * 4734 HCLGE_VECTOR_REG_OFFSET_H + 4735 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * 4736 HCLGE_VECTOR_REG_OFFSET; 4737 4738 hdev->vector_status[idx] = hdev->vport[0].vport_id; 4739 hdev->vector_irq[idx] = vector_info->vector; 4740 } 4741 4742 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 4743 struct hnae3_vector_info *vector_info) 4744 { 4745 struct hclge_vport *vport = hclge_get_vport(handle); 4746 struct hnae3_vector_info *vector = vector_info; 4747 struct hclge_dev *hdev = vport->back; 4748 int alloc = 0; 4749 u16 i = 0; 4750 u16 j; 4751 4752 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); 4753 vector_num = min(hdev->num_msi_left, vector_num); 4754 4755 for (j = 0; j < vector_num; j++) { 4756 while (++i < hdev->num_nic_msi) { 4757 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 4758 hclge_get_vector_info(hdev, i, vector); 4759 vector++; 4760 alloc++; 4761 4762 break; 4763 } 4764 } 4765 } 4766 hdev->num_msi_left -= alloc; 4767 hdev->num_msi_used += alloc; 4768 4769 return alloc; 4770 } 4771 4772 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 4773 { 4774 int i; 4775 4776 for (i = 0; i < hdev->num_msi; i++) 4777 if (vector == hdev->vector_irq[i]) 4778 return i; 4779 4780 return -EINVAL; 4781 } 4782 4783 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 4784 { 4785 struct hclge_vport *vport = hclge_get_vport(handle); 4786 struct hclge_dev *hdev = vport->back; 4787 int vector_id; 4788 4789 vector_id = hclge_get_vector_index(hdev, vector); 4790 if (vector_id < 0) { 4791 dev_err(&hdev->pdev->dev, 4792 "Get vector index fail. 
vector = %d\n", vector); 4793 return vector_id; 4794 } 4795 4796 hclge_free_vector(hdev, vector_id); 4797 4798 return 0; 4799 } 4800 4801 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 4802 u8 *key, u8 *hfunc) 4803 { 4804 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 4805 struct hclge_vport *vport = hclge_get_vport(handle); 4806 struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg; 4807 4808 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); 4809 4810 hclge_comm_get_rss_indir_tbl(rss_cfg, indir, 4811 ae_dev->dev_specs.rss_ind_tbl_size); 4812 4813 return 0; 4814 } 4815 4816 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 4817 const u8 *key, const u8 hfunc) 4818 { 4819 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 4820 struct hclge_vport *vport = hclge_get_vport(handle); 4821 struct hclge_dev *hdev = vport->back; 4822 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 4823 int ret, i; 4824 4825 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); 4826 if (ret) { 4827 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); 4828 return ret; 4829 } 4830 4831 /* Update the shadow RSS table with user specified qids */ 4832 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) 4833 rss_cfg->rss_indirection_tbl[i] = indir[i]; 4834 4835 /* Update the hardware */ 4836 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, 4837 rss_cfg->rss_indirection_tbl); 4838 } 4839 4840 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 4841 struct ethtool_rxnfc *nfc) 4842 { 4843 struct hclge_vport *vport = hclge_get_vport(handle); 4844 struct hclge_dev *hdev = vport->back; 4845 int ret; 4846 4847 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, 4848 &hdev->rss_cfg, nfc); 4849 if (ret) { 4850 dev_err(&hdev->pdev->dev, 4851 "failed to set rss tuple, ret = %d.\n", ret); 4852 return ret; 4853 } 4854 4855 return 0; 4856 } 4857 4858 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 4859 struct ethtool_rxnfc *nfc) 4860 { 4861 struct hclge_vport *vport = hclge_get_vport(handle); 4862 u8 tuple_sets; 4863 int ret; 4864 4865 nfc->data = 0; 4866 4867 ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type, 4868 &tuple_sets); 4869 if (ret || !tuple_sets) 4870 return ret; 4871 4872 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); 4873 4874 return 0; 4875 } 4876 4877 static int hclge_get_tc_size(struct hnae3_handle *handle) 4878 { 4879 struct hclge_vport *vport = hclge_get_vport(handle); 4880 struct hclge_dev *hdev = vport->back; 4881 4882 return hdev->pf_rss_size_max; 4883 } 4884 4885 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) 4886 { 4887 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 4888 struct hclge_vport *vport = hdev->vport; 4889 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 4890 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; 4891 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 4892 struct hnae3_tc_info *tc_info; 4893 u16 roundup_size; 4894 u16 rss_size; 4895 int i; 4896 4897 tc_info = &vport->nic.kinfo.tc_info; 4898 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 4899 rss_size = tc_info->tqp_count[i]; 4900 tc_valid[i] = 0; 4901 4902 if (!(hdev->hw_tc_map & BIT(i))) 4903 continue; 4904 4905 /* tc_size set to hardware is the log2 of roundup power of two 4906 * of rss_size, the acutal queue size is limited by indirection 4907 * table. 
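		 * For example, rss_size = 5 is rounded up to 8 and
		 * ilog2(8) = 3 is the tc_size value written to hardware.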
4908 */ 4909 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || 4910 rss_size == 0) { 4911 dev_err(&hdev->pdev->dev, 4912 "Configure rss tc size failed, invalid TC_SIZE = %u\n", 4913 rss_size); 4914 return -EINVAL; 4915 } 4916 4917 roundup_size = roundup_pow_of_two(rss_size); 4918 roundup_size = ilog2(roundup_size); 4919 4920 tc_valid[i] = 1; 4921 tc_size[i] = roundup_size; 4922 tc_offset[i] = tc_info->tqp_offset[i]; 4923 } 4924 4925 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, 4926 tc_size); 4927 } 4928 4929 int hclge_rss_init_hw(struct hclge_dev *hdev) 4930 { 4931 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; 4932 u8 *key = hdev->rss_cfg.rss_hash_key; 4933 u8 hfunc = hdev->rss_cfg.rss_algo; 4934 int ret; 4935 4936 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 4937 rss_indir); 4938 if (ret) 4939 return ret; 4940 4941 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); 4942 if (ret) 4943 return ret; 4944 4945 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg); 4946 if (ret) 4947 return ret; 4948 4949 return hclge_init_rss_tc_mode(hdev); 4950 } 4951 4952 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 4953 int vector_id, bool en, 4954 struct hnae3_ring_chain_node *ring_chain) 4955 { 4956 struct hclge_dev *hdev = vport->back; 4957 struct hnae3_ring_chain_node *node; 4958 struct hclge_desc desc; 4959 struct hclge_ctrl_vector_chain_cmd *req = 4960 (struct hclge_ctrl_vector_chain_cmd *)desc.data; 4961 enum hclge_comm_cmd_status status; 4962 enum hclge_opcode_type op; 4963 u16 tqp_type_and_id; 4964 int i; 4965 4966 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 4967 hclge_cmd_setup_basic_desc(&desc, op, false); 4968 req->int_vector_id_l = hnae3_get_field(vector_id, 4969 HCLGE_VECTOR_ID_L_M, 4970 HCLGE_VECTOR_ID_L_S); 4971 req->int_vector_id_h = hnae3_get_field(vector_id, 4972 HCLGE_VECTOR_ID_H_M, 4973 HCLGE_VECTOR_ID_H_S); 4974 4975 i = 0; 4976 for (node = ring_chain; node; node = node->next) { 4977 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 4978 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 4979 HCLGE_INT_TYPE_S, 4980 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); 4981 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 4982 HCLGE_TQP_ID_S, node->tqp_index); 4983 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 4984 HCLGE_INT_GL_IDX_S, 4985 hnae3_get_field(node->int_gl_idx, 4986 HNAE3_RING_GL_IDX_M, 4987 HNAE3_RING_GL_IDX_S)); 4988 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 4989 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 4990 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 4991 req->vfid = vport->vport_id; 4992 4993 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4994 if (status) { 4995 dev_err(&hdev->pdev->dev, 4996 "Map TQP fail, status is %d.\n", 4997 status); 4998 return -EIO; 4999 } 5000 i = 0; 5001 5002 hclge_cmd_setup_basic_desc(&desc, 5003 op, 5004 false); 5005 req->int_vector_id_l = 5006 hnae3_get_field(vector_id, 5007 HCLGE_VECTOR_ID_L_M, 5008 HCLGE_VECTOR_ID_L_S); 5009 req->int_vector_id_h = 5010 hnae3_get_field(vector_id, 5011 HCLGE_VECTOR_ID_H_M, 5012 HCLGE_VECTOR_ID_H_S); 5013 } 5014 } 5015 5016 if (i > 0) { 5017 req->int_cause_num = i; 5018 req->vfid = vport->vport_id; 5019 status = hclge_cmd_send(&hdev->hw, &desc, 1); 5020 if (status) { 5021 dev_err(&hdev->pdev->dev, 5022 "Map TQP fail, status is %d.\n", status); 5023 return -EIO; 5024 } 5025 } 5026 5027 return 0; 5028 } 5029 5030 static int hclge_map_ring_to_vector(struct hnae3_handle 
*handle, int vector, 5031 struct hnae3_ring_chain_node *ring_chain) 5032 { 5033 struct hclge_vport *vport = hclge_get_vport(handle); 5034 struct hclge_dev *hdev = vport->back; 5035 int vector_id; 5036 5037 vector_id = hclge_get_vector_index(hdev, vector); 5038 if (vector_id < 0) { 5039 dev_err(&hdev->pdev->dev, 5040 "failed to get vector index. vector=%d\n", vector); 5041 return vector_id; 5042 } 5043 5044 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 5045 } 5046 5047 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, 5048 struct hnae3_ring_chain_node *ring_chain) 5049 { 5050 struct hclge_vport *vport = hclge_get_vport(handle); 5051 struct hclge_dev *hdev = vport->back; 5052 int vector_id, ret; 5053 5054 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 5055 return 0; 5056 5057 vector_id = hclge_get_vector_index(hdev, vector); 5058 if (vector_id < 0) { 5059 dev_err(&handle->pdev->dev, 5060 "Get vector index fail. ret =%d\n", vector_id); 5061 return vector_id; 5062 } 5063 5064 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 5065 if (ret) 5066 dev_err(&handle->pdev->dev, 5067 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 5068 vector_id, ret); 5069 5070 return ret; 5071 } 5072 5073 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, 5074 bool en_uc, bool en_mc, bool en_bc) 5075 { 5076 struct hclge_vport *vport = &hdev->vport[vf_id]; 5077 struct hnae3_handle *handle = &vport->nic; 5078 struct hclge_promisc_cfg_cmd *req; 5079 struct hclge_desc desc; 5080 bool uc_tx_en = en_uc; 5081 u8 promisc_cfg = 0; 5082 int ret; 5083 5084 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 5085 5086 req = (struct hclge_promisc_cfg_cmd *)desc.data; 5087 req->vf_id = vf_id; 5088 5089 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) 5090 uc_tx_en = false; 5091 5092 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0); 5093 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0); 5094 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0); 5095 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0); 5096 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0); 5097 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0); 5098 req->extend_promisc = promisc_cfg; 5099 5100 /* to be compatible with DEVICE_VERSION_V1/2 */ 5101 promisc_cfg = 0; 5102 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0); 5103 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0); 5104 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 
1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
	req->promisc = promisc_cfg;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %u promisc mode, ret = %d.\n",
			vf_id, ret);

	return ret;
}

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool en_bc_pmc = true;

	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, the vlan filter is always bypassed. So broadcast promisc
	 * should be disabled until the user enables promisc mode.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
}

static void hclge_sync_fd_state(struct hclge_dev *hdev)
{
	if (hlist_empty(&hdev->fd_rule_list))
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}

static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (!test_bit(location, hdev->fd_bmap)) {
		set_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num++;
	}
}

static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (test_bit(location, hdev->fd_bmap)) {
		clear_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num--;
	}
}

static void hclge_fd_free_node(struct hclge_dev *hdev,
			       struct hclge_fd_rule *rule)
{
	hlist_del(&rule->rule_node);
	kfree(rule);
	hclge_sync_fd_state(hdev);
}

static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
				      struct hclge_fd_rule *old_rule,
				      struct hclge_fd_rule *new_rule,
				      enum HCLGE_FD_NODE_STATE state)
{
	switch (state) {
	case HCLGE_FD_TO_ADD:
	case HCLGE_FD_ACTIVE:
		/* 1) if the new state is TO_ADD, just replace the old rule
		 * with the same location, no matter its state, because the
		 * new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, it means the new rule
		 * has been configured to the hardware, so just replace
		 * the old rule node with the same location.
		 * 3) neither case adds a new node to the list, so it is
		 * unnecessary to update the rule number and fd_bmap.
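		 * The hlist pointers of the old node are copied into the new
		 * rule before the memcpy() below, so the entry keeps its
		 * position in fd_rule_list.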
		 */
		new_rule->rule_node.next = old_rule->rule_node.next;
		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
		memcpy(old_rule, new_rule, sizeof(*old_rule));
		kfree(new_rule);
		break;
	case HCLGE_FD_DELETED:
		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
		hclge_fd_free_node(hdev, old_rule);
		break;
	case HCLGE_FD_TO_DEL:
		/* if the new request is TO_DEL and the old rule exists:
		 * 1) the state of the old rule is TO_DEL, we need to do
		 * nothing, because we delete the rule by location and the
		 * other rule content is unnecessary.
		 * 2) the state of the old rule is ACTIVE, we need to change
		 * its state to TO_DEL, so the rule will be deleted when the
		 * periodic task is scheduled.
		 * 3) the state of the old rule is TO_ADD, it means the rule
		 * hasn't been added to hardware, so we just delete the rule
		 * node from fd_rule_list directly.
		 */
		if (old_rule->state == HCLGE_FD_TO_ADD) {
			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
			hclge_fd_free_node(hdev, old_rule);
			return;
		}
		old_rule->state = HCLGE_FD_TO_DEL;
		break;
	}
}

static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
						u16 location,
						struct hclge_fd_rule **parent)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
		*parent = rule;
	}

	return NULL;
}

/* insert fd rule node in ascending order according to rule->location */
static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
				      struct hclge_fd_rule *rule,
				      struct hclge_fd_rule *parent)
{
	INIT_HLIST_NODE(&rule->rule_node);

	if (parent)
		hlist_add_behind(&rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&rule->rule_node, hlist);
}

static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
				     struct hclge_fd_user_def_cfg *cfg)
{
	struct hclge_fd_user_def_cfg_cmd *req;
	struct hclge_desc desc;
	u16 data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);

	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;

	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
	req->ol2_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
	req->ol3_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
	req->ol4_cfg = cpu_to_le16(data);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret= %d\n", ret);
	return ret;
}

static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
{
	int ret;

	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
		return;

	if (!locked)
spin_lock_bh(&hdev->fd_rule_lock); 5307 5308 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); 5309 if (ret) 5310 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5311 5312 if (!locked) 5313 spin_unlock_bh(&hdev->fd_rule_lock); 5314 } 5315 5316 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, 5317 struct hclge_fd_rule *rule) 5318 { 5319 struct hlist_head *hlist = &hdev->fd_rule_list; 5320 struct hclge_fd_rule *fd_rule, *parent = NULL; 5321 struct hclge_fd_user_def_info *info, *old_info; 5322 struct hclge_fd_user_def_cfg *cfg; 5323 5324 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5325 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5326 return 0; 5327 5328 /* for valid layer is start from 1, so need minus 1 to get the cfg */ 5329 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5330 info = &rule->ep.user_def; 5331 5332 if (!cfg->ref_cnt || cfg->offset == info->offset) 5333 return 0; 5334 5335 if (cfg->ref_cnt > 1) 5336 goto error; 5337 5338 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); 5339 if (fd_rule) { 5340 old_info = &fd_rule->ep.user_def; 5341 if (info->layer == old_info->layer) 5342 return 0; 5343 } 5344 5345 error: 5346 dev_err(&hdev->pdev->dev, 5347 "No available offset for layer%d fd rule, each layer only support one user def offset.\n", 5348 info->layer + 1); 5349 return -ENOSPC; 5350 } 5351 5352 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, 5353 struct hclge_fd_rule *rule) 5354 { 5355 struct hclge_fd_user_def_cfg *cfg; 5356 5357 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5358 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5359 return; 5360 5361 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5362 if (!cfg->ref_cnt) { 5363 cfg->offset = rule->ep.user_def.offset; 5364 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5365 } 5366 cfg->ref_cnt++; 5367 } 5368 5369 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, 5370 struct hclge_fd_rule *rule) 5371 { 5372 struct hclge_fd_user_def_cfg *cfg; 5373 5374 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5375 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5376 return; 5377 5378 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5379 if (!cfg->ref_cnt) 5380 return; 5381 5382 cfg->ref_cnt--; 5383 if (!cfg->ref_cnt) { 5384 cfg->offset = 0; 5385 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5386 } 5387 } 5388 5389 static void hclge_update_fd_list(struct hclge_dev *hdev, 5390 enum HCLGE_FD_NODE_STATE state, u16 location, 5391 struct hclge_fd_rule *new_rule) 5392 { 5393 struct hlist_head *hlist = &hdev->fd_rule_list; 5394 struct hclge_fd_rule *fd_rule, *parent = NULL; 5395 5396 fd_rule = hclge_find_fd_rule(hlist, location, &parent); 5397 if (fd_rule) { 5398 hclge_fd_dec_user_def_refcnt(hdev, fd_rule); 5399 if (state == HCLGE_FD_ACTIVE) 5400 hclge_fd_inc_user_def_refcnt(hdev, new_rule); 5401 hclge_sync_fd_user_def_cfg(hdev, true); 5402 5403 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); 5404 return; 5405 } 5406 5407 /* it's unlikely to fail here, because we have checked the rule 5408 * exist before. 
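	 * If a delete request does reach this point without a matching
	 * node, it is only reported by the warning below and then ignored.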
5409 */ 5410 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) { 5411 dev_warn(&hdev->pdev->dev, 5412 "failed to delete fd rule %u, it's inexistent\n", 5413 location); 5414 return; 5415 } 5416 5417 hclge_fd_inc_user_def_refcnt(hdev, new_rule); 5418 hclge_sync_fd_user_def_cfg(hdev, true); 5419 5420 hclge_fd_insert_rule_node(hlist, new_rule, parent); 5421 hclge_fd_inc_rule_cnt(hdev, new_rule->location); 5422 5423 if (state == HCLGE_FD_TO_ADD) { 5424 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 5425 hclge_task_schedule(hdev, 0); 5426 } 5427 } 5428 5429 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) 5430 { 5431 struct hclge_get_fd_mode_cmd *req; 5432 struct hclge_desc desc; 5433 int ret; 5434 5435 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); 5436 5437 req = (struct hclge_get_fd_mode_cmd *)desc.data; 5438 5439 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5440 if (ret) { 5441 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); 5442 return ret; 5443 } 5444 5445 *fd_mode = req->mode; 5446 5447 return ret; 5448 } 5449 5450 static int hclge_get_fd_allocation(struct hclge_dev *hdev, 5451 u32 *stage1_entry_num, 5452 u32 *stage2_entry_num, 5453 u16 *stage1_counter_num, 5454 u16 *stage2_counter_num) 5455 { 5456 struct hclge_get_fd_allocation_cmd *req; 5457 struct hclge_desc desc; 5458 int ret; 5459 5460 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); 5461 5462 req = (struct hclge_get_fd_allocation_cmd *)desc.data; 5463 5464 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5465 if (ret) { 5466 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", 5467 ret); 5468 return ret; 5469 } 5470 5471 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); 5472 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); 5473 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); 5474 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); 5475 5476 return ret; 5477 } 5478 5479 static int hclge_set_fd_key_config(struct hclge_dev *hdev, 5480 enum HCLGE_FD_STAGE stage_num) 5481 { 5482 struct hclge_set_fd_key_config_cmd *req; 5483 struct hclge_fd_key_cfg *stage; 5484 struct hclge_desc desc; 5485 int ret; 5486 5487 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); 5488 5489 req = (struct hclge_set_fd_key_config_cmd *)desc.data; 5490 stage = &hdev->fd_cfg.key_cfg[stage_num]; 5491 req->stage = stage_num; 5492 req->key_select = stage->key_sel; 5493 req->inner_sipv6_word_en = stage->inner_sipv6_word_en; 5494 req->inner_dipv6_word_en = stage->inner_dipv6_word_en; 5495 req->outer_sipv6_word_en = stage->outer_sipv6_word_en; 5496 req->outer_dipv6_word_en = stage->outer_dipv6_word_en; 5497 req->tuple_mask = cpu_to_le32(~stage->tuple_active); 5498 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); 5499 5500 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5501 if (ret) 5502 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); 5503 5504 return ret; 5505 } 5506 5507 static void hclge_fd_disable_user_def(struct hclge_dev *hdev) 5508 { 5509 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; 5510 5511 spin_lock_bh(&hdev->fd_rule_lock); 5512 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); 5513 spin_unlock_bh(&hdev->fd_rule_lock); 5514 5515 hclge_fd_set_user_def_cmd(hdev, cfg); 5516 } 5517 5518 static int hclge_init_fd_config(struct hclge_dev *hdev) 5519 { 5520 #define LOW_2_WORDS 0x03 5521 struct hclge_fd_key_cfg *key_cfg; 5522 int ret; 5523 5524 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 
5525 return 0; 5526 5527 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); 5528 if (ret) 5529 return ret; 5530 5531 switch (hdev->fd_cfg.fd_mode) { 5532 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: 5533 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; 5534 break; 5535 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: 5536 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; 5537 break; 5538 default: 5539 dev_err(&hdev->pdev->dev, 5540 "Unsupported flow director mode %u\n", 5541 hdev->fd_cfg.fd_mode); 5542 return -EOPNOTSUPP; 5543 } 5544 5545 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; 5546 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; 5547 key_cfg->inner_sipv6_word_en = LOW_2_WORDS; 5548 key_cfg->inner_dipv6_word_en = LOW_2_WORDS; 5549 key_cfg->outer_sipv6_word_en = 0; 5550 key_cfg->outer_dipv6_word_en = 0; 5551 5552 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | 5553 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | 5554 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 5555 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5556 5557 /* If use max 400bit key, we can support tuples for ether type */ 5558 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 5559 key_cfg->tuple_active |= 5560 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); 5561 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 5562 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; 5563 } 5564 5565 /* roce_type is used to filter roce frames 5566 * dst_vport is used to specify the rule 5567 */ 5568 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); 5569 5570 ret = hclge_get_fd_allocation(hdev, 5571 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 5572 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], 5573 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], 5574 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); 5575 if (ret) 5576 return ret; 5577 5578 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); 5579 } 5580 5581 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, 5582 int loc, u8 *key, bool is_add) 5583 { 5584 struct hclge_fd_tcam_config_1_cmd *req1; 5585 struct hclge_fd_tcam_config_2_cmd *req2; 5586 struct hclge_fd_tcam_config_3_cmd *req3; 5587 struct hclge_desc desc[3]; 5588 int ret; 5589 5590 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); 5591 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 5592 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); 5593 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 5594 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); 5595 5596 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 5597 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 5598 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 5599 5600 req1->stage = stage; 5601 req1->xy_sel = sel_x ? 1 : 0; 5602 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); 5603 req1->index = cpu_to_le32(loc); 5604 req1->entry_vld = sel_x ? 
is_add : 0; 5605 5606 if (key) { 5607 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); 5608 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], 5609 sizeof(req2->tcam_data)); 5610 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + 5611 sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); 5612 } 5613 5614 ret = hclge_cmd_send(&hdev->hw, desc, 3); 5615 if (ret) 5616 dev_err(&hdev->pdev->dev, 5617 "config tcam key fail, ret=%d\n", 5618 ret); 5619 5620 return ret; 5621 } 5622 5623 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, 5624 struct hclge_fd_ad_data *action) 5625 { 5626 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 5627 struct hclge_fd_ad_config_cmd *req; 5628 struct hclge_desc desc; 5629 u64 ad_data = 0; 5630 int ret; 5631 5632 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); 5633 5634 req = (struct hclge_fd_ad_config_cmd *)desc.data; 5635 req->index = cpu_to_le32(loc); 5636 req->stage = stage; 5637 5638 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, 5639 action->write_rule_id_to_bd); 5640 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, 5641 action->rule_id); 5642 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { 5643 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, 5644 action->override_tc); 5645 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, 5646 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); 5647 } 5648 ad_data <<= 32; 5649 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); 5650 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, 5651 action->forward_to_direct_queue); 5652 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, 5653 action->queue_id); 5654 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); 5655 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, 5656 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); 5657 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); 5658 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, 5659 action->counter_id); 5660 5661 req->ad_data = cpu_to_le64(ad_data); 5662 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5663 if (ret) 5664 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); 5665 5666 return ret; 5667 } 5668 5669 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, 5670 struct hclge_fd_rule *rule) 5671 { 5672 int offset, moffset, ip_offset; 5673 enum HCLGE_FD_KEY_OPT key_opt; 5674 u16 tmp_x_s, tmp_y_s; 5675 u32 tmp_x_l, tmp_y_l; 5676 u8 *p = (u8 *)rule; 5677 int i; 5678 5679 if (rule->unused_tuple & BIT(tuple_bit)) 5680 return true; 5681 5682 key_opt = tuple_key_info[tuple_bit].key_opt; 5683 offset = tuple_key_info[tuple_bit].offset; 5684 moffset = tuple_key_info[tuple_bit].moffset; 5685 5686 switch (key_opt) { 5687 case KEY_OPT_U8: 5688 calc_x(*key_x, p[offset], p[moffset]); 5689 calc_y(*key_y, p[offset], p[moffset]); 5690 5691 return true; 5692 case KEY_OPT_LE16: 5693 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); 5694 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); 5695 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 5696 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 5697 5698 return true; 5699 case KEY_OPT_LE32: 5700 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); 5701 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); 5702 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5703 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5704 5705 return true; 5706 case KEY_OPT_MAC: 5707 for (i = 0; i < ETH_ALEN; i++) 
{ 5708 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], 5709 p[moffset + i]); 5710 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], 5711 p[moffset + i]); 5712 } 5713 5714 return true; 5715 case KEY_OPT_IP: 5716 ip_offset = IPV4_INDEX * sizeof(u32); 5717 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), 5718 *(u32 *)(&p[moffset + ip_offset])); 5719 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), 5720 *(u32 *)(&p[moffset + ip_offset])); 5721 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5722 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5723 5724 return true; 5725 default: 5726 return false; 5727 } 5728 } 5729 5730 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, 5731 u8 vf_id, u8 network_port_id) 5732 { 5733 u32 port_number = 0; 5734 5735 if (port_type == HOST_PORT) { 5736 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, 5737 pf_id); 5738 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, 5739 vf_id); 5740 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); 5741 } else { 5742 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, 5743 HCLGE_NETWORK_PORT_ID_S, network_port_id); 5744 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); 5745 } 5746 5747 return port_number; 5748 } 5749 5750 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, 5751 __le32 *key_x, __le32 *key_y, 5752 struct hclge_fd_rule *rule) 5753 { 5754 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; 5755 u8 cur_pos = 0, tuple_size, shift_bits; 5756 unsigned int i; 5757 5758 for (i = 0; i < MAX_META_DATA; i++) { 5759 tuple_size = meta_data_key_info[i].key_length; 5760 tuple_bit = key_cfg->meta_data_active & BIT(i); 5761 5762 switch (tuple_bit) { 5763 case BIT(ROCE_TYPE): 5764 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); 5765 cur_pos += tuple_size; 5766 break; 5767 case BIT(DST_VPORT): 5768 port_number = hclge_get_port_number(HOST_PORT, 0, 5769 rule->vf_id, 0); 5770 hnae3_set_field(meta_data, 5771 GENMASK(cur_pos + tuple_size, cur_pos), 5772 cur_pos, port_number); 5773 cur_pos += tuple_size; 5774 break; 5775 default: 5776 break; 5777 } 5778 } 5779 5780 calc_x(tmp_x, meta_data, 0xFFFFFFFF); 5781 calc_y(tmp_y, meta_data, 0xFFFFFFFF); 5782 shift_bits = sizeof(meta_data) * 8 - cur_pos; 5783 5784 *key_x = cpu_to_le32(tmp_x << shift_bits); 5785 *key_y = cpu_to_le32(tmp_y << shift_bits); 5786 } 5787 5788 /* A complete key is combined with meta data key and tuple key. 5789 * Meta data key is stored at the MSB region, and tuple key is stored at 5790 * the LSB region, unused bits will be filled 0. 
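 *
 * Illustrative sketch (not authoritative, derived from hclge_config_key()
 * below): key_x/key_y are max_key_length / 8 bytes each, laid out as
 *
 *   bytes [0, meta_data_region)      tuple key, packed low-to-high in
 *                                    tuple_key_info[] order, skipping
 *                                    inactive or unused tuples
 *   bytes [meta_data_region, end)    meta data (roce_type, dst_vport)
 *
 * with meta_data_region = max_key_length / 8 - MAX_META_DATA_LENGTH / 8.
 * Each rule is written to hardware as an x/y pair of TCAM entries (see
 * hclge_fd_tcam_config() called with sel_x false and then true), where
 * calc_x()/calc_y() derive the ternary match encoding from every tuple
 * value and its mask.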
5791 */ 5792 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, 5793 struct hclge_fd_rule *rule) 5794 { 5795 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; 5796 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; 5797 u8 *cur_key_x, *cur_key_y; 5798 u8 meta_data_region; 5799 u8 tuple_size; 5800 int ret; 5801 u32 i; 5802 5803 memset(key_x, 0, sizeof(key_x)); 5804 memset(key_y, 0, sizeof(key_y)); 5805 cur_key_x = key_x; 5806 cur_key_y = key_y; 5807 5808 for (i = 0; i < MAX_TUPLE; i++) { 5809 bool tuple_valid; 5810 5811 tuple_size = tuple_key_info[i].key_length / 8; 5812 if (!(key_cfg->tuple_active & BIT(i))) 5813 continue; 5814 5815 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x, 5816 cur_key_y, rule); 5817 if (tuple_valid) { 5818 cur_key_x += tuple_size; 5819 cur_key_y += tuple_size; 5820 } 5821 } 5822 5823 meta_data_region = hdev->fd_cfg.max_key_length / 8 - 5824 MAX_META_DATA_LENGTH / 8; 5825 5826 hclge_fd_convert_meta_data(key_cfg, 5827 (__le32 *)(key_x + meta_data_region), 5828 (__le32 *)(key_y + meta_data_region), 5829 rule); 5830 5831 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, 5832 true); 5833 if (ret) { 5834 dev_err(&hdev->pdev->dev, 5835 "fd key_y config fail, loc=%u, ret=%d\n", 5836 rule->queue_id, ret); 5837 return ret; 5838 } 5839 5840 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, 5841 true); 5842 if (ret) 5843 dev_err(&hdev->pdev->dev, 5844 "fd key_x config fail, loc=%u, ret=%d\n", 5845 rule->queue_id, ret); 5846 return ret; 5847 } 5848 5849 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, 5850 struct hclge_fd_rule *rule) 5851 { 5852 struct hclge_vport *vport = hdev->vport; 5853 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5854 struct hclge_fd_ad_data ad_data; 5855 5856 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); 5857 ad_data.ad_id = rule->location; 5858 5859 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 5860 ad_data.drop_packet = true; 5861 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { 5862 ad_data.override_tc = true; 5863 ad_data.queue_id = 5864 kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; 5865 ad_data.tc_size = 5866 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); 5867 } else { 5868 ad_data.forward_to_direct_queue = true; 5869 ad_data.queue_id = rule->queue_id; 5870 } 5871 5872 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { 5873 ad_data.use_counter = true; 5874 ad_data.counter_id = rule->vf_id % 5875 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; 5876 } else { 5877 ad_data.use_counter = false; 5878 ad_data.counter_id = 0; 5879 } 5880 5881 ad_data.use_next_stage = false; 5882 ad_data.next_input_key = 0; 5883 5884 ad_data.write_rule_id_to_bd = true; 5885 ad_data.rule_id = rule->location; 5886 5887 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); 5888 } 5889 5890 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec, 5891 u32 *unused_tuple) 5892 { 5893 if (!spec || !unused_tuple) 5894 return -EINVAL; 5895 5896 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 5897 5898 if (!spec->ip4src) 5899 *unused_tuple |= BIT(INNER_SRC_IP); 5900 5901 if (!spec->ip4dst) 5902 *unused_tuple |= BIT(INNER_DST_IP); 5903 5904 if (!spec->psrc) 5905 *unused_tuple |= BIT(INNER_SRC_PORT); 5906 5907 if (!spec->pdst) 5908 *unused_tuple |= BIT(INNER_DST_PORT); 5909 5910 if (!spec->tos) 5911 *unused_tuple |= BIT(INNER_IP_TOS); 5912 5913 return 0; 5914 } 5915 5916 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, 
5917 u32 *unused_tuple) 5918 { 5919 if (!spec || !unused_tuple) 5920 return -EINVAL; 5921 5922 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5923 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5924 5925 if (!spec->ip4src) 5926 *unused_tuple |= BIT(INNER_SRC_IP); 5927 5928 if (!spec->ip4dst) 5929 *unused_tuple |= BIT(INNER_DST_IP); 5930 5931 if (!spec->tos) 5932 *unused_tuple |= BIT(INNER_IP_TOS); 5933 5934 if (!spec->proto) 5935 *unused_tuple |= BIT(INNER_IP_PROTO); 5936 5937 if (spec->l4_4_bytes) 5938 return -EOPNOTSUPP; 5939 5940 if (spec->ip_ver != ETH_RX_NFC_IP4) 5941 return -EOPNOTSUPP; 5942 5943 return 0; 5944 } 5945 5946 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, 5947 u32 *unused_tuple) 5948 { 5949 if (!spec || !unused_tuple) 5950 return -EINVAL; 5951 5952 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 5953 5954 /* check whether src/dst ip address used */ 5955 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) 5956 *unused_tuple |= BIT(INNER_SRC_IP); 5957 5958 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) 5959 *unused_tuple |= BIT(INNER_DST_IP); 5960 5961 if (!spec->psrc) 5962 *unused_tuple |= BIT(INNER_SRC_PORT); 5963 5964 if (!spec->pdst) 5965 *unused_tuple |= BIT(INNER_DST_PORT); 5966 5967 if (!spec->tclass) 5968 *unused_tuple |= BIT(INNER_IP_TOS); 5969 5970 return 0; 5971 } 5972 5973 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, 5974 u32 *unused_tuple) 5975 { 5976 if (!spec || !unused_tuple) 5977 return -EINVAL; 5978 5979 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5980 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5981 5982 /* check whether src/dst ip address used */ 5983 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) 5984 *unused_tuple |= BIT(INNER_SRC_IP); 5985 5986 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) 5987 *unused_tuple |= BIT(INNER_DST_IP); 5988 5989 if (!spec->l4_proto) 5990 *unused_tuple |= BIT(INNER_IP_PROTO); 5991 5992 if (!spec->tclass) 5993 *unused_tuple |= BIT(INNER_IP_TOS); 5994 5995 if (spec->l4_4_bytes) 5996 return -EOPNOTSUPP; 5997 5998 return 0; 5999 } 6000 6001 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) 6002 { 6003 if (!spec || !unused_tuple) 6004 return -EINVAL; 6005 6006 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 6007 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | 6008 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); 6009 6010 if (is_zero_ether_addr(spec->h_source)) 6011 *unused_tuple |= BIT(INNER_SRC_MAC); 6012 6013 if (is_zero_ether_addr(spec->h_dest)) 6014 *unused_tuple |= BIT(INNER_DST_MAC); 6015 6016 if (!spec->h_proto) 6017 *unused_tuple |= BIT(INNER_ETH_TYPE); 6018 6019 return 0; 6020 } 6021 6022 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, 6023 struct ethtool_rx_flow_spec *fs, 6024 u32 *unused_tuple) 6025 { 6026 if (fs->flow_type & FLOW_EXT) { 6027 if (fs->h_ext.vlan_etype) { 6028 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); 6029 return -EOPNOTSUPP; 6030 } 6031 6032 if (!fs->h_ext.vlan_tci) 6033 *unused_tuple |= BIT(INNER_VLAN_TAG_FST); 6034 6035 if (fs->m_ext.vlan_tci && 6036 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { 6037 dev_err(&hdev->pdev->dev, 6038 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n", 6039 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); 6040 return -EINVAL; 6041 } 6042 } else { 6043 *unused_tuple |= BIT(INNER_VLAN_TAG_FST); 6044 } 6045 6046 if (fs->flow_type & FLOW_MAC_EXT) { 6047 if (hdev->fd_cfg.fd_mode != 6048 
HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 6049 dev_err(&hdev->pdev->dev, 6050 "FLOW_MAC_EXT is not supported in current fd mode!\n"); 6051 return -EOPNOTSUPP; 6052 } 6053 6054 if (is_zero_ether_addr(fs->h_ext.h_dest)) 6055 *unused_tuple |= BIT(INNER_DST_MAC); 6056 else 6057 *unused_tuple &= ~BIT(INNER_DST_MAC); 6058 } 6059 6060 return 0; 6061 } 6062 6063 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, 6064 struct hclge_fd_user_def_info *info) 6065 { 6066 switch (flow_type) { 6067 case ETHER_FLOW: 6068 info->layer = HCLGE_FD_USER_DEF_L2; 6069 *unused_tuple &= ~BIT(INNER_L2_RSV); 6070 break; 6071 case IP_USER_FLOW: 6072 case IPV6_USER_FLOW: 6073 info->layer = HCLGE_FD_USER_DEF_L3; 6074 *unused_tuple &= ~BIT(INNER_L3_RSV); 6075 break; 6076 case TCP_V4_FLOW: 6077 case UDP_V4_FLOW: 6078 case TCP_V6_FLOW: 6079 case UDP_V6_FLOW: 6080 info->layer = HCLGE_FD_USER_DEF_L4; 6081 *unused_tuple &= ~BIT(INNER_L4_RSV); 6082 break; 6083 default: 6084 return -EOPNOTSUPP; 6085 } 6086 6087 return 0; 6088 } 6089 6090 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) 6091 { 6092 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; 6093 } 6094 6095 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, 6096 struct ethtool_rx_flow_spec *fs, 6097 u32 *unused_tuple, 6098 struct hclge_fd_user_def_info *info) 6099 { 6100 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; 6101 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6102 u16 data, offset, data_mask, offset_mask; 6103 int ret; 6104 6105 info->layer = HCLGE_FD_USER_DEF_NONE; 6106 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; 6107 6108 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) 6109 return 0; 6110 6111 /* user-def data from ethtool is 64 bit value, the bit0~15 is used 6112 * for data, and bit32~47 is used for offset. 
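	 *
	 * Hedged example (the field split mirrors the masking below; the
	 * byte-offset semantics themselves are defined by hardware): a
	 * 64-bit user-def value of 0x0000000400000a0b arrives as
	 * h_ext.data[0] = 0x00000004 (upper word) and
	 * h_ext.data[1] = 0x00000a0b (lower word), giving offset = 4 and
	 * data = 0x0a0b. m_ext.data[0] must leave the offset unmasked
	 * (HCLGE_FD_USER_DEF_OFFSET_UNMASK), while m_ext.data[1] supplies
	 * the data mask.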
6113 */ 6114 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; 6115 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; 6116 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; 6117 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; 6118 6119 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) { 6120 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); 6121 return -EOPNOTSUPP; 6122 } 6123 6124 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) { 6125 dev_err(&hdev->pdev->dev, 6126 "user-def offset[%u] should be no more than %u\n", 6127 offset, HCLGE_FD_MAX_USER_DEF_OFFSET); 6128 return -EINVAL; 6129 } 6130 6131 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) { 6132 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); 6133 return -EINVAL; 6134 } 6135 6136 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); 6137 if (ret) { 6138 dev_err(&hdev->pdev->dev, 6139 "unsupported flow type for user-def bytes, ret = %d\n", 6140 ret); 6141 return ret; 6142 } 6143 6144 info->data = data; 6145 info->data_mask = data_mask; 6146 info->offset = offset; 6147 6148 return 0; 6149 } 6150 6151 static int hclge_fd_check_spec(struct hclge_dev *hdev, 6152 struct ethtool_rx_flow_spec *fs, 6153 u32 *unused_tuple, 6154 struct hclge_fd_user_def_info *info) 6155 { 6156 u32 flow_type; 6157 int ret; 6158 6159 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 6160 dev_err(&hdev->pdev->dev, 6161 "failed to config fd rules, invalid rule location: %u, max is %u\n.", 6162 fs->location, 6163 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); 6164 return -EINVAL; 6165 } 6166 6167 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); 6168 if (ret) 6169 return ret; 6170 6171 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6172 switch (flow_type) { 6173 case SCTP_V4_FLOW: 6174 case TCP_V4_FLOW: 6175 case UDP_V4_FLOW: 6176 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, 6177 unused_tuple); 6178 break; 6179 case IP_USER_FLOW: 6180 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, 6181 unused_tuple); 6182 break; 6183 case SCTP_V6_FLOW: 6184 case TCP_V6_FLOW: 6185 case UDP_V6_FLOW: 6186 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, 6187 unused_tuple); 6188 break; 6189 case IPV6_USER_FLOW: 6190 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, 6191 unused_tuple); 6192 break; 6193 case ETHER_FLOW: 6194 if (hdev->fd_cfg.fd_mode != 6195 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 6196 dev_err(&hdev->pdev->dev, 6197 "ETHER_FLOW is not supported in current fd mode!\n"); 6198 return -EOPNOTSUPP; 6199 } 6200 6201 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, 6202 unused_tuple); 6203 break; 6204 default: 6205 dev_err(&hdev->pdev->dev, 6206 "unsupported protocol type, protocol type = %#x\n", 6207 flow_type); 6208 return -EOPNOTSUPP; 6209 } 6210 6211 if (ret) { 6212 dev_err(&hdev->pdev->dev, 6213 "failed to check flow union tuple, ret = %d\n", 6214 ret); 6215 return ret; 6216 } 6217 6218 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); 6219 } 6220 6221 static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs, 6222 struct hclge_fd_rule *rule, u8 ip_proto) 6223 { 6224 rule->tuples.src_ip[IPV4_INDEX] = 6225 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); 6226 rule->tuples_mask.src_ip[IPV4_INDEX] = 6227 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); 6228 6229 rule->tuples.dst_ip[IPV4_INDEX] = 6230 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); 6231 
rule->tuples_mask.dst_ip[IPV4_INDEX] = 6232 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); 6233 6234 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); 6235 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); 6236 6237 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); 6238 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); 6239 6240 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; 6241 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; 6242 6243 rule->tuples.ether_proto = ETH_P_IP; 6244 rule->tuples_mask.ether_proto = 0xFFFF; 6245 6246 rule->tuples.ip_proto = ip_proto; 6247 rule->tuples_mask.ip_proto = 0xFF; 6248 } 6249 6250 static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs, 6251 struct hclge_fd_rule *rule) 6252 { 6253 rule->tuples.src_ip[IPV4_INDEX] = 6254 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); 6255 rule->tuples_mask.src_ip[IPV4_INDEX] = 6256 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); 6257 6258 rule->tuples.dst_ip[IPV4_INDEX] = 6259 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); 6260 rule->tuples_mask.dst_ip[IPV4_INDEX] = 6261 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); 6262 6263 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; 6264 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; 6265 6266 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; 6267 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; 6268 6269 rule->tuples.ether_proto = ETH_P_IP; 6270 rule->tuples_mask.ether_proto = 0xFFFF; 6271 } 6272 6273 static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs, 6274 struct hclge_fd_rule *rule, u8 ip_proto) 6275 { 6276 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, 6277 IPV6_SIZE); 6278 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, 6279 IPV6_SIZE); 6280 6281 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, 6282 IPV6_SIZE); 6283 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, 6284 IPV6_SIZE); 6285 6286 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); 6287 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); 6288 6289 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); 6290 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); 6291 6292 rule->tuples.ether_proto = ETH_P_IPV6; 6293 rule->tuples_mask.ether_proto = 0xFFFF; 6294 6295 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; 6296 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; 6297 6298 rule->tuples.ip_proto = ip_proto; 6299 rule->tuples_mask.ip_proto = 0xFF; 6300 } 6301 6302 static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs, 6303 struct hclge_fd_rule *rule) 6304 { 6305 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, 6306 IPV6_SIZE); 6307 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, 6308 IPV6_SIZE); 6309 6310 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, 6311 IPV6_SIZE); 6312 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, 6313 IPV6_SIZE); 6314 6315 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; 6316 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; 6317 6318 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; 6319 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; 6320 6321 rule->tuples.ether_proto = ETH_P_IPV6; 6322 rule->tuples_mask.ether_proto = 0xFFFF; 6323 } 6324 6325 static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs, 6326 struct 
hclge_fd_rule *rule) 6327 { 6328 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); 6329 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); 6330 6331 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); 6332 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); 6333 6334 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); 6335 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); 6336 } 6337 6338 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, 6339 struct hclge_fd_rule *rule) 6340 { 6341 switch (info->layer) { 6342 case HCLGE_FD_USER_DEF_L2: 6343 rule->tuples.l2_user_def = info->data; 6344 rule->tuples_mask.l2_user_def = info->data_mask; 6345 break; 6346 case HCLGE_FD_USER_DEF_L3: 6347 rule->tuples.l3_user_def = info->data; 6348 rule->tuples_mask.l3_user_def = info->data_mask; 6349 break; 6350 case HCLGE_FD_USER_DEF_L4: 6351 rule->tuples.l4_user_def = (u32)info->data << 16; 6352 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; 6353 break; 6354 default: 6355 break; 6356 } 6357 6358 rule->ep.user_def = *info; 6359 } 6360 6361 static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs, 6362 struct hclge_fd_rule *rule, 6363 struct hclge_fd_user_def_info *info) 6364 { 6365 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6366 6367 switch (flow_type) { 6368 case SCTP_V4_FLOW: 6369 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP); 6370 break; 6371 case TCP_V4_FLOW: 6372 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP); 6373 break; 6374 case UDP_V4_FLOW: 6375 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP); 6376 break; 6377 case IP_USER_FLOW: 6378 hclge_fd_get_ip4_tuple(fs, rule); 6379 break; 6380 case SCTP_V6_FLOW: 6381 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP); 6382 break; 6383 case TCP_V6_FLOW: 6384 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP); 6385 break; 6386 case UDP_V6_FLOW: 6387 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP); 6388 break; 6389 case IPV6_USER_FLOW: 6390 hclge_fd_get_ip6_tuple(fs, rule); 6391 break; 6392 case ETHER_FLOW: 6393 hclge_fd_get_ether_tuple(fs, rule); 6394 break; 6395 default: 6396 return -EOPNOTSUPP; 6397 } 6398 6399 if (fs->flow_type & FLOW_EXT) { 6400 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); 6401 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); 6402 hclge_fd_get_user_def_tuple(info, rule); 6403 } 6404 6405 if (fs->flow_type & FLOW_MAC_EXT) { 6406 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); 6407 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); 6408 } 6409 6410 return 0; 6411 } 6412 6413 static int hclge_fd_config_rule(struct hclge_dev *hdev, 6414 struct hclge_fd_rule *rule) 6415 { 6416 int ret; 6417 6418 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 6419 if (ret) 6420 return ret; 6421 6422 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 6423 } 6424 6425 static int hclge_add_fd_entry_common(struct hclge_dev *hdev, 6426 struct hclge_fd_rule *rule) 6427 { 6428 int ret; 6429 6430 spin_lock_bh(&hdev->fd_rule_lock); 6431 6432 if (hdev->fd_active_type != rule->rule_type && 6433 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || 6434 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { 6435 dev_err(&hdev->pdev->dev, 6436 "mode conflict(new type %d, active type %d), please delete existent rules first\n", 6437 rule->rule_type, hdev->fd_active_type); 6438 spin_unlock_bh(&hdev->fd_rule_lock); 6439 return -EINVAL; 6440 } 6441 6442 ret 
= hclge_fd_check_user_def_refcnt(hdev, rule); 6443 if (ret) 6444 goto out; 6445 6446 ret = hclge_clear_arfs_rules(hdev); 6447 if (ret) 6448 goto out; 6449 6450 ret = hclge_fd_config_rule(hdev, rule); 6451 if (ret) 6452 goto out; 6453 6454 rule->state = HCLGE_FD_ACTIVE; 6455 hdev->fd_active_type = rule->rule_type; 6456 hclge_update_fd_list(hdev, rule->state, rule->location, rule); 6457 6458 out: 6459 spin_unlock_bh(&hdev->fd_rule_lock); 6460 return ret; 6461 } 6462 6463 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) 6464 { 6465 struct hclge_vport *vport = hclge_get_vport(handle); 6466 struct hclge_dev *hdev = vport->back; 6467 6468 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; 6469 } 6470 6471 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, 6472 u16 *vport_id, u8 *action, u16 *queue_id) 6473 { 6474 struct hclge_vport *vport = hdev->vport; 6475 6476 if (ring_cookie == RX_CLS_FLOW_DISC) { 6477 *action = HCLGE_FD_ACTION_DROP_PACKET; 6478 } else { 6479 u32 ring = ethtool_get_flow_spec_ring(ring_cookie); 6480 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); 6481 u16 tqps; 6482 6483 /* To keep consistent with user's configuration, minus 1 when 6484 * printing 'vf', because vf id from ethtool is added 1 for vf. 6485 */ 6486 if (vf > hdev->num_req_vfs) { 6487 dev_err(&hdev->pdev->dev, 6488 "Error: vf id (%u) should be less than %u\n", 6489 vf - 1U, hdev->num_req_vfs); 6490 return -EINVAL; 6491 } 6492 6493 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; 6494 tqps = hdev->vport[vf].nic.kinfo.num_tqps; 6495 6496 if (ring >= tqps) { 6497 dev_err(&hdev->pdev->dev, 6498 "Error: queue id (%u) > max tqp num (%u)\n", 6499 ring, tqps - 1U); 6500 return -EINVAL; 6501 } 6502 6503 *action = HCLGE_FD_ACTION_SELECT_QUEUE; 6504 *queue_id = ring; 6505 } 6506 6507 return 0; 6508 } 6509 6510 static int hclge_add_fd_entry(struct hnae3_handle *handle, 6511 struct ethtool_rxnfc *cmd) 6512 { 6513 struct hclge_vport *vport = hclge_get_vport(handle); 6514 struct hclge_dev *hdev = vport->back; 6515 struct hclge_fd_user_def_info info; 6516 u16 dst_vport_id = 0, q_index = 0; 6517 struct ethtool_rx_flow_spec *fs; 6518 struct hclge_fd_rule *rule; 6519 u32 unused = 0; 6520 u8 action; 6521 int ret; 6522 6523 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { 6524 dev_err(&hdev->pdev->dev, 6525 "flow table director is not supported\n"); 6526 return -EOPNOTSUPP; 6527 } 6528 6529 if (!hdev->fd_en) { 6530 dev_err(&hdev->pdev->dev, 6531 "please enable flow director first\n"); 6532 return -EOPNOTSUPP; 6533 } 6534 6535 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6536 6537 ret = hclge_fd_check_spec(hdev, fs, &unused, &info); 6538 if (ret) 6539 return ret; 6540 6541 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, 6542 &action, &q_index); 6543 if (ret) 6544 return ret; 6545 6546 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 6547 if (!rule) 6548 return -ENOMEM; 6549 6550 ret = hclge_fd_get_tuple(fs, rule, &info); 6551 if (ret) { 6552 kfree(rule); 6553 return ret; 6554 } 6555 6556 rule->flow_type = fs->flow_type; 6557 rule->location = fs->location; 6558 rule->unused_tuple = unused; 6559 rule->vf_id = dst_vport_id; 6560 rule->queue_id = q_index; 6561 rule->action = action; 6562 rule->rule_type = HCLGE_FD_EP_ACTIVE; 6563 6564 ret = hclge_add_fd_entry_common(hdev, rule); 6565 if (ret) 6566 kfree(rule); 6567 6568 return ret; 6569 } 6570 6571 static int hclge_del_fd_entry(struct hnae3_handle *handle, 6572 struct ethtool_rxnfc *cmd) 6573 { 6574 
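	/* Informational sketch of the delete path: the hardware entry at
	 * fs->location is invalidated by hclge_fd_tcam_config() with a NULL
	 * key and is_add == false (entry_vld becomes 0), then the software
	 * node is dropped via hclge_update_fd_list(HCLGE_FD_DELETED). Both
	 * steps run under fd_rule_lock.
	 */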
struct hclge_vport *vport = hclge_get_vport(handle); 6575 struct hclge_dev *hdev = vport->back; 6576 struct ethtool_rx_flow_spec *fs; 6577 int ret; 6578 6579 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6580 return -EOPNOTSUPP; 6581 6582 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6583 6584 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 6585 return -EINVAL; 6586 6587 spin_lock_bh(&hdev->fd_rule_lock); 6588 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || 6589 !test_bit(fs->location, hdev->fd_bmap)) { 6590 dev_err(&hdev->pdev->dev, 6591 "Delete fail, rule %u is inexistent\n", fs->location); 6592 spin_unlock_bh(&hdev->fd_rule_lock); 6593 return -ENOENT; 6594 } 6595 6596 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, 6597 NULL, false); 6598 if (ret) 6599 goto out; 6600 6601 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); 6602 6603 out: 6604 spin_unlock_bh(&hdev->fd_rule_lock); 6605 return ret; 6606 } 6607 6608 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, 6609 bool clear_list) 6610 { 6611 struct hclge_fd_rule *rule; 6612 struct hlist_node *node; 6613 u16 location; 6614 6615 spin_lock_bh(&hdev->fd_rule_lock); 6616 6617 for_each_set_bit(location, hdev->fd_bmap, 6618 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 6619 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, 6620 NULL, false); 6621 6622 if (clear_list) { 6623 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, 6624 rule_node) { 6625 hlist_del(&rule->rule_node); 6626 kfree(rule); 6627 } 6628 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 6629 hdev->hclge_fd_rule_num = 0; 6630 bitmap_zero(hdev->fd_bmap, 6631 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 6632 } 6633 6634 spin_unlock_bh(&hdev->fd_rule_lock); 6635 } 6636 6637 static void hclge_del_all_fd_entries(struct hclge_dev *hdev) 6638 { 6639 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6640 return; 6641 6642 hclge_clear_fd_rules_in_list(hdev, true); 6643 hclge_fd_disable_user_def(hdev); 6644 } 6645 6646 static int hclge_restore_fd_entries(struct hnae3_handle *handle) 6647 { 6648 struct hclge_vport *vport = hclge_get_vport(handle); 6649 struct hclge_dev *hdev = vport->back; 6650 struct hclge_fd_rule *rule; 6651 struct hlist_node *node; 6652 6653 /* Return ok here, because reset error handling will check this 6654 * return value. If error is returned here, the reset process will 6655 * fail. 
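	 * The rules themselves are replayed later: marking ACTIVE rules as
	 * HCLGE_FD_TO_ADD and setting HCLGE_STATE_FD_TBL_CHANGED below lets
	 * the periodic service task re-program them through
	 * hclge_sync_fd_list().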
6656 */ 6657 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6658 return 0; 6659 6660 /* if fd is disabled, should not restore it when reset */ 6661 if (!hdev->fd_en) 6662 return 0; 6663 6664 spin_lock_bh(&hdev->fd_rule_lock); 6665 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 6666 if (rule->state == HCLGE_FD_ACTIVE) 6667 rule->state = HCLGE_FD_TO_ADD; 6668 } 6669 spin_unlock_bh(&hdev->fd_rule_lock); 6670 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 6671 6672 return 0; 6673 } 6674 6675 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, 6676 struct ethtool_rxnfc *cmd) 6677 { 6678 struct hclge_vport *vport = hclge_get_vport(handle); 6679 struct hclge_dev *hdev = vport->back; 6680 6681 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) 6682 return -EOPNOTSUPP; 6683 6684 cmd->rule_cnt = hdev->hclge_fd_rule_num; 6685 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 6686 6687 return 0; 6688 } 6689 6690 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, 6691 struct ethtool_tcpip4_spec *spec, 6692 struct ethtool_tcpip4_spec *spec_mask) 6693 { 6694 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 6695 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 6696 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 6697 6698 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 6699 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? 6700 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 6701 6702 spec->psrc = cpu_to_be16(rule->tuples.src_port); 6703 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 6704 0 : cpu_to_be16(rule->tuples_mask.src_port); 6705 6706 spec->pdst = cpu_to_be16(rule->tuples.dst_port); 6707 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? 6708 0 : cpu_to_be16(rule->tuples_mask.dst_port); 6709 6710 spec->tos = rule->tuples.ip_tos; 6711 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6712 0 : rule->tuples_mask.ip_tos; 6713 } 6714 6715 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, 6716 struct ethtool_usrip4_spec *spec, 6717 struct ethtool_usrip4_spec *spec_mask) 6718 { 6719 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 6720 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 6721 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 6722 6723 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 6724 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? 6725 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 6726 6727 spec->tos = rule->tuples.ip_tos; 6728 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6729 0 : rule->tuples_mask.ip_tos; 6730 6731 spec->proto = rule->tuples.ip_proto; 6732 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 
6733 0 : rule->tuples_mask.ip_proto; 6734 6735 spec->ip_ver = ETH_RX_NFC_IP4; 6736 } 6737 6738 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, 6739 struct ethtool_tcpip6_spec *spec, 6740 struct ethtool_tcpip6_spec *spec_mask) 6741 { 6742 cpu_to_be32_array(spec->ip6src, 6743 rule->tuples.src_ip, IPV6_SIZE); 6744 cpu_to_be32_array(spec->ip6dst, 6745 rule->tuples.dst_ip, IPV6_SIZE); 6746 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 6747 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); 6748 else 6749 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, 6750 IPV6_SIZE); 6751 6752 if (rule->unused_tuple & BIT(INNER_DST_IP)) 6753 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); 6754 else 6755 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, 6756 IPV6_SIZE); 6757 6758 spec->tclass = rule->tuples.ip_tos; 6759 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6760 0 : rule->tuples_mask.ip_tos; 6761 6762 spec->psrc = cpu_to_be16(rule->tuples.src_port); 6763 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 6764 0 : cpu_to_be16(rule->tuples_mask.src_port); 6765 6766 spec->pdst = cpu_to_be16(rule->tuples.dst_port); 6767 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? 6768 0 : cpu_to_be16(rule->tuples_mask.dst_port); 6769 } 6770 6771 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, 6772 struct ethtool_usrip6_spec *spec, 6773 struct ethtool_usrip6_spec *spec_mask) 6774 { 6775 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); 6776 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); 6777 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 6778 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); 6779 else 6780 cpu_to_be32_array(spec_mask->ip6src, 6781 rule->tuples_mask.src_ip, IPV6_SIZE); 6782 6783 if (rule->unused_tuple & BIT(INNER_DST_IP)) 6784 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); 6785 else 6786 cpu_to_be32_array(spec_mask->ip6dst, 6787 rule->tuples_mask.dst_ip, IPV6_SIZE); 6788 6789 spec->tclass = rule->tuples.ip_tos; 6790 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6791 0 : rule->tuples_mask.ip_tos; 6792 6793 spec->l4_proto = rule->tuples.ip_proto; 6794 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 6795 0 : rule->tuples_mask.ip_proto; 6796 } 6797 6798 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, 6799 struct ethhdr *spec, 6800 struct ethhdr *spec_mask) 6801 { 6802 ether_addr_copy(spec->h_source, rule->tuples.src_mac); 6803 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); 6804 6805 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) 6806 eth_zero_addr(spec_mask->h_source); 6807 else 6808 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); 6809 6810 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6811 eth_zero_addr(spec_mask->h_dest); 6812 else 6813 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); 6814 6815 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); 6816 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? 
6817 0 : cpu_to_be16(rule->tuples_mask.ether_proto); 6818 } 6819 6820 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, 6821 struct hclge_fd_rule *rule) 6822 { 6823 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == 6824 HCLGE_FD_TUPLE_USER_DEF_TUPLES) { 6825 fs->h_ext.data[0] = 0; 6826 fs->h_ext.data[1] = 0; 6827 fs->m_ext.data[0] = 0; 6828 fs->m_ext.data[1] = 0; 6829 } else { 6830 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); 6831 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); 6832 fs->m_ext.data[0] = 6833 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK); 6834 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); 6835 } 6836 } 6837 6838 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, 6839 struct hclge_fd_rule *rule) 6840 { 6841 if (fs->flow_type & FLOW_EXT) { 6842 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); 6843 fs->m_ext.vlan_tci = 6844 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 6845 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); 6846 6847 hclge_fd_get_user_def_info(fs, rule); 6848 } 6849 6850 if (fs->flow_type & FLOW_MAC_EXT) { 6851 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); 6852 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6853 eth_zero_addr(fs->m_u.ether_spec.h_dest); 6854 else 6855 ether_addr_copy(fs->m_u.ether_spec.h_dest, 6856 rule->tuples_mask.dst_mac); 6857 } 6858 } 6859 6860 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, 6861 u16 location) 6862 { 6863 struct hclge_fd_rule *rule = NULL; 6864 struct hlist_node *node2; 6865 6866 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 6867 if (rule->location == location) 6868 return rule; 6869 else if (rule->location > location) 6870 return NULL; 6871 } 6872 6873 return NULL; 6874 } 6875 6876 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, 6877 struct hclge_fd_rule *rule) 6878 { 6879 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 6880 fs->ring_cookie = RX_CLS_FLOW_DISC; 6881 } else { 6882 u64 vf_id; 6883 6884 fs->ring_cookie = rule->queue_id; 6885 vf_id = rule->vf_id; 6886 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 6887 fs->ring_cookie |= vf_id; 6888 } 6889 } 6890 6891 static int hclge_get_fd_rule_info(struct hnae3_handle *handle, 6892 struct ethtool_rxnfc *cmd) 6893 { 6894 struct hclge_vport *vport = hclge_get_vport(handle); 6895 struct hclge_fd_rule *rule = NULL; 6896 struct hclge_dev *hdev = vport->back; 6897 struct ethtool_rx_flow_spec *fs; 6898 6899 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6900 return -EOPNOTSUPP; 6901 6902 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6903 6904 spin_lock_bh(&hdev->fd_rule_lock); 6905 6906 rule = hclge_get_fd_rule(hdev, fs->location); 6907 if (!rule) { 6908 spin_unlock_bh(&hdev->fd_rule_lock); 6909 return -ENOENT; 6910 } 6911 6912 fs->flow_type = rule->flow_type; 6913 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 6914 case SCTP_V4_FLOW: 6915 case TCP_V4_FLOW: 6916 case UDP_V4_FLOW: 6917 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, 6918 &fs->m_u.tcp_ip4_spec); 6919 break; 6920 case IP_USER_FLOW: 6921 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, 6922 &fs->m_u.usr_ip4_spec); 6923 break; 6924 case SCTP_V6_FLOW: 6925 case TCP_V6_FLOW: 6926 case UDP_V6_FLOW: 6927 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, 6928 &fs->m_u.tcp_ip6_spec); 6929 break; 6930 case IPV6_USER_FLOW: 6931 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, 6932 &fs->m_u.usr_ip6_spec); 6933 break; 6934 /* The flow type 
of fd rule has been checked before adding in to rule 6935 * list. As other flow types have been handled, it must be ETHER_FLOW 6936 * for the default case 6937 */ 6938 default: 6939 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, 6940 &fs->m_u.ether_spec); 6941 break; 6942 } 6943 6944 hclge_fd_get_ext_info(fs, rule); 6945 6946 hclge_fd_get_ring_cookie(fs, rule); 6947 6948 spin_unlock_bh(&hdev->fd_rule_lock); 6949 6950 return 0; 6951 } 6952 6953 static int hclge_get_all_rules(struct hnae3_handle *handle, 6954 struct ethtool_rxnfc *cmd, u32 *rule_locs) 6955 { 6956 struct hclge_vport *vport = hclge_get_vport(handle); 6957 struct hclge_dev *hdev = vport->back; 6958 struct hclge_fd_rule *rule; 6959 struct hlist_node *node2; 6960 int cnt = 0; 6961 6962 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6963 return -EOPNOTSUPP; 6964 6965 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 6966 6967 spin_lock_bh(&hdev->fd_rule_lock); 6968 hlist_for_each_entry_safe(rule, node2, 6969 &hdev->fd_rule_list, rule_node) { 6970 if (cnt == cmd->rule_cnt) { 6971 spin_unlock_bh(&hdev->fd_rule_lock); 6972 return -EMSGSIZE; 6973 } 6974 6975 if (rule->state == HCLGE_FD_TO_DEL) 6976 continue; 6977 6978 rule_locs[cnt] = rule->location; 6979 cnt++; 6980 } 6981 6982 spin_unlock_bh(&hdev->fd_rule_lock); 6983 6984 cmd->rule_cnt = cnt; 6985 6986 return 0; 6987 } 6988 6989 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, 6990 struct hclge_fd_rule_tuples *tuples) 6991 { 6992 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 6993 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 6994 6995 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); 6996 tuples->ip_proto = fkeys->basic.ip_proto; 6997 tuples->dst_port = be16_to_cpu(fkeys->ports.dst); 6998 6999 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 7000 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); 7001 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); 7002 } else { 7003 int i; 7004 7005 for (i = 0; i < IPV6_SIZE; i++) { 7006 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); 7007 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); 7008 } 7009 } 7010 } 7011 7012 /* traverse all rules, check whether an existed rule has the same tuples */ 7013 static struct hclge_fd_rule * 7014 hclge_fd_search_flow_keys(struct hclge_dev *hdev, 7015 const struct hclge_fd_rule_tuples *tuples) 7016 { 7017 struct hclge_fd_rule *rule = NULL; 7018 struct hlist_node *node; 7019 7020 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7021 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) 7022 return rule; 7023 } 7024 7025 return NULL; 7026 } 7027 7028 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, 7029 struct hclge_fd_rule *rule) 7030 { 7031 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 7032 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) | 7033 BIT(INNER_SRC_PORT); 7034 rule->action = 0; 7035 rule->vf_id = 0; 7036 rule->rule_type = HCLGE_FD_ARFS_ACTIVE; 7037 rule->state = HCLGE_FD_TO_ADD; 7038 if (tuples->ether_proto == ETH_P_IP) { 7039 if (tuples->ip_proto == IPPROTO_TCP) 7040 rule->flow_type = TCP_V4_FLOW; 7041 else 7042 rule->flow_type = UDP_V4_FLOW; 7043 } else { 7044 if (tuples->ip_proto == IPPROTO_TCP) 7045 rule->flow_type = TCP_V6_FLOW; 7046 else 7047 rule->flow_type = UDP_V6_FLOW; 7048 } 7049 memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); 7050 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); 7051 } 7052 7053 static int 
hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, 7054 u16 flow_id, struct flow_keys *fkeys) 7055 { 7056 struct hclge_vport *vport = hclge_get_vport(handle); 7057 struct hclge_fd_rule_tuples new_tuples = {}; 7058 struct hclge_dev *hdev = vport->back; 7059 struct hclge_fd_rule *rule; 7060 u16 bit_id; 7061 7062 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7063 return -EOPNOTSUPP; 7064 7065 /* when there is already fd rule existed add by user, 7066 * arfs should not work 7067 */ 7068 spin_lock_bh(&hdev->fd_rule_lock); 7069 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && 7070 hdev->fd_active_type != HCLGE_FD_RULE_NONE) { 7071 spin_unlock_bh(&hdev->fd_rule_lock); 7072 return -EOPNOTSUPP; 7073 } 7074 7075 hclge_fd_get_flow_tuples(fkeys, &new_tuples); 7076 7077 /* check is there flow director filter existed for this flow, 7078 * if not, create a new filter for it; 7079 * if filter exist with different queue id, modify the filter; 7080 * if filter exist with same queue id, do nothing 7081 */ 7082 rule = hclge_fd_search_flow_keys(hdev, &new_tuples); 7083 if (!rule) { 7084 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); 7085 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 7086 spin_unlock_bh(&hdev->fd_rule_lock); 7087 return -ENOSPC; 7088 } 7089 7090 rule = kzalloc(sizeof(*rule), GFP_ATOMIC); 7091 if (!rule) { 7092 spin_unlock_bh(&hdev->fd_rule_lock); 7093 return -ENOMEM; 7094 } 7095 7096 rule->location = bit_id; 7097 rule->arfs.flow_id = flow_id; 7098 rule->queue_id = queue_id; 7099 hclge_fd_build_arfs_rule(&new_tuples, rule); 7100 hclge_update_fd_list(hdev, rule->state, rule->location, rule); 7101 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; 7102 } else if (rule->queue_id != queue_id) { 7103 rule->queue_id = queue_id; 7104 rule->state = HCLGE_FD_TO_ADD; 7105 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7106 hclge_task_schedule(hdev, 0); 7107 } 7108 spin_unlock_bh(&hdev->fd_rule_lock); 7109 return rule->location; 7110 } 7111 7112 static void hclge_rfs_filter_expire(struct hclge_dev *hdev) 7113 { 7114 #ifdef CONFIG_RFS_ACCEL 7115 struct hnae3_handle *handle = &hdev->vport[0].nic; 7116 struct hclge_fd_rule *rule; 7117 struct hlist_node *node; 7118 7119 spin_lock_bh(&hdev->fd_rule_lock); 7120 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { 7121 spin_unlock_bh(&hdev->fd_rule_lock); 7122 return; 7123 } 7124 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7125 if (rule->state != HCLGE_FD_ACTIVE) 7126 continue; 7127 if (rps_may_expire_flow(handle->netdev, rule->queue_id, 7128 rule->arfs.flow_id, rule->location)) { 7129 rule->state = HCLGE_FD_TO_DEL; 7130 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7131 } 7132 } 7133 spin_unlock_bh(&hdev->fd_rule_lock); 7134 #endif 7135 } 7136 7137 /* make sure being called after lock up with fd_rule_lock */ 7138 static int hclge_clear_arfs_rules(struct hclge_dev *hdev) 7139 { 7140 #ifdef CONFIG_RFS_ACCEL 7141 struct hclge_fd_rule *rule; 7142 struct hlist_node *node; 7143 int ret; 7144 7145 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) 7146 return 0; 7147 7148 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7149 switch (rule->state) { 7150 case HCLGE_FD_TO_DEL: 7151 case HCLGE_FD_ACTIVE: 7152 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 7153 rule->location, NULL, false); 7154 if (ret) 7155 return ret; 7156 fallthrough; 7157 case HCLGE_FD_TO_ADD: 7158 hclge_fd_dec_rule_cnt(hdev, rule->location); 7159 hlist_del(&rule->rule_node); 7160 
kfree(rule); 7161 break; 7162 default: 7163 break; 7164 } 7165 } 7166 hclge_sync_fd_state(hdev); 7167 7168 #endif 7169 return 0; 7170 } 7171 7172 static void hclge_get_cls_key_basic(const struct flow_rule *flow, 7173 struct hclge_fd_rule *rule) 7174 { 7175 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) { 7176 struct flow_match_basic match; 7177 u16 ethtype_key, ethtype_mask; 7178 7179 flow_rule_match_basic(flow, &match); 7180 ethtype_key = ntohs(match.key->n_proto); 7181 ethtype_mask = ntohs(match.mask->n_proto); 7182 7183 if (ethtype_key == ETH_P_ALL) { 7184 ethtype_key = 0; 7185 ethtype_mask = 0; 7186 } 7187 rule->tuples.ether_proto = ethtype_key; 7188 rule->tuples_mask.ether_proto = ethtype_mask; 7189 rule->tuples.ip_proto = match.key->ip_proto; 7190 rule->tuples_mask.ip_proto = match.mask->ip_proto; 7191 } else { 7192 rule->unused_tuple |= BIT(INNER_IP_PROTO); 7193 rule->unused_tuple |= BIT(INNER_ETH_TYPE); 7194 } 7195 } 7196 7197 static void hclge_get_cls_key_mac(const struct flow_rule *flow, 7198 struct hclge_fd_rule *rule) 7199 { 7200 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 7201 struct flow_match_eth_addrs match; 7202 7203 flow_rule_match_eth_addrs(flow, &match); 7204 ether_addr_copy(rule->tuples.dst_mac, match.key->dst); 7205 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); 7206 ether_addr_copy(rule->tuples.src_mac, match.key->src); 7207 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); 7208 } else { 7209 rule->unused_tuple |= BIT(INNER_DST_MAC); 7210 rule->unused_tuple |= BIT(INNER_SRC_MAC); 7211 } 7212 } 7213 7214 static void hclge_get_cls_key_vlan(const struct flow_rule *flow, 7215 struct hclge_fd_rule *rule) 7216 { 7217 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { 7218 struct flow_match_vlan match; 7219 7220 flow_rule_match_vlan(flow, &match); 7221 rule->tuples.vlan_tag1 = match.key->vlan_id | 7222 (match.key->vlan_priority << VLAN_PRIO_SHIFT); 7223 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | 7224 (match.mask->vlan_priority << VLAN_PRIO_SHIFT); 7225 } else { 7226 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); 7227 } 7228 } 7229 7230 static int hclge_get_cls_key_ip(const struct flow_rule *flow, 7231 struct hclge_fd_rule *rule, 7232 struct netlink_ext_ack *extack) 7233 { 7234 u16 addr_type = 0; 7235 7236 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) { 7237 struct flow_match_control match; 7238 7239 flow_rule_match_control(flow, &match); 7240 addr_type = match.key->addr_type; 7241 7242 if (flow_rule_has_control_flags(match.mask->flags, extack)) 7243 return -EOPNOTSUPP; 7244 } 7245 7246 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 7247 struct flow_match_ipv4_addrs match; 7248 7249 flow_rule_match_ipv4_addrs(flow, &match); 7250 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); 7251 rule->tuples_mask.src_ip[IPV4_INDEX] = 7252 be32_to_cpu(match.mask->src); 7253 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); 7254 rule->tuples_mask.dst_ip[IPV4_INDEX] = 7255 be32_to_cpu(match.mask->dst); 7256 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 7257 struct flow_match_ipv6_addrs match; 7258 7259 flow_rule_match_ipv6_addrs(flow, &match); 7260 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, 7261 IPV6_SIZE); 7262 be32_to_cpu_array(rule->tuples_mask.src_ip, 7263 match.mask->src.s6_addr32, IPV6_SIZE); 7264 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, 7265 IPV6_SIZE); 7266 be32_to_cpu_array(rule->tuples_mask.dst_ip, 7267 
match.mask->dst.s6_addr32, IPV6_SIZE); 7268 } else { 7269 rule->unused_tuple |= BIT(INNER_SRC_IP); 7270 rule->unused_tuple |= BIT(INNER_DST_IP); 7271 } 7272 7273 return 0; 7274 } 7275 7276 static void hclge_get_cls_key_port(const struct flow_rule *flow, 7277 struct hclge_fd_rule *rule) 7278 { 7279 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { 7280 struct flow_match_ports match; 7281 7282 flow_rule_match_ports(flow, &match); 7283 7284 rule->tuples.src_port = be16_to_cpu(match.key->src); 7285 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); 7286 rule->tuples.dst_port = be16_to_cpu(match.key->dst); 7287 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); 7288 } else { 7289 rule->unused_tuple |= BIT(INNER_SRC_PORT); 7290 rule->unused_tuple |= BIT(INNER_DST_PORT); 7291 } 7292 } 7293 7294 static int hclge_parse_cls_flower(struct hclge_dev *hdev, 7295 struct flow_cls_offload *cls_flower, 7296 struct hclge_fd_rule *rule) 7297 { 7298 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); 7299 struct netlink_ext_ack *extack = cls_flower->common.extack; 7300 struct flow_dissector *dissector = flow->match.dissector; 7301 int ret; 7302 7303 if (dissector->used_keys & 7304 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 7305 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 7306 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 7307 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 7308 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 7309 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 7310 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { 7311 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n", 7312 dissector->used_keys); 7313 return -EOPNOTSUPP; 7314 } 7315 7316 hclge_get_cls_key_basic(flow, rule); 7317 hclge_get_cls_key_mac(flow, rule); 7318 hclge_get_cls_key_vlan(flow, rule); 7319 7320 ret = hclge_get_cls_key_ip(flow, rule, extack); 7321 if (ret) 7322 return ret; 7323 7324 hclge_get_cls_key_port(flow, rule); 7325 7326 return 0; 7327 } 7328 7329 static int hclge_check_cls_flower(struct hclge_dev *hdev, 7330 struct flow_cls_offload *cls_flower, int tc) 7331 { 7332 u32 prio = cls_flower->common.prio; 7333 7334 if (tc < 0 || tc > hdev->tc_max) { 7335 dev_err(&hdev->pdev->dev, "invalid traffic class\n"); 7336 return -EINVAL; 7337 } 7338 7339 if (prio == 0 || 7340 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 7341 dev_err(&hdev->pdev->dev, 7342 "prio %u should be in range[1, %u]\n", 7343 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 7344 return -EINVAL; 7345 } 7346 7347 if (test_bit(prio - 1, hdev->fd_bmap)) { 7348 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); 7349 return -EINVAL; 7350 } 7351 return 0; 7352 } 7353 7354 static int hclge_add_cls_flower(struct hnae3_handle *handle, 7355 struct flow_cls_offload *cls_flower, 7356 int tc) 7357 { 7358 struct hclge_vport *vport = hclge_get_vport(handle); 7359 struct hclge_dev *hdev = vport->back; 7360 struct hclge_fd_rule *rule; 7361 int ret; 7362 7363 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { 7364 dev_err(&hdev->pdev->dev, 7365 "cls flower is not supported\n"); 7366 return -EOPNOTSUPP; 7367 } 7368 7369 ret = hclge_check_cls_flower(hdev, cls_flower, tc); 7370 if (ret) { 7371 dev_err(&hdev->pdev->dev, 7372 "failed to check cls flower params, ret = %d\n", ret); 7373 return ret; 7374 } 7375 7376 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 7377 if (!rule) 7378 return -ENOMEM; 7379 7380 ret = hclge_parse_cls_flower(hdev, cls_flower, rule); 7381 if (ret) { 7382 kfree(rule); 7383 return ret; 7384 } 7385 7386 rule->action = HCLGE_FD_ACTION_SELECT_TC; 7387 
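	/* For a SELECT_TC rule the action data is filled in later by
	 * hclge_config_action(): queue_id = tqp_offset[tc] and
	 * tc_size = ilog2(tqp_count[tc]), so traffic is steered to the whole
	 * TC rather than one queue. The TCAM location is prio - 1, already
	 * validated against rule_num and fd_bmap by hclge_check_cls_flower().
	 */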
rule->cls_flower.tc = tc; 7388 rule->location = cls_flower->common.prio - 1; 7389 rule->vf_id = 0; 7390 rule->cls_flower.cookie = cls_flower->cookie; 7391 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; 7392 7393 ret = hclge_add_fd_entry_common(hdev, rule); 7394 if (ret) 7395 kfree(rule); 7396 7397 return ret; 7398 } 7399 7400 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, 7401 unsigned long cookie) 7402 { 7403 struct hclge_fd_rule *rule; 7404 struct hlist_node *node; 7405 7406 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7407 if (rule->cls_flower.cookie == cookie) 7408 return rule; 7409 } 7410 7411 return NULL; 7412 } 7413 7414 static int hclge_del_cls_flower(struct hnae3_handle *handle, 7415 struct flow_cls_offload *cls_flower) 7416 { 7417 struct hclge_vport *vport = hclge_get_vport(handle); 7418 struct hclge_dev *hdev = vport->back; 7419 struct hclge_fd_rule *rule; 7420 int ret; 7421 7422 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7423 return -EOPNOTSUPP; 7424 7425 spin_lock_bh(&hdev->fd_rule_lock); 7426 7427 rule = hclge_find_cls_flower(hdev, cls_flower->cookie); 7428 if (!rule) { 7429 spin_unlock_bh(&hdev->fd_rule_lock); 7430 return -EINVAL; 7431 } 7432 7433 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, 7434 NULL, false); 7435 if (ret) { 7436 /* if tcam config fail, set rule state to TO_DEL, 7437 * so the rule will be deleted when periodic 7438 * task being scheduled. 7439 */ 7440 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); 7441 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7442 spin_unlock_bh(&hdev->fd_rule_lock); 7443 return ret; 7444 } 7445 7446 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); 7447 spin_unlock_bh(&hdev->fd_rule_lock); 7448 7449 return 0; 7450 } 7451 7452 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) 7453 { 7454 struct hclge_fd_rule *rule; 7455 struct hlist_node *node; 7456 int ret = 0; 7457 7458 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) 7459 return; 7460 7461 spin_lock_bh(&hdev->fd_rule_lock); 7462 7463 hlist_for_each_entry_safe(rule, node, hlist, rule_node) { 7464 switch (rule->state) { 7465 case HCLGE_FD_TO_ADD: 7466 ret = hclge_fd_config_rule(hdev, rule); 7467 if (ret) 7468 goto out; 7469 rule->state = HCLGE_FD_ACTIVE; 7470 break; 7471 case HCLGE_FD_TO_DEL: 7472 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 7473 rule->location, NULL, false); 7474 if (ret) 7475 goto out; 7476 hclge_fd_dec_rule_cnt(hdev, rule->location); 7477 hclge_fd_free_node(hdev, rule); 7478 break; 7479 default: 7480 break; 7481 } 7482 } 7483 7484 out: 7485 if (ret) 7486 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7487 7488 spin_unlock_bh(&hdev->fd_rule_lock); 7489 } 7490 7491 static void hclge_sync_fd_table(struct hclge_dev *hdev) 7492 { 7493 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7494 return; 7495 7496 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { 7497 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; 7498 7499 hclge_clear_fd_rules_in_list(hdev, clear_list); 7500 } 7501 7502 hclge_sync_fd_user_def_cfg(hdev, false); 7503 7504 hclge_sync_fd_list(hdev, &hdev->fd_rule_list); 7505 } 7506 7507 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) 7508 { 7509 struct hclge_vport *vport = hclge_get_vport(handle); 7510 struct hclge_dev *hdev = vport->back; 7511 7512 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || 7513 
hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); 7514 } 7515 7516 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle) 7517 { 7518 struct hclge_vport *vport = hclge_get_vport(handle); 7519 struct hclge_dev *hdev = vport->back; 7520 7521 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 7522 } 7523 7524 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) 7525 { 7526 struct hclge_vport *vport = hclge_get_vport(handle); 7527 struct hclge_dev *hdev = vport->back; 7528 7529 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 7530 } 7531 7532 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) 7533 { 7534 struct hclge_vport *vport = hclge_get_vport(handle); 7535 struct hclge_dev *hdev = vport->back; 7536 7537 return hdev->rst_stats.hw_reset_done_cnt; 7538 } 7539 7540 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) 7541 { 7542 struct hclge_vport *vport = hclge_get_vport(handle); 7543 struct hclge_dev *hdev = vport->back; 7544 7545 hdev->fd_en = enable; 7546 7547 if (!enable) 7548 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); 7549 else 7550 hclge_restore_fd_entries(handle); 7551 7552 hclge_task_schedule(hdev, 0); 7553 } 7554 7555 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 7556 { 7557 #define HCLGE_LINK_STATUS_WAIT_CNT 3 7558 7559 struct hclge_desc desc; 7560 struct hclge_config_mac_mode_cmd *req = 7561 (struct hclge_config_mac_mode_cmd *)desc.data; 7562 u32 loop_en = 0; 7563 int ret; 7564 7565 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 7566 7567 if (enable) { 7568 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U); 7569 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U); 7570 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U); 7571 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U); 7572 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U); 7573 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U); 7574 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U); 7575 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U); 7576 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U); 7577 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U); 7578 } 7579 7580 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 7581 7582 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7583 if (ret) { 7584 dev_err(&hdev->pdev->dev, 7585 "mac enable fail, ret =%d.\n", ret); 7586 return; 7587 } 7588 7589 if (!enable) 7590 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, 7591 HCLGE_LINK_STATUS_WAIT_CNT); 7592 } 7593 7594 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, 7595 u8 switch_param, u8 param_mask) 7596 { 7597 struct hclge_mac_vlan_switch_cmd *req; 7598 struct hclge_desc desc; 7599 u32 func_id; 7600 int ret; 7601 7602 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); 7603 req = (struct hclge_mac_vlan_switch_cmd *)desc.data; 7604 7605 /* read current config parameter */ 7606 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, 7607 true); 7608 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; 7609 req->func_id = cpu_to_le32(func_id); 7610 7611 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7612 if (ret) { 7613 dev_err(&hdev->pdev->dev, 7614 "read mac vlan switch parameter fail, ret = %d\n", ret); 7615 return ret; 7616 } 7617 7618 /* modify and write new config parameter */ 7619 hclge_comm_cmd_reuse_desc(&desc, false); 7620 req->switch_param = (req->switch_param & param_mask) | switch_param; 7621 req->param_mask = param_mask; 7622 7623 ret = hclge_cmd_send(&hdev->hw, 
&desc, 1); 7624 if (ret) 7625 dev_err(&hdev->pdev->dev, 7626 "set mac vlan switch parameter fail, ret = %d\n", ret); 7627 return ret; 7628 } 7629 7630 static void hclge_phy_link_status_wait(struct hclge_dev *hdev, 7631 int link_ret) 7632 { 7633 #define HCLGE_PHY_LINK_STATUS_NUM 200 7634 7635 struct phy_device *phydev = hdev->hw.mac.phydev; 7636 int i = 0; 7637 int ret; 7638 7639 do { 7640 ret = phy_read_status(phydev); 7641 if (ret) { 7642 dev_err(&hdev->pdev->dev, 7643 "phy update link status fail, ret = %d\n", ret); 7644 return; 7645 } 7646 7647 if (phydev->link == link_ret) 7648 break; 7649 7650 msleep(HCLGE_LINK_STATUS_MS); 7651 } while (++i < HCLGE_PHY_LINK_STATUS_NUM); 7652 } 7653 7654 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, 7655 int wait_cnt) 7656 { 7657 int link_status; 7658 int i = 0; 7659 int ret; 7660 7661 do { 7662 ret = hclge_get_mac_link_status(hdev, &link_status); 7663 if (ret) 7664 return ret; 7665 if (link_status == link_ret) 7666 return 0; 7667 7668 msleep(HCLGE_LINK_STATUS_MS); 7669 } while (++i < wait_cnt); 7670 return -EBUSY; 7671 } 7672 7673 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, 7674 bool is_phy) 7675 { 7676 #define HCLGE_MAC_LINK_STATUS_NUM 100 7677 7678 int link_ret; 7679 7680 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 7681 7682 if (is_phy) 7683 hclge_phy_link_status_wait(hdev, link_ret); 7684 7685 return hclge_mac_link_status_wait(hdev, link_ret, 7686 HCLGE_MAC_LINK_STATUS_NUM); 7687 } 7688 7689 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) 7690 { 7691 struct hclge_config_mac_mode_cmd *req; 7692 struct hclge_desc desc; 7693 u32 loop_en; 7694 int ret; 7695 7696 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 7697 /* 1 Read out the MAC mode config at first */ 7698 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 7699 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7700 if (ret) { 7701 dev_err(&hdev->pdev->dev, 7702 "mac loopback get fail, ret =%d.\n", ret); 7703 return ret; 7704 } 7705 7706 /* 2 Then setup the loopback flag */ 7707 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 7708 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); 7709 7710 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 7711 7712 /* 3 Config mac work mode with loopback flag 7713 * and its original configure parameters 7714 */ 7715 hclge_comm_cmd_reuse_desc(&desc, false); 7716 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7717 if (ret) 7718 dev_err(&hdev->pdev->dev, 7719 "mac loopback set fail, ret =%d.\n", ret); 7720 return ret; 7721 } 7722 7723 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, 7724 enum hnae3_loop loop_mode) 7725 { 7726 struct hclge_common_lb_cmd *req; 7727 struct hclge_desc desc; 7728 u8 loop_mode_b; 7729 int ret; 7730 7731 req = (struct hclge_common_lb_cmd *)desc.data; 7732 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); 7733 7734 switch (loop_mode) { 7735 case HNAE3_LOOP_SERIAL_SERDES: 7736 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 7737 break; 7738 case HNAE3_LOOP_PARALLEL_SERDES: 7739 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; 7740 break; 7741 case HNAE3_LOOP_PHY: 7742 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B; 7743 break; 7744 default: 7745 dev_err(&hdev->pdev->dev, 7746 "unsupported loopback mode %d\n", loop_mode); 7747 return -ENOTSUPP; 7748 } 7749 7750 req->mask = loop_mode_b; 7751 if (en) 7752 req->enable = loop_mode_b; 7753 7754 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7755 if (ret) 7756 dev_err(&hdev->pdev->dev, 7757 "failed to send loopback cmd, loop_mode = %d, ret = %d\n", 7758 loop_mode, ret); 7759 7760 return ret; 7761 } 7762 7763 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) 7764 { 7765 #define HCLGE_COMMON_LB_RETRY_MS 10 7766 #define HCLGE_COMMON_LB_RETRY_NUM 100 7767 7768 struct hclge_common_lb_cmd *req; 7769 struct hclge_desc desc; 7770 u32 i = 0; 7771 int ret; 7772 7773 req = (struct hclge_common_lb_cmd *)desc.data; 7774 7775 do { 7776 msleep(HCLGE_COMMON_LB_RETRY_MS); 7777 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, 7778 true); 7779 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7780 if (ret) { 7781 dev_err(&hdev->pdev->dev, 7782 "failed to get loopback done status, ret = %d\n", 7783 ret); 7784 return ret; 7785 } 7786 } while (++i < HCLGE_COMMON_LB_RETRY_NUM && 7787 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); 7788 7789 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { 7790 dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); 7791 return -EBUSY; 7792 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { 7793 dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); 7794 return -EIO; 7795 } 7796 7797 return 0; 7798 } 7799 7800 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, 7801 enum hnae3_loop loop_mode) 7802 { 7803 int ret; 7804 7805 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); 7806 if (ret) 7807 return ret; 7808 7809 return hclge_cfg_common_loopback_wait(hdev); 7810 } 7811 7812 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, 7813 enum hnae3_loop loop_mode) 7814 { 7815 int ret; 7816 7817 ret = hclge_cfg_common_loopback(hdev, en, loop_mode); 7818 if (ret) 7819 return ret; 7820 7821 hclge_cfg_mac_mode(hdev, en); 7822 7823 ret = hclge_mac_phy_link_status_wait(hdev, en, false); 7824 if (ret) 7825 dev_err(&hdev->pdev->dev, 7826 "serdes loopback config mac mode timeout\n"); 7827 7828 return ret; 7829 } 7830 7831 static int hclge_enable_phy_loopback(struct hclge_dev *hdev, 7832 struct phy_device *phydev) 7833 { 7834 int ret; 7835 7836 if (!phydev->suspended) { 7837 ret = phy_suspend(phydev); 7838 if (ret) 7839 return ret; 7840 } 
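	/* Descriptive note: the PHY is suspended first (if it is not already)
	 * and then resumed, presumably to bring it back to a well-defined
	 * powered-up state before loopback is enabled below.
	 */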
7841 7842 ret = phy_resume(phydev); 7843 if (ret) 7844 return ret; 7845 7846 return phy_loopback(phydev, true); 7847 } 7848 7849 static int hclge_disable_phy_loopback(struct hclge_dev *hdev, 7850 struct phy_device *phydev) 7851 { 7852 int ret; 7853 7854 ret = phy_loopback(phydev, false); 7855 if (ret) 7856 return ret; 7857 7858 return phy_suspend(phydev); 7859 } 7860 7861 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) 7862 { 7863 struct phy_device *phydev = hdev->hw.mac.phydev; 7864 int ret; 7865 7866 if (!phydev) { 7867 if (hnae3_dev_phy_imp_supported(hdev)) 7868 return hclge_set_common_loopback(hdev, en, 7869 HNAE3_LOOP_PHY); 7870 return -ENOTSUPP; 7871 } 7872 7873 if (en) 7874 ret = hclge_enable_phy_loopback(hdev, phydev); 7875 else 7876 ret = hclge_disable_phy_loopback(hdev, phydev); 7877 if (ret) { 7878 dev_err(&hdev->pdev->dev, 7879 "set phy loopback fail, ret = %d\n", ret); 7880 return ret; 7881 } 7882 7883 hclge_cfg_mac_mode(hdev, en); 7884 7885 ret = hclge_mac_phy_link_status_wait(hdev, en, true); 7886 if (ret) 7887 dev_err(&hdev->pdev->dev, 7888 "phy loopback config mac mode timeout\n"); 7889 7890 return ret; 7891 } 7892 7893 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, 7894 u16 stream_id, bool enable) 7895 { 7896 struct hclge_desc desc; 7897 struct hclge_cfg_com_tqp_queue_cmd *req = 7898 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 7899 7900 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 7901 req->tqp_id = cpu_to_le16(tqp_id); 7902 req->stream_id = cpu_to_le16(stream_id); 7903 if (enable) 7904 req->enable |= 1U << HCLGE_TQP_ENABLE_B; 7905 7906 return hclge_cmd_send(&hdev->hw, &desc, 1); 7907 } 7908 7909 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable) 7910 { 7911 struct hclge_vport *vport = hclge_get_vport(handle); 7912 struct hclge_dev *hdev = vport->back; 7913 int ret; 7914 u16 i; 7915 7916 for (i = 0; i < handle->kinfo.num_tqps; i++) { 7917 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); 7918 if (ret) 7919 return ret; 7920 } 7921 return 0; 7922 } 7923 7924 static int hclge_set_loopback(struct hnae3_handle *handle, 7925 enum hnae3_loop loop_mode, bool en) 7926 { 7927 struct hclge_vport *vport = hclge_get_vport(handle); 7928 struct hclge_dev *hdev = vport->back; 7929 int ret = 0; 7930 7931 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By 7932 * default, SSU loopback is enabled, so if the SMAC and the DMAC are 7933 * the same, the packets are looped back in the SSU. If SSU loopback 7934 * is disabled, packets can reach MAC even if SMAC is the same as DMAC. 7935 */ 7936 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 7937 u8 switch_param = en ? 
0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); 7938 7939 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, 7940 HCLGE_SWITCH_ALW_LPBK_MASK); 7941 if (ret) 7942 return ret; 7943 } 7944 7945 switch (loop_mode) { 7946 case HNAE3_LOOP_APP: 7947 ret = hclge_set_app_loopback(hdev, en); 7948 break; 7949 case HNAE3_LOOP_SERIAL_SERDES: 7950 case HNAE3_LOOP_PARALLEL_SERDES: 7951 ret = hclge_set_common_loopback(hdev, en, loop_mode); 7952 break; 7953 case HNAE3_LOOP_PHY: 7954 ret = hclge_set_phy_loopback(hdev, en); 7955 break; 7956 case HNAE3_LOOP_EXTERNAL: 7957 break; 7958 default: 7959 ret = -ENOTSUPP; 7960 dev_err(&hdev->pdev->dev, 7961 "loop_mode %d is not supported\n", loop_mode); 7962 break; 7963 } 7964 7965 if (ret) 7966 return ret; 7967 7968 ret = hclge_tqp_enable(handle, en); 7969 if (ret) 7970 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", 7971 en ? "enable" : "disable", ret); 7972 7973 return ret; 7974 } 7975 7976 static int hclge_set_default_loopback(struct hclge_dev *hdev) 7977 { 7978 int ret; 7979 7980 ret = hclge_set_app_loopback(hdev, false); 7981 if (ret) 7982 return ret; 7983 7984 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); 7985 if (ret) 7986 return ret; 7987 7988 return hclge_cfg_common_loopback(hdev, false, 7989 HNAE3_LOOP_PARALLEL_SERDES); 7990 } 7991 7992 static void hclge_flush_link_update(struct hclge_dev *hdev) 7993 { 7994 #define HCLGE_FLUSH_LINK_TIMEOUT 100000 7995 7996 unsigned long last = hdev->serv_processed_cnt; 7997 int i = 0; 7998 7999 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && 8000 i++ < HCLGE_FLUSH_LINK_TIMEOUT && 8001 last == hdev->serv_processed_cnt) 8002 usleep_range(1, 1); 8003 } 8004 8005 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) 8006 { 8007 struct hclge_vport *vport = hclge_get_vport(handle); 8008 struct hclge_dev *hdev = vport->back; 8009 8010 if (enable) { 8011 hclge_task_schedule(hdev, 0); 8012 } else { 8013 /* Set the DOWN flag here to disable link updating */ 8014 set_bit(HCLGE_STATE_DOWN, &hdev->state); 8015 8016 smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */ 8017 hclge_flush_link_update(hdev); 8018 } 8019 } 8020 8021 static int hclge_ae_start(struct hnae3_handle *handle) 8022 { 8023 struct hclge_vport *vport = hclge_get_vport(handle); 8024 struct hclge_dev *hdev = vport->back; 8025 8026 /* mac enable */ 8027 hclge_cfg_mac_mode(hdev, true); 8028 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 8029 hdev->hw.mac.link = 0; 8030 8031 /* reset tqp stats */ 8032 hclge_comm_reset_tqp_stats(handle); 8033 8034 hclge_mac_start_phy(hdev); 8035 8036 return 0; 8037 } 8038 8039 static void hclge_ae_stop(struct hnae3_handle *handle) 8040 { 8041 struct hclge_vport *vport = hclge_get_vport(handle); 8042 struct hclge_dev *hdev = vport->back; 8043 8044 set_bit(HCLGE_STATE_DOWN, &hdev->state); 8045 spin_lock_bh(&hdev->fd_rule_lock); 8046 hclge_clear_arfs_rules(hdev); 8047 spin_unlock_bh(&hdev->fd_rule_lock); 8048 8049 /* If it is not PF reset or FLR, the firmware will disable the MAC, 8050 * so it only need to stop phy here. 
8051 */ 8052 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { 8053 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, 8054 HCLGE_PFC_DISABLE); 8055 if (hdev->reset_type != HNAE3_FUNC_RESET && 8056 hdev->reset_type != HNAE3_FLR_RESET) { 8057 hclge_mac_stop_phy(hdev); 8058 hclge_update_link_status(hdev); 8059 return; 8060 } 8061 } 8062 8063 hclge_reset_tqp(handle); 8064 8065 hclge_config_mac_tnl_int(hdev, false); 8066 8067 /* Mac disable */ 8068 hclge_cfg_mac_mode(hdev, false); 8069 8070 hclge_mac_stop_phy(hdev); 8071 8072 /* reset tqp stats */ 8073 hclge_comm_reset_tqp_stats(handle); 8074 hclge_update_link_status(hdev); 8075 } 8076 8077 int hclge_vport_start(struct hclge_vport *vport) 8078 { 8079 struct hclge_dev *hdev = vport->back; 8080 8081 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 8082 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 8083 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 8084 vport->last_active_jiffies = jiffies; 8085 vport->need_notify = 0; 8086 8087 if (test_bit(vport->vport_id, hdev->vport_config_block)) { 8088 if (vport->vport_id) { 8089 hclge_restore_mac_table_common(vport); 8090 hclge_restore_vport_vlan_table(vport); 8091 } else { 8092 hclge_restore_hw_table(hdev); 8093 } 8094 } 8095 8096 clear_bit(vport->vport_id, hdev->vport_config_block); 8097 8098 return 0; 8099 } 8100 8101 void hclge_vport_stop(struct hclge_vport *vport) 8102 { 8103 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 8104 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 8105 vport->need_notify = 0; 8106 } 8107 8108 static int hclge_client_start(struct hnae3_handle *handle) 8109 { 8110 struct hclge_vport *vport = hclge_get_vport(handle); 8111 8112 return hclge_vport_start(vport); 8113 } 8114 8115 static void hclge_client_stop(struct hnae3_handle *handle) 8116 { 8117 struct hclge_vport *vport = hclge_get_vport(handle); 8118 8119 hclge_vport_stop(vport); 8120 } 8121 8122 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 8123 u16 cmdq_resp, u8 resp_code, 8124 enum hclge_mac_vlan_tbl_opcode op) 8125 { 8126 struct hclge_dev *hdev = vport->back; 8127 8128 if (cmdq_resp) { 8129 dev_err(&hdev->pdev->dev, 8130 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n", 8131 cmdq_resp); 8132 return -EIO; 8133 } 8134 8135 if (op == HCLGE_MAC_VLAN_ADD) { 8136 if (!resp_code || resp_code == 1) 8137 return 0; 8138 else if (resp_code == HCLGE_ADD_UC_OVERFLOW || 8139 resp_code == HCLGE_ADD_MC_OVERFLOW) 8140 return -ENOSPC; 8141 8142 dev_err(&hdev->pdev->dev, 8143 "add mac addr failed for undefined, code=%u.\n", 8144 resp_code); 8145 return -EIO; 8146 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 8147 if (!resp_code) { 8148 return 0; 8149 } else if (resp_code == 1) { 8150 dev_dbg(&hdev->pdev->dev, 8151 "remove mac addr failed for miss.\n"); 8152 return -ENOENT; 8153 } 8154 8155 dev_err(&hdev->pdev->dev, 8156 "remove mac addr failed for undefined, code=%u.\n", 8157 resp_code); 8158 return -EIO; 8159 } else if (op == HCLGE_MAC_VLAN_LKUP) { 8160 if (!resp_code) { 8161 return 0; 8162 } else if (resp_code == 1) { 8163 dev_dbg(&hdev->pdev->dev, 8164 "lookup mac addr failed for miss.\n"); 8165 return -ENOENT; 8166 } 8167 8168 dev_err(&hdev->pdev->dev, 8169 "lookup mac addr failed for undefined, code=%u.\n", 8170 resp_code); 8171 return -EIO; 8172 } 8173 8174 dev_err(&hdev->pdev->dev, 8175 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); 8176 8177 return -EINVAL; 8178 } 8179 8180 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 
8181 { 8182 #define HCLGE_VF_NUM_IN_FIRST_DESC 192 8183 8184 unsigned int word_num; 8185 unsigned int bit_num; 8186 8187 if (vfid > 255 || vfid < 0) 8188 return -EIO; 8189 8190 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { 8191 word_num = vfid / 32; 8192 bit_num = vfid % 32; 8193 if (clr) 8194 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 8195 else 8196 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 8197 } else { 8198 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; 8199 bit_num = vfid % 32; 8200 if (clr) 8201 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 8202 else 8203 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 8204 } 8205 8206 return 0; 8207 } 8208 8209 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 8210 { 8211 #define HCLGE_DESC_NUMBER 3 8212 #define HCLGE_FUNC_NUMBER_PER_DESC 6 8213 int i, j; 8214 8215 for (i = 1; i < HCLGE_DESC_NUMBER; i++) 8216 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 8217 if (desc[i].data[j]) 8218 return false; 8219 8220 return true; 8221 } 8222 8223 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 8224 const u8 *addr, bool is_mc) 8225 { 8226 const unsigned char *mac_addr = addr; 8227 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 8228 (mac_addr[0]) | (mac_addr[1] << 8); 8229 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 8230 8231 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 8232 if (is_mc) { 8233 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 8234 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 8235 } 8236 8237 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 8238 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 8239 } 8240 8241 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 8242 struct hclge_mac_vlan_tbl_entry_cmd *req) 8243 { 8244 struct hclge_dev *hdev = vport->back; 8245 struct hclge_desc desc; 8246 u8 resp_code; 8247 u16 retval; 8248 int ret; 8249 8250 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 8251 8252 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8253 8254 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8255 if (ret) { 8256 dev_err(&hdev->pdev->dev, 8257 "del mac addr failed for cmd_send, ret =%d.\n", 8258 ret); 8259 return ret; 8260 } 8261 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 8262 retval = le16_to_cpu(desc.retval); 8263 8264 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 8265 HCLGE_MAC_VLAN_REMOVE); 8266 } 8267 8268 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 8269 struct hclge_mac_vlan_tbl_entry_cmd *req, 8270 struct hclge_desc *desc, 8271 bool is_mc) 8272 { 8273 struct hclge_dev *hdev = vport->back; 8274 u8 resp_code; 8275 u16 retval; 8276 int ret; 8277 8278 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 8279 if (is_mc) { 8280 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8281 memcpy(desc[0].data, 8282 req, 8283 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8284 hclge_cmd_setup_basic_desc(&desc[1], 8285 HCLGE_OPC_MAC_VLAN_ADD, 8286 true); 8287 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8288 hclge_cmd_setup_basic_desc(&desc[2], 8289 HCLGE_OPC_MAC_VLAN_ADD, 8290 true); 8291 ret = hclge_cmd_send(&hdev->hw, desc, 3); 8292 } else { 8293 memcpy(desc[0].data, 8294 req, 8295 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8296 ret = hclge_cmd_send(&hdev->hw, desc, 1); 8297 } 8298 if (ret) { 8299 dev_err(&hdev->pdev->dev, 8300 "lookup mac 
addr failed for cmd_send, ret =%d.\n", 8301 ret); 8302 return ret; 8303 } 8304 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 8305 retval = le16_to_cpu(desc[0].retval); 8306 8307 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 8308 HCLGE_MAC_VLAN_LKUP); 8309 } 8310 8311 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 8312 struct hclge_mac_vlan_tbl_entry_cmd *req, 8313 struct hclge_desc *mc_desc) 8314 { 8315 struct hclge_dev *hdev = vport->back; 8316 int cfg_status; 8317 u8 resp_code; 8318 u16 retval; 8319 int ret; 8320 8321 if (!mc_desc) { 8322 struct hclge_desc desc; 8323 8324 hclge_cmd_setup_basic_desc(&desc, 8325 HCLGE_OPC_MAC_VLAN_ADD, 8326 false); 8327 memcpy(desc.data, req, 8328 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8329 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8330 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 8331 retval = le16_to_cpu(desc.retval); 8332 8333 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 8334 resp_code, 8335 HCLGE_MAC_VLAN_ADD); 8336 } else { 8337 hclge_comm_cmd_reuse_desc(&mc_desc[0], false); 8338 mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8339 hclge_comm_cmd_reuse_desc(&mc_desc[1], false); 8340 mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8341 hclge_comm_cmd_reuse_desc(&mc_desc[2], false); 8342 mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT); 8343 memcpy(mc_desc[0].data, req, 8344 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8345 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 8346 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 8347 retval = le16_to_cpu(mc_desc[0].retval); 8348 8349 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 8350 resp_code, 8351 HCLGE_MAC_VLAN_ADD); 8352 } 8353 8354 if (ret) { 8355 dev_err(&hdev->pdev->dev, 8356 "add mac addr failed for cmd_send, ret =%d.\n", 8357 ret); 8358 return ret; 8359 } 8360 8361 return cfg_status; 8362 } 8363 8364 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, 8365 u16 *allocated_size) 8366 { 8367 struct hclge_umv_spc_alc_cmd *req; 8368 struct hclge_desc desc; 8369 int ret; 8370 8371 req = (struct hclge_umv_spc_alc_cmd *)desc.data; 8372 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); 8373 8374 req->space_size = cpu_to_le32(space_size); 8375 8376 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8377 if (ret) { 8378 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", 8379 ret); 8380 return ret; 8381 } 8382 8383 *allocated_size = le32_to_cpu(desc.data[1]); 8384 8385 return 0; 8386 } 8387 8388 static int hclge_init_umv_space(struct hclge_dev *hdev) 8389 { 8390 u16 allocated_size = 0; 8391 int ret; 8392 8393 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); 8394 if (ret) 8395 return ret; 8396 8397 if (allocated_size < hdev->wanted_umv_size) 8398 dev_warn(&hdev->pdev->dev, 8399 "failed to alloc umv space, want %u, get %u\n", 8400 hdev->wanted_umv_size, allocated_size); 8401 8402 hdev->max_umv_size = allocated_size; 8403 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); 8404 hdev->share_umv_size = hdev->priv_umv_size + 8405 hdev->max_umv_size % (hdev->num_alloc_vport + 1); 8406 8407 if (hdev->ae_dev->dev_specs.mc_mac_size) 8408 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); 8409 8410 return 0; 8411 } 8412 8413 static void hclge_reset_umv_space(struct hclge_dev *hdev) 8414 { 8415 struct hclge_vport *vport; 8416 int i; 8417 8418 for (i = 0; i < hdev->num_alloc_vport; i++) { 8419 vport = 
&hdev->vport[i]; 8420 vport->used_umv_num = 0; 8421 } 8422 8423 mutex_lock(&hdev->vport_lock); 8424 hdev->share_umv_size = hdev->priv_umv_size + 8425 hdev->max_umv_size % (hdev->num_alloc_vport + 1); 8426 mutex_unlock(&hdev->vport_lock); 8427 8428 hdev->used_mc_mac_num = 0; 8429 } 8430 8431 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) 8432 { 8433 struct hclge_dev *hdev = vport->back; 8434 bool is_full; 8435 8436 if (need_lock) 8437 mutex_lock(&hdev->vport_lock); 8438 8439 is_full = (vport->used_umv_num >= hdev->priv_umv_size && 8440 hdev->share_umv_size == 0); 8441 8442 if (need_lock) 8443 mutex_unlock(&hdev->vport_lock); 8444 8445 return is_full; 8446 } 8447 8448 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) 8449 { 8450 struct hclge_dev *hdev = vport->back; 8451 8452 if (is_free) { 8453 if (vport->used_umv_num > hdev->priv_umv_size) 8454 hdev->share_umv_size++; 8455 8456 if (vport->used_umv_num > 0) 8457 vport->used_umv_num--; 8458 } else { 8459 if (vport->used_umv_num >= hdev->priv_umv_size && 8460 hdev->share_umv_size > 0) 8461 hdev->share_umv_size--; 8462 vport->used_umv_num++; 8463 } 8464 } 8465 8466 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, 8467 const u8 *mac_addr) 8468 { 8469 struct hclge_mac_node *mac_node, *tmp; 8470 8471 list_for_each_entry_safe(mac_node, tmp, list, node) 8472 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 8473 return mac_node; 8474 8475 return NULL; 8476 } 8477 8478 static void hclge_update_mac_node(struct hclge_mac_node *mac_node, 8479 enum HCLGE_MAC_NODE_STATE state) 8480 { 8481 switch (state) { 8482 /* from set_rx_mode or tmp_add_list */ 8483 case HCLGE_MAC_TO_ADD: 8484 if (mac_node->state == HCLGE_MAC_TO_DEL) 8485 mac_node->state = HCLGE_MAC_ACTIVE; 8486 break; 8487 /* only from set_rx_mode */ 8488 case HCLGE_MAC_TO_DEL: 8489 if (mac_node->state == HCLGE_MAC_TO_ADD) { 8490 list_del(&mac_node->node); 8491 kfree(mac_node); 8492 } else { 8493 mac_node->state = HCLGE_MAC_TO_DEL; 8494 } 8495 break; 8496 /* only from tmp_add_list, the mac_node->state won't be 8497 * ACTIVE. 8498 */ 8499 case HCLGE_MAC_ACTIVE: 8500 if (mac_node->state == HCLGE_MAC_TO_ADD) 8501 mac_node->state = HCLGE_MAC_ACTIVE; 8502 8503 break; 8504 } 8505 } 8506 8507 int hclge_update_mac_list(struct hclge_vport *vport, 8508 enum HCLGE_MAC_NODE_STATE state, 8509 enum HCLGE_MAC_ADDR_TYPE mac_type, 8510 const unsigned char *addr) 8511 { 8512 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8513 struct hclge_dev *hdev = vport->back; 8514 struct hclge_mac_node *mac_node; 8515 struct list_head *list; 8516 8517 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 8518 &vport->uc_mac_list : &vport->mc_mac_list; 8519 8520 spin_lock_bh(&vport->mac_list_lock); 8521 8522 /* if the mac addr is already in the mac list, no need to add a new 8523 * one into it, just check the mac addr state, convert it to a new 8524 * state, or just remove it, or do nothing. 
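	 * In short: a TO_ADD request on a TO_DEL node makes it ACTIVE again,
	 * a TO_DEL request on a TO_ADD node frees the node, and a TO_DEL
	 * request on an ACTIVE node marks it TO_DEL; see
	 * hclge_update_mac_node() above for the full transitions.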
8525 */ 8526 mac_node = hclge_find_mac_node(list, addr); 8527 if (mac_node) { 8528 hclge_update_mac_node(mac_node, state); 8529 spin_unlock_bh(&vport->mac_list_lock); 8530 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 8531 return 0; 8532 } 8533 8534 /* if this address is never added, unnecessary to delete */ 8535 if (state == HCLGE_MAC_TO_DEL) { 8536 spin_unlock_bh(&vport->mac_list_lock); 8537 hnae3_format_mac_addr(format_mac_addr, addr); 8538 dev_err(&hdev->pdev->dev, 8539 "failed to delete address %s from mac list\n", 8540 format_mac_addr); 8541 return -ENOENT; 8542 } 8543 8544 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 8545 if (!mac_node) { 8546 spin_unlock_bh(&vport->mac_list_lock); 8547 return -ENOMEM; 8548 } 8549 8550 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 8551 8552 mac_node->state = state; 8553 ether_addr_copy(mac_node->mac_addr, addr); 8554 list_add_tail(&mac_node->node, list); 8555 8556 spin_unlock_bh(&vport->mac_list_lock); 8557 8558 return 0; 8559 } 8560 8561 static int hclge_add_uc_addr(struct hnae3_handle *handle, 8562 const unsigned char *addr) 8563 { 8564 struct hclge_vport *vport = hclge_get_vport(handle); 8565 8566 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC, 8567 addr); 8568 } 8569 8570 int hclge_add_uc_addr_common(struct hclge_vport *vport, 8571 const unsigned char *addr) 8572 { 8573 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8574 struct hclge_dev *hdev = vport->back; 8575 struct hclge_mac_vlan_tbl_entry_cmd req; 8576 struct hclge_desc desc; 8577 u16 egress_port = 0; 8578 int ret; 8579 8580 /* mac addr check */ 8581 if (is_zero_ether_addr(addr) || 8582 is_broadcast_ether_addr(addr) || 8583 is_multicast_ether_addr(addr)) { 8584 hnae3_format_mac_addr(format_mac_addr, addr); 8585 dev_err(&hdev->pdev->dev, 8586 "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n", 8587 format_mac_addr, is_zero_ether_addr(addr), 8588 is_broadcast_ether_addr(addr), 8589 is_multicast_ether_addr(addr)); 8590 return -EINVAL; 8591 } 8592 8593 memset(&req, 0, sizeof(req)); 8594 8595 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 8596 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 8597 8598 req.egress_port = cpu_to_le16(egress_port); 8599 8600 hclge_prepare_mac_addr(&req, addr, false); 8601 8602 /* Lookup the mac address in the mac_vlan table, and add 8603 * it if the entry is inexistent. Repeated unicast entry 8604 * is not allowed in the mac vlan table. 
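	 * A -ENOENT result from the lookup therefore means the address can be
	 * added (if there is UMV space left), while a successful lookup means
	 * it already exists and -EEXIST is returned to the caller.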
8605 */ 8606 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 8607 if (ret == -ENOENT) { 8608 mutex_lock(&hdev->vport_lock); 8609 if (!hclge_is_umv_space_full(vport, false)) { 8610 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); 8611 if (!ret) 8612 hclge_update_umv_space(vport, false); 8613 mutex_unlock(&hdev->vport_lock); 8614 return ret; 8615 } 8616 mutex_unlock(&hdev->vport_lock); 8617 8618 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) 8619 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", 8620 hdev->priv_umv_size); 8621 8622 return -ENOSPC; 8623 } 8624 8625 /* check if we just hit the duplicate */ 8626 if (!ret) 8627 return -EEXIST; 8628 8629 return ret; 8630 } 8631 8632 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 8633 const unsigned char *addr) 8634 { 8635 struct hclge_vport *vport = hclge_get_vport(handle); 8636 8637 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC, 8638 addr); 8639 } 8640 8641 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 8642 const unsigned char *addr) 8643 { 8644 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8645 struct hclge_dev *hdev = vport->back; 8646 struct hclge_mac_vlan_tbl_entry_cmd req; 8647 int ret; 8648 8649 /* mac addr check */ 8650 if (is_zero_ether_addr(addr) || 8651 is_broadcast_ether_addr(addr) || 8652 is_multicast_ether_addr(addr)) { 8653 hnae3_format_mac_addr(format_mac_addr, addr); 8654 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", 8655 format_mac_addr); 8656 return -EINVAL; 8657 } 8658 8659 memset(&req, 0, sizeof(req)); 8660 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 8661 hclge_prepare_mac_addr(&req, addr, false); 8662 ret = hclge_remove_mac_vlan_tbl(vport, &req); 8663 if (!ret || ret == -ENOENT) { 8664 mutex_lock(&hdev->vport_lock); 8665 hclge_update_umv_space(vport, true); 8666 mutex_unlock(&hdev->vport_lock); 8667 return 0; 8668 } 8669 8670 return ret; 8671 } 8672 8673 static int hclge_add_mc_addr(struct hnae3_handle *handle, 8674 const unsigned char *addr) 8675 { 8676 struct hclge_vport *vport = hclge_get_vport(handle); 8677 8678 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC, 8679 addr); 8680 } 8681 8682 int hclge_add_mc_addr_common(struct hclge_vport *vport, 8683 const unsigned char *addr) 8684 { 8685 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8686 struct hclge_dev *hdev = vport->back; 8687 struct hclge_mac_vlan_tbl_entry_cmd req; 8688 struct hclge_desc desc[3]; 8689 bool is_new_addr = false; 8690 int status; 8691 8692 /* mac addr check */ 8693 if (!is_multicast_ether_addr(addr)) { 8694 hnae3_format_mac_addr(format_mac_addr, addr); 8695 dev_err(&hdev->pdev->dev, 8696 "Add mc mac err! 
invalid mac:%s.\n", 8697 format_mac_addr); 8698 return -EINVAL; 8699 } 8700 memset(&req, 0, sizeof(req)); 8701 hclge_prepare_mac_addr(&req, addr, true); 8702 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 8703 if (status) { 8704 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && 8705 hdev->used_mc_mac_num >= 8706 hdev->ae_dev->dev_specs.mc_mac_size) 8707 goto err_no_space; 8708 8709 is_new_addr = true; 8710 8711 /* This mac addr do not exist, add new entry for it */ 8712 memset(desc[0].data, 0, sizeof(desc[0].data)); 8713 memset(desc[1].data, 0, sizeof(desc[0].data)); 8714 memset(desc[2].data, 0, sizeof(desc[0].data)); 8715 } 8716 status = hclge_update_desc_vfid(desc, vport->vport_id, false); 8717 if (status) 8718 return status; 8719 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 8720 if (status == -ENOSPC) 8721 goto err_no_space; 8722 else if (!status && is_new_addr) 8723 hdev->used_mc_mac_num++; 8724 8725 return status; 8726 8727 err_no_space: 8728 /* if already overflow, not to print each time */ 8729 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) { 8730 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; 8731 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); 8732 } 8733 8734 return -ENOSPC; 8735 } 8736 8737 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 8738 const unsigned char *addr) 8739 { 8740 struct hclge_vport *vport = hclge_get_vport(handle); 8741 8742 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC, 8743 addr); 8744 } 8745 8746 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 8747 const unsigned char *addr) 8748 { 8749 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8750 struct hclge_dev *hdev = vport->back; 8751 struct hclge_mac_vlan_tbl_entry_cmd req; 8752 enum hclge_comm_cmd_status status; 8753 struct hclge_desc desc[3]; 8754 8755 /* mac addr check */ 8756 if (!is_multicast_ether_addr(addr)) { 8757 hnae3_format_mac_addr(format_mac_addr, addr); 8758 dev_dbg(&hdev->pdev->dev, 8759 "Remove mc mac err! 
invalid mac:%s.\n", 8760 format_mac_addr); 8761 return -EINVAL; 8762 } 8763 8764 memset(&req, 0, sizeof(req)); 8765 hclge_prepare_mac_addr(&req, addr, true); 8766 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 8767 if (!status) { 8768 /* This mac addr exist, remove this handle's VFID for it */ 8769 status = hclge_update_desc_vfid(desc, vport->vport_id, true); 8770 if (status) 8771 return status; 8772 8773 if (hclge_is_all_function_id_zero(desc)) { 8774 /* All the vfid is zero, so need to delete this entry */ 8775 status = hclge_remove_mac_vlan_tbl(vport, &req); 8776 if (!status) 8777 hdev->used_mc_mac_num--; 8778 } else { 8779 /* Not all the vfid is zero, update the vfid */ 8780 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 8781 } 8782 } else if (status == -ENOENT) { 8783 status = 0; 8784 } 8785 8786 return status; 8787 } 8788 8789 static void hclge_sync_vport_mac_list(struct hclge_vport *vport, 8790 struct list_head *list, 8791 enum HCLGE_MAC_ADDR_TYPE mac_type) 8792 { 8793 int (*sync)(struct hclge_vport *vport, const unsigned char *addr); 8794 struct hclge_mac_node *mac_node, *tmp; 8795 int ret; 8796 8797 if (mac_type == HCLGE_MAC_ADDR_UC) 8798 sync = hclge_add_uc_addr_common; 8799 else 8800 sync = hclge_add_mc_addr_common; 8801 8802 list_for_each_entry_safe(mac_node, tmp, list, node) { 8803 ret = sync(vport, mac_node->mac_addr); 8804 if (!ret) { 8805 mac_node->state = HCLGE_MAC_ACTIVE; 8806 } else { 8807 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, 8808 &vport->state); 8809 8810 /* If one unicast mac address is existing in hardware, 8811 * we need to try whether other unicast mac addresses 8812 * are new addresses that can be added. 8813 * Multicast mac address can be reusable, even though 8814 * there is no space to add new multicast mac address, 8815 * we should check whether other mac addresses are 8816 * existing in hardware for reuse. 8817 */ 8818 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) || 8819 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC)) 8820 break; 8821 } 8822 } 8823 } 8824 8825 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, 8826 struct list_head *list, 8827 enum HCLGE_MAC_ADDR_TYPE mac_type) 8828 { 8829 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); 8830 struct hclge_mac_node *mac_node, *tmp; 8831 int ret; 8832 8833 if (mac_type == HCLGE_MAC_ADDR_UC) 8834 unsync = hclge_rm_uc_addr_common; 8835 else 8836 unsync = hclge_rm_mc_addr_common; 8837 8838 list_for_each_entry_safe(mac_node, tmp, list, node) { 8839 ret = unsync(vport, mac_node->mac_addr); 8840 if (!ret || ret == -ENOENT) { 8841 list_del(&mac_node->node); 8842 kfree(mac_node); 8843 } else { 8844 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, 8845 &vport->state); 8846 break; 8847 } 8848 } 8849 } 8850 8851 static bool hclge_sync_from_add_list(struct list_head *add_list, 8852 struct list_head *mac_list) 8853 { 8854 struct hclge_mac_node *mac_node, *tmp, *new_node; 8855 bool all_added = true; 8856 8857 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 8858 if (mac_node->state == HCLGE_MAC_TO_ADD) 8859 all_added = false; 8860 8861 /* if the mac address from tmp_add_list is not in the 8862 * uc/mc_mac_list, it means have received a TO_DEL request 8863 * during the time window of adding the mac address into mac 8864 * table. if mac_node state is ACTIVE, then change it to TO_DEL, 8865 * then it will be removed at next time. else it must be TO_ADD, 8866 * this address hasn't been added into mac table, 8867 * so just remove the mac node. 
8868 */ 8869 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); 8870 if (new_node) { 8871 hclge_update_mac_node(new_node, mac_node->state); 8872 list_del(&mac_node->node); 8873 kfree(mac_node); 8874 } else if (mac_node->state == HCLGE_MAC_ACTIVE) { 8875 mac_node->state = HCLGE_MAC_TO_DEL; 8876 list_move_tail(&mac_node->node, mac_list); 8877 } else { 8878 list_del(&mac_node->node); 8879 kfree(mac_node); 8880 } 8881 } 8882 8883 return all_added; 8884 } 8885 8886 static void hclge_sync_from_del_list(struct list_head *del_list, 8887 struct list_head *mac_list) 8888 { 8889 struct hclge_mac_node *mac_node, *tmp, *new_node; 8890 8891 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 8892 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); 8893 if (new_node) { 8894 /* If the mac addr exists in the mac list, it means 8895 * received a new TO_ADD request during the time window 8896 * of configuring the mac address. For the mac node 8897 * state is TO_ADD, and the address is already in the 8898 * in the hardware(due to delete fail), so we just need 8899 * to change the mac node state to ACTIVE. 8900 */ 8901 new_node->state = HCLGE_MAC_ACTIVE; 8902 list_del(&mac_node->node); 8903 kfree(mac_node); 8904 } else { 8905 list_move_tail(&mac_node->node, mac_list); 8906 } 8907 } 8908 } 8909 8910 static void hclge_update_overflow_flags(struct hclge_vport *vport, 8911 enum HCLGE_MAC_ADDR_TYPE mac_type, 8912 bool is_all_added) 8913 { 8914 if (mac_type == HCLGE_MAC_ADDR_UC) { 8915 if (is_all_added) 8916 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; 8917 else if (hclge_is_umv_space_full(vport, true)) 8918 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; 8919 } else { 8920 if (is_all_added) 8921 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; 8922 else 8923 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; 8924 } 8925 } 8926 8927 static void hclge_sync_vport_mac_table(struct hclge_vport *vport, 8928 enum HCLGE_MAC_ADDR_TYPE mac_type) 8929 { 8930 struct hclge_mac_node *mac_node, *tmp, *new_node; 8931 struct list_head tmp_add_list, tmp_del_list; 8932 struct list_head *list; 8933 bool all_added; 8934 8935 INIT_LIST_HEAD(&tmp_add_list); 8936 INIT_LIST_HEAD(&tmp_del_list); 8937 8938 /* move the mac addr to the tmp_add_list and tmp_del_list, then 8939 * we can add/delete these mac addr outside the spin lock 8940 */ 8941 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 8942 &vport->uc_mac_list : &vport->mc_mac_list; 8943 8944 spin_lock_bh(&vport->mac_list_lock); 8945 8946 list_for_each_entry_safe(mac_node, tmp, list, node) { 8947 switch (mac_node->state) { 8948 case HCLGE_MAC_TO_DEL: 8949 list_move_tail(&mac_node->node, &tmp_del_list); 8950 break; 8951 case HCLGE_MAC_TO_ADD: 8952 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 8953 if (!new_node) 8954 goto stop_traverse; 8955 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 8956 new_node->state = mac_node->state; 8957 list_add_tail(&new_node->node, &tmp_add_list); 8958 break; 8959 default: 8960 break; 8961 } 8962 } 8963 8964 stop_traverse: 8965 spin_unlock_bh(&vport->mac_list_lock); 8966 8967 /* delete first, in order to get max mac table space for adding */ 8968 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type); 8969 hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type); 8970 8971 /* if some mac addresses were added/deleted fail, move back to the 8972 * mac_list, and retry at next time. 
8973 */ 8974 spin_lock_bh(&vport->mac_list_lock); 8975 8976 hclge_sync_from_del_list(&tmp_del_list, list); 8977 all_added = hclge_sync_from_add_list(&tmp_add_list, list); 8978 8979 spin_unlock_bh(&vport->mac_list_lock); 8980 8981 hclge_update_overflow_flags(vport, mac_type, all_added); 8982 } 8983 8984 static bool hclge_need_sync_mac_table(struct hclge_vport *vport) 8985 { 8986 struct hclge_dev *hdev = vport->back; 8987 8988 if (test_bit(vport->vport_id, hdev->vport_config_block)) 8989 return false; 8990 8991 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) 8992 return true; 8993 8994 return false; 8995 } 8996 8997 static void hclge_sync_mac_table(struct hclge_dev *hdev) 8998 { 8999 int i; 9000 9001 for (i = 0; i < hdev->num_alloc_vport; i++) { 9002 struct hclge_vport *vport = &hdev->vport[i]; 9003 9004 if (!hclge_need_sync_mac_table(vport)) 9005 continue; 9006 9007 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC); 9008 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC); 9009 } 9010 } 9011 9012 static void hclge_build_del_list(struct list_head *list, 9013 bool is_del_list, 9014 struct list_head *tmp_del_list) 9015 { 9016 struct hclge_mac_node *mac_cfg, *tmp; 9017 9018 list_for_each_entry_safe(mac_cfg, tmp, list, node) { 9019 switch (mac_cfg->state) { 9020 case HCLGE_MAC_TO_DEL: 9021 case HCLGE_MAC_ACTIVE: 9022 list_move_tail(&mac_cfg->node, tmp_del_list); 9023 break; 9024 case HCLGE_MAC_TO_ADD: 9025 if (is_del_list) { 9026 list_del(&mac_cfg->node); 9027 kfree(mac_cfg); 9028 } 9029 break; 9030 } 9031 } 9032 } 9033 9034 static void hclge_unsync_del_list(struct hclge_vport *vport, 9035 int (*unsync)(struct hclge_vport *vport, 9036 const unsigned char *addr), 9037 bool is_del_list, 9038 struct list_head *tmp_del_list) 9039 { 9040 struct hclge_mac_node *mac_cfg, *tmp; 9041 int ret; 9042 9043 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) { 9044 ret = unsync(vport, mac_cfg->mac_addr); 9045 if (!ret || ret == -ENOENT) { 9046 /* clear all mac addr from hardware, but remain these 9047 * mac addr in the mac list, and restore them after 9048 * vf reset finished. 
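			 * When is_del_list is false (VF reset path), an ACTIVE
			 * entry is downgraded to TO_ADD so it can be
			 * re-programmed later; otherwise the node is freed. If
			 * the hardware removal fails and is_del_list is true,
			 * the node is only marked TO_DEL for a later retry.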
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}
}

void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_build_del_list(list, is_del_list, &tmp_del_list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}

/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "failed to completely uninit %s mac list for vport %u\n",
			 mac_type == HCLGE_MAC_ADDR_UC ?
"uc" : "mc", 9134 vport->vport_id); 9135 9136 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { 9137 list_del(&mac_node->node); 9138 kfree(mac_node); 9139 } 9140 } 9141 9142 static void hclge_uninit_mac_table(struct hclge_dev *hdev) 9143 { 9144 struct hclge_vport *vport; 9145 int i; 9146 9147 for (i = 0; i < hdev->num_alloc_vport; i++) { 9148 vport = &hdev->vport[i]; 9149 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC); 9150 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC); 9151 } 9152 } 9153 9154 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 9155 u16 cmdq_resp, u8 resp_code) 9156 { 9157 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 9158 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 9159 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 9160 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 9161 9162 int return_status; 9163 9164 if (cmdq_resp) { 9165 dev_err(&hdev->pdev->dev, 9166 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 9167 cmdq_resp); 9168 return -EIO; 9169 } 9170 9171 switch (resp_code) { 9172 case HCLGE_ETHERTYPE_SUCCESS_ADD: 9173 case HCLGE_ETHERTYPE_ALREADY_ADD: 9174 return_status = 0; 9175 break; 9176 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 9177 dev_err(&hdev->pdev->dev, 9178 "add mac ethertype failed for manager table overflow.\n"); 9179 return_status = -EIO; 9180 break; 9181 case HCLGE_ETHERTYPE_KEY_CONFLICT: 9182 dev_err(&hdev->pdev->dev, 9183 "add mac ethertype failed for key conflict.\n"); 9184 return_status = -EIO; 9185 break; 9186 default: 9187 dev_err(&hdev->pdev->dev, 9188 "add mac ethertype failed for undefined, code=%u.\n", 9189 resp_code); 9190 return_status = -EIO; 9191 } 9192 9193 return return_status; 9194 } 9195 9196 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, 9197 u8 *mac_addr) 9198 { 9199 struct hclge_vport *vport = hclge_get_vport(handle); 9200 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 9201 struct hclge_dev *hdev = vport->back; 9202 9203 vport = hclge_get_vf_vport(hdev, vf); 9204 if (!vport) 9205 return -EINVAL; 9206 9207 hnae3_format_mac_addr(format_mac_addr, mac_addr); 9208 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { 9209 dev_info(&hdev->pdev->dev, 9210 "Specified MAC(=%s) is same as before, no change committed!\n", 9211 format_mac_addr); 9212 return 0; 9213 } 9214 9215 ether_addr_copy(vport->vf_info.mac, mac_addr); 9216 9217 /* there is a timewindow for PF to know VF unalive, it may 9218 * cause send mailbox fail, but it doesn't matter, VF will 9219 * query it when reinit. 
9220 */ 9221 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { 9222 dev_info(&hdev->pdev->dev, 9223 "MAC of VF %d has been set to %s, and it will be reinitialized!\n", 9224 vf, format_mac_addr); 9225 (void)hclge_inform_reset_assert_to_vf(vport); 9226 return 0; 9227 } 9228 9229 dev_info(&hdev->pdev->dev, 9230 "MAC of VF %d has been set to %s, will be active after VF reset\n", 9231 vf, format_mac_addr); 9232 return 0; 9233 } 9234 9235 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 9236 const struct hclge_mac_mgr_tbl_entry_cmd *req) 9237 { 9238 struct hclge_desc desc; 9239 u8 resp_code; 9240 u16 retval; 9241 int ret; 9242 9243 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 9244 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 9245 9246 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9247 if (ret) { 9248 dev_err(&hdev->pdev->dev, 9249 "add mac ethertype failed for cmd_send, ret =%d.\n", 9250 ret); 9251 return ret; 9252 } 9253 9254 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 9255 retval = le16_to_cpu(desc.retval); 9256 9257 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 9258 } 9259 9260 static int init_mgr_tbl(struct hclge_dev *hdev) 9261 { 9262 int ret; 9263 int i; 9264 9265 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 9266 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 9267 if (ret) { 9268 dev_err(&hdev->pdev->dev, 9269 "add mac ethertype failed, ret =%d.\n", 9270 ret); 9271 return ret; 9272 } 9273 } 9274 9275 return 0; 9276 } 9277 9278 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 9279 { 9280 struct hclge_vport *vport = hclge_get_vport(handle); 9281 struct hclge_dev *hdev = vport->back; 9282 9283 ether_addr_copy(p, hdev->hw.mac.mac_addr); 9284 } 9285 9286 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, 9287 const u8 *old_addr, const u8 *new_addr) 9288 { 9289 struct list_head *list = &vport->uc_mac_list; 9290 struct hclge_mac_node *old_node, *new_node; 9291 9292 new_node = hclge_find_mac_node(list, new_addr); 9293 if (!new_node) { 9294 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 9295 if (!new_node) 9296 return -ENOMEM; 9297 9298 new_node->state = HCLGE_MAC_TO_ADD; 9299 ether_addr_copy(new_node->mac_addr, new_addr); 9300 list_add(&new_node->node, list); 9301 } else { 9302 if (new_node->state == HCLGE_MAC_TO_DEL) 9303 new_node->state = HCLGE_MAC_ACTIVE; 9304 9305 /* make sure the new addr is in the list head, avoid dev 9306 * addr may be not re-added into mac table for the umv space 9307 * limitation after global/imp reset which will clear mac 9308 * table by hardware. 
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}

static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	unsigned char *old_addr = NULL;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
		dev_err(&hdev->pdev->dev,
			"change uc mac err! invalid mac: %s.\n",
			format_mac_addr);
		return -EINVAL;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to configure mac pause address, ret = %d\n",
			ret);
		return ret;
	}

	if (!is_first)
		old_addr = hdev->hw.mac.mac_addr;

	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%s, ret = %d\n",
			format_mac_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);

		return ret;
	}
	/* We must update the device address while holding mac_list_lock to
	 * prevent it from being removed concurrently by the set_rx_mode path.
9378 */ 9379 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 9380 spin_unlock_bh(&vport->mac_list_lock); 9381 9382 hclge_task_schedule(hdev, 0); 9383 9384 return 0; 9385 } 9386 9387 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) 9388 { 9389 struct mii_ioctl_data *data = if_mii(ifr); 9390 9391 if (!hnae3_dev_phy_imp_supported(hdev)) 9392 return -EOPNOTSUPP; 9393 9394 switch (cmd) { 9395 case SIOCGMIIPHY: 9396 data->phy_id = hdev->hw.mac.phy_addr; 9397 /* this command reads phy id and register at the same time */ 9398 fallthrough; 9399 case SIOCGMIIREG: 9400 data->val_out = hclge_read_phy_reg(hdev, data->reg_num); 9401 return 0; 9402 9403 case SIOCSMIIREG: 9404 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); 9405 default: 9406 return -EOPNOTSUPP; 9407 } 9408 } 9409 9410 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, 9411 int cmd) 9412 { 9413 struct hclge_vport *vport = hclge_get_vport(handle); 9414 struct hclge_dev *hdev = vport->back; 9415 9416 switch (cmd) { 9417 case SIOCGHWTSTAMP: 9418 return hclge_ptp_get_cfg(hdev, ifr); 9419 case SIOCSHWTSTAMP: 9420 return hclge_ptp_set_cfg(hdev, ifr); 9421 default: 9422 if (!hdev->hw.mac.phydev) 9423 return hclge_mii_ioctl(hdev, ifr, cmd); 9424 } 9425 9426 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); 9427 } 9428 9429 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, 9430 bool bypass_en) 9431 { 9432 struct hclge_port_vlan_filter_bypass_cmd *req; 9433 struct hclge_desc desc; 9434 int ret; 9435 9436 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false); 9437 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; 9438 req->vf_id = vf_id; 9439 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B, 9440 bypass_en ? 1 : 0); 9441 9442 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9443 if (ret) 9444 dev_err(&hdev->pdev->dev, 9445 "failed to set vport%u port vlan filter bypass state, ret = %d.\n", 9446 vf_id, ret); 9447 9448 return ret; 9449 } 9450 9451 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 9452 u8 fe_type, bool filter_en, u8 vf_id) 9453 { 9454 struct hclge_vlan_filter_ctrl_cmd *req; 9455 struct hclge_desc desc; 9456 int ret; 9457 9458 /* read current vlan filter parameter */ 9459 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); 9460 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 9461 req->vlan_type = vlan_type; 9462 req->vf_id = vf_id; 9463 9464 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9465 if (ret) { 9466 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", 9467 vf_id, ret); 9468 return ret; 9469 } 9470 9471 /* modify and write new config parameter */ 9472 hclge_comm_cmd_reuse_desc(&desc, false); 9473 req->vlan_fe = filter_en ? 
9474 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); 9475 9476 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9477 if (ret) 9478 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", 9479 vf_id, ret); 9480 9481 return ret; 9482 } 9483 9484 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) 9485 { 9486 struct hclge_dev *hdev = vport->back; 9487 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 9488 int ret; 9489 9490 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 9491 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9492 HCLGE_FILTER_FE_EGRESS_V1_B, 9493 enable, vport->vport_id); 9494 9495 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9496 HCLGE_FILTER_FE_EGRESS, enable, 9497 vport->vport_id); 9498 if (ret) 9499 return ret; 9500 9501 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { 9502 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, 9503 !enable); 9504 } else if (!vport->vport_id) { 9505 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 9506 enable = false; 9507 9508 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 9509 HCLGE_FILTER_FE_INGRESS, 9510 enable, 0); 9511 } 9512 9513 return ret; 9514 } 9515 9516 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) 9517 { 9518 struct hnae3_handle *handle = &vport->nic; 9519 struct hclge_vport_vlan_cfg *vlan, *tmp; 9520 struct hclge_dev *hdev = vport->back; 9521 9522 if (vport->vport_id) { 9523 if (vport->port_base_vlan_cfg.state != 9524 HNAE3_PORT_BASE_VLAN_DISABLE) 9525 return true; 9526 9527 if (vport->vf_info.trusted && vport->vf_info.request_uc_en) 9528 return false; 9529 } else if (handle->netdev_flags & HNAE3_USER_UPE) { 9530 return false; 9531 } 9532 9533 if (!vport->req_vlan_fltr_en) 9534 return false; 9535 9536 /* compatible with former device, always enable vlan filter */ 9537 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) 9538 return true; 9539 9540 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) 9541 if (vlan->vlan_id != 0) 9542 return true; 9543 9544 return false; 9545 } 9546 9547 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) 9548 { 9549 struct hclge_dev *hdev = vport->back; 9550 bool need_en; 9551 int ret; 9552 9553 mutex_lock(&hdev->vport_lock); 9554 9555 vport->req_vlan_fltr_en = request_en; 9556 9557 need_en = hclge_need_enable_vport_vlan_filter(vport); 9558 if (need_en == vport->cur_vlan_fltr_en) { 9559 mutex_unlock(&hdev->vport_lock); 9560 return 0; 9561 } 9562 9563 ret = hclge_set_vport_vlan_filter(vport, need_en); 9564 if (ret) { 9565 mutex_unlock(&hdev->vport_lock); 9566 return ret; 9567 } 9568 9569 vport->cur_vlan_fltr_en = need_en; 9570 9571 mutex_unlock(&hdev->vport_lock); 9572 9573 return 0; 9574 } 9575 9576 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 9577 { 9578 struct hclge_vport *vport = hclge_get_vport(handle); 9579 9580 return hclge_enable_vport_vlan_filter(vport, enable); 9581 } 9582 9583 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, 9584 bool is_kill, u16 vlan, 9585 struct hclge_desc *desc) 9586 { 9587 struct hclge_vlan_filter_vf_cfg_cmd *req0; 9588 struct hclge_vlan_filter_vf_cfg_cmd *req1; 9589 u8 vf_byte_val; 9590 u8 vf_byte_off; 9591 int ret; 9592 9593 hclge_cmd_setup_basic_desc(&desc[0], 9594 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 9595 hclge_cmd_setup_basic_desc(&desc[1], 9596 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 
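	/* The VF VLAN filter command uses two chained descriptors, linked by
	 * the HCLGE_COMM_CMD_FLAG_NEXT flag set just below. Each VF is one
	 * bit in vf_bitmap: vfid / 8 selects the byte, and bytes at or beyond
	 * HCLGE_MAX_VF_BYTES spill over from desc[0] into desc[1].
	 */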
9597 9598 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 9599 9600 vf_byte_off = vfid / 8; 9601 vf_byte_val = 1 << (vfid % 8); 9602 9603 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 9604 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 9605 9606 req0->vlan_id = cpu_to_le16(vlan); 9607 req0->vlan_cfg = is_kill; 9608 9609 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 9610 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 9611 else 9612 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 9613 9614 ret = hclge_cmd_send(&hdev->hw, desc, 2); 9615 if (ret) { 9616 dev_err(&hdev->pdev->dev, 9617 "Send vf vlan command fail, ret =%d.\n", 9618 ret); 9619 return ret; 9620 } 9621 9622 return 0; 9623 } 9624 9625 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid, 9626 bool is_kill, struct hclge_desc *desc) 9627 { 9628 struct hclge_vlan_filter_vf_cfg_cmd *req; 9629 9630 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 9631 9632 if (!is_kill) { 9633 #define HCLGE_VF_VLAN_NO_ENTRY 2 9634 if (!req->resp_code || req->resp_code == 1) 9635 return 0; 9636 9637 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { 9638 set_bit(vfid, hdev->vf_vlan_full); 9639 dev_warn(&hdev->pdev->dev, 9640 "vf vlan table is full, vf vlan filter is disabled\n"); 9641 return 0; 9642 } 9643 9644 dev_err(&hdev->pdev->dev, 9645 "Add vf vlan filter fail, ret =%u.\n", 9646 req->resp_code); 9647 } else { 9648 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 9649 if (!req->resp_code) 9650 return 0; 9651 9652 /* vf vlan filter is disabled when vf vlan table is full, 9653 * then new vlan id will not be added into vf vlan table. 9654 * Just return 0 without warning, avoid massive verbose 9655 * print logs when unload. 9656 */ 9657 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) 9658 return 0; 9659 9660 dev_err(&hdev->pdev->dev, 9661 "Kill vf vlan filter fail, ret =%u.\n", 9662 req->resp_code); 9663 } 9664 9665 return -EIO; 9666 } 9667 9668 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, 9669 bool is_kill, u16 vlan) 9670 { 9671 struct hclge_vport *vport = &hdev->vport[vfid]; 9672 struct hclge_desc desc[2]; 9673 int ret; 9674 9675 /* if vf vlan table is full, firmware will close vf vlan filter, it 9676 * is unable and unnecessary to add new vlan id to vf vlan filter. 9677 * If spoof check is enable, and vf vlan is full, it shouldn't add 9678 * new vlan, because tx packets with these vlan id will be dropped. 
9679 */ 9680 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { 9681 if (vport->vf_info.spoofchk && vlan) { 9682 dev_err(&hdev->pdev->dev, 9683 "Can't add vlan due to spoof check is on and vf vlan table is full\n"); 9684 return -EPERM; 9685 } 9686 return 0; 9687 } 9688 9689 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); 9690 if (ret) 9691 return ret; 9692 9693 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); 9694 } 9695 9696 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 9697 u16 vlan_id, bool is_kill) 9698 { 9699 struct hclge_vlan_filter_pf_cfg_cmd *req; 9700 struct hclge_desc desc; 9701 u8 vlan_offset_byte_val; 9702 u8 vlan_offset_byte; 9703 u8 vlan_offset_160; 9704 int ret; 9705 9706 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 9707 9708 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; 9709 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / 9710 HCLGE_VLAN_BYTE_SIZE; 9711 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); 9712 9713 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 9714 req->vlan_offset = vlan_offset_160; 9715 req->vlan_cfg = is_kill; 9716 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 9717 9718 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9719 if (ret) 9720 dev_err(&hdev->pdev->dev, 9721 "port vlan command, send fail, ret =%d.\n", ret); 9722 return ret; 9723 } 9724 9725 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, 9726 u16 vlan_id, bool is_kill) 9727 { 9728 /* vlan 0 may be added twice when 8021q module is enabled */ 9729 if (!is_kill && !vlan_id && 9730 test_bit(vport_id, hdev->vlan_table[vlan_id])) 9731 return false; 9732 9733 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 9734 dev_warn(&hdev->pdev->dev, 9735 "Add port vlan failed, vport %u is already in vlan %u\n", 9736 vport_id, vlan_id); 9737 return false; 9738 } 9739 9740 if (is_kill && 9741 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 9742 dev_warn(&hdev->pdev->dev, 9743 "Delete port vlan failed, vport %u is not in vlan %u\n", 9744 vport_id, vlan_id); 9745 return false; 9746 } 9747 9748 return true; 9749 } 9750 9751 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 9752 u16 vport_id, u16 vlan_id, 9753 bool is_kill) 9754 { 9755 u16 vport_idx, vport_num = 0; 9756 int ret; 9757 9758 if (is_kill && !vlan_id) 9759 return 0; 9760 9761 if (vlan_id >= VLAN_N_VID) 9762 return -EINVAL; 9763 9764 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); 9765 if (ret) { 9766 dev_err(&hdev->pdev->dev, 9767 "Set %u vport vlan filter config fail, ret =%d.\n", 9768 vport_id, ret); 9769 return ret; 9770 } 9771 9772 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) 9773 return 0; 9774 9775 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) 9776 vport_num++; 9777 9778 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 9779 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 9780 is_kill); 9781 9782 return ret; 9783 } 9784 9785 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 9786 { 9787 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 9788 struct hclge_vport_vtag_tx_cfg_cmd *req; 9789 struct hclge_dev *hdev = vport->back; 9790 struct hclge_desc desc; 9791 u16 bmap_index; 9792 int status; 9793 9794 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 9795 9796 req = (struct 
hclge_vport_vtag_tx_cfg_cmd *)desc.data; 9797 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 9798 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 9799 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 9800 vcfg->accept_tag1 ? 1 : 0); 9801 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 9802 vcfg->accept_untag1 ? 1 : 0); 9803 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 9804 vcfg->accept_tag2 ? 1 : 0); 9805 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 9806 vcfg->accept_untag2 ? 1 : 0); 9807 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 9808 vcfg->insert_tag1_en ? 1 : 0); 9809 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 9810 vcfg->insert_tag2_en ? 1 : 0); 9811 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, 9812 vcfg->tag_shift_mode_en ? 1 : 0); 9813 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 9814 9815 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 9816 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 9817 HCLGE_VF_NUM_PER_BYTE; 9818 req->vf_bitmap[bmap_index] = 9819 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 9820 9821 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9822 if (status) 9823 dev_err(&hdev->pdev->dev, 9824 "Send port txvlan cfg command fail, ret =%d\n", 9825 status); 9826 9827 return status; 9828 } 9829 9830 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 9831 { 9832 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 9833 struct hclge_vport_vtag_rx_cfg_cmd *req; 9834 struct hclge_dev *hdev = vport->back; 9835 struct hclge_desc desc; 9836 u16 bmap_index; 9837 int status; 9838 9839 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 9840 9841 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 9842 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 9843 vcfg->strip_tag1_en ? 1 : 0); 9844 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 9845 vcfg->strip_tag2_en ? 1 : 0); 9846 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 9847 vcfg->vlan1_vlan_prionly ? 1 : 0); 9848 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 9849 vcfg->vlan2_vlan_prionly ? 1 : 0); 9850 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, 9851 vcfg->strip_tag1_discard_en ? 1 : 0); 9852 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, 9853 vcfg->strip_tag2_discard_en ? 
1 : 0); 9854 9855 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 9856 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 9857 HCLGE_VF_NUM_PER_BYTE; 9858 req->vf_bitmap[bmap_index] = 9859 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 9860 9861 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9862 if (status) 9863 dev_err(&hdev->pdev->dev, 9864 "Send port rxvlan cfg command fail, ret =%d\n", 9865 status); 9866 9867 return status; 9868 } 9869 9870 static int hclge_vlan_offload_cfg(struct hclge_vport *vport, 9871 u16 port_base_vlan_state, 9872 u16 vlan_tag, u8 qos) 9873 { 9874 int ret; 9875 9876 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 9877 vport->txvlan_cfg.accept_tag1 = true; 9878 vport->txvlan_cfg.insert_tag1_en = false; 9879 vport->txvlan_cfg.default_tag1 = 0; 9880 } else { 9881 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); 9882 9883 vport->txvlan_cfg.accept_tag1 = 9884 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; 9885 vport->txvlan_cfg.insert_tag1_en = true; 9886 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | 9887 vlan_tag; 9888 } 9889 9890 vport->txvlan_cfg.accept_untag1 = true; 9891 9892 /* accept_tag2 and accept_untag2 are not supported on 9893 * pdev revision(0x20), new revision support them, 9894 * this two fields can not be configured by user. 9895 */ 9896 vport->txvlan_cfg.accept_tag2 = true; 9897 vport->txvlan_cfg.accept_untag2 = true; 9898 vport->txvlan_cfg.insert_tag2_en = false; 9899 vport->txvlan_cfg.default_tag2 = 0; 9900 vport->txvlan_cfg.tag_shift_mode_en = true; 9901 9902 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 9903 vport->rxvlan_cfg.strip_tag1_en = false; 9904 vport->rxvlan_cfg.strip_tag2_en = 9905 vport->rxvlan_cfg.rx_vlan_offload_en; 9906 vport->rxvlan_cfg.strip_tag2_discard_en = false; 9907 } else { 9908 vport->rxvlan_cfg.strip_tag1_en = 9909 vport->rxvlan_cfg.rx_vlan_offload_en; 9910 vport->rxvlan_cfg.strip_tag2_en = true; 9911 vport->rxvlan_cfg.strip_tag2_discard_en = true; 9912 } 9913 9914 vport->rxvlan_cfg.strip_tag1_discard_en = false; 9915 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 9916 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 9917 9918 ret = hclge_set_vlan_tx_offload_cfg(vport); 9919 if (ret) 9920 return ret; 9921 9922 return hclge_set_vlan_rx_offload_cfg(vport); 9923 } 9924 9925 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 9926 { 9927 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 9928 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 9929 struct hclge_desc desc; 9930 int status; 9931 9932 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 9933 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 9934 rx_req->ot_fst_vlan_type = 9935 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 9936 rx_req->ot_sec_vlan_type = 9937 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 9938 rx_req->in_fst_vlan_type = 9939 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 9940 rx_req->in_sec_vlan_type = 9941 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 9942 9943 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9944 if (status) { 9945 dev_err(&hdev->pdev->dev, 9946 "Send rxvlan protocol type command fail, ret =%d\n", 9947 status); 9948 return status; 9949 } 9950 9951 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 9952 9953 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; 9954 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 9955 tx_req->in_vlan_type = 
cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 9956 9957 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9958 if (status) 9959 dev_err(&hdev->pdev->dev, 9960 "Send txvlan protocol type command fail, ret =%d\n", 9961 status); 9962 9963 return status; 9964 } 9965 9966 static int hclge_init_vlan_filter(struct hclge_dev *hdev) 9967 { 9968 struct hclge_vport *vport; 9969 bool enable = true; 9970 int ret; 9971 int i; 9972 9973 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 9974 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9975 HCLGE_FILTER_FE_EGRESS_V1_B, 9976 true, 0); 9977 9978 /* for revision 0x21, vf vlan filter is per function */ 9979 for (i = 0; i < hdev->num_alloc_vport; i++) { 9980 vport = &hdev->vport[i]; 9981 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9982 HCLGE_FILTER_FE_EGRESS, true, 9983 vport->vport_id); 9984 if (ret) 9985 return ret; 9986 vport->cur_vlan_fltr_en = true; 9987 } 9988 9989 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) && 9990 !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) 9991 enable = false; 9992 9993 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 9994 HCLGE_FILTER_FE_INGRESS, enable, 0); 9995 } 9996 9997 static int hclge_init_vlan_type(struct hclge_dev *hdev) 9998 { 9999 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; 10000 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; 10001 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; 10002 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; 10003 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; 10004 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; 10005 10006 return hclge_set_vlan_protocol_type(hdev); 10007 } 10008 10009 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) 10010 { 10011 struct hclge_port_base_vlan_config *cfg; 10012 struct hclge_vport *vport; 10013 int ret; 10014 int i; 10015 10016 for (i = 0; i < hdev->num_alloc_vport; i++) { 10017 vport = &hdev->vport[i]; 10018 cfg = &vport->port_base_vlan_cfg; 10019 10020 ret = hclge_vlan_offload_cfg(vport, cfg->state, 10021 cfg->vlan_info.vlan_tag, 10022 cfg->vlan_info.qos); 10023 if (ret) 10024 return ret; 10025 } 10026 return 0; 10027 } 10028 10029 static int hclge_init_vlan_config(struct hclge_dev *hdev) 10030 { 10031 struct hnae3_handle *handle = &hdev->vport[0].nic; 10032 int ret; 10033 10034 ret = hclge_init_vlan_filter(hdev); 10035 if (ret) 10036 return ret; 10037 10038 ret = hclge_init_vlan_type(hdev); 10039 if (ret) 10040 return ret; 10041 10042 ret = hclge_init_vport_vlan_offload(hdev); 10043 if (ret) 10044 return ret; 10045 10046 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 10047 } 10048 10049 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 10050 bool writen_to_tbl) 10051 { 10052 struct hclge_vport_vlan_cfg *vlan, *tmp; 10053 struct hclge_dev *hdev = vport->back; 10054 10055 mutex_lock(&hdev->vport_lock); 10056 10057 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10058 if (vlan->vlan_id == vlan_id) { 10059 mutex_unlock(&hdev->vport_lock); 10060 return; 10061 } 10062 } 10063 10064 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 10065 if (!vlan) { 10066 mutex_unlock(&hdev->vport_lock); 10067 return; 10068 } 10069 10070 vlan->hd_tbl_status = writen_to_tbl; 10071 vlan->vlan_id = vlan_id; 10072 10073 list_add_tail(&vlan->node, &vport->vlan_list); 10074 mutex_unlock(&hdev->vport_lock); 10075 } 10076 10077 static int hclge_add_vport_all_vlan_table(struct 
hclge_vport *vport) 10078 { 10079 struct hclge_vport_vlan_cfg *vlan, *tmp; 10080 struct hclge_dev *hdev = vport->back; 10081 int ret; 10082 10083 mutex_lock(&hdev->vport_lock); 10084 10085 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10086 if (!vlan->hd_tbl_status) { 10087 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10088 vport->vport_id, 10089 vlan->vlan_id, false); 10090 if (ret) { 10091 dev_err(&hdev->pdev->dev, 10092 "restore vport vlan list failed, ret=%d\n", 10093 ret); 10094 10095 mutex_unlock(&hdev->vport_lock); 10096 return ret; 10097 } 10098 } 10099 vlan->hd_tbl_status = true; 10100 } 10101 10102 mutex_unlock(&hdev->vport_lock); 10103 10104 return 0; 10105 } 10106 10107 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 10108 bool is_write_tbl) 10109 { 10110 struct hclge_vport_vlan_cfg *vlan, *tmp; 10111 struct hclge_dev *hdev = vport->back; 10112 10113 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10114 if (vlan->vlan_id == vlan_id) { 10115 if (is_write_tbl && vlan->hd_tbl_status) 10116 hclge_set_vlan_filter_hw(hdev, 10117 htons(ETH_P_8021Q), 10118 vport->vport_id, 10119 vlan_id, 10120 true); 10121 10122 list_del(&vlan->node); 10123 kfree(vlan); 10124 break; 10125 } 10126 } 10127 } 10128 10129 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) 10130 { 10131 struct hclge_vport_vlan_cfg *vlan, *tmp; 10132 struct hclge_dev *hdev = vport->back; 10133 10134 mutex_lock(&hdev->vport_lock); 10135 10136 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10137 if (vlan->hd_tbl_status) 10138 hclge_set_vlan_filter_hw(hdev, 10139 htons(ETH_P_8021Q), 10140 vport->vport_id, 10141 vlan->vlan_id, 10142 true); 10143 10144 vlan->hd_tbl_status = false; 10145 if (is_del_list) { 10146 list_del(&vlan->node); 10147 kfree(vlan); 10148 } 10149 } 10150 clear_bit(vport->vport_id, hdev->vf_vlan_full); 10151 mutex_unlock(&hdev->vport_lock); 10152 } 10153 10154 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) 10155 { 10156 struct hclge_vport_vlan_cfg *vlan, *tmp; 10157 struct hclge_vport *vport; 10158 int i; 10159 10160 mutex_lock(&hdev->vport_lock); 10161 10162 for (i = 0; i < hdev->num_alloc_vport; i++) { 10163 vport = &hdev->vport[i]; 10164 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10165 list_del(&vlan->node); 10166 kfree(vlan); 10167 } 10168 } 10169 10170 mutex_unlock(&hdev->vport_lock); 10171 } 10172 10173 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) 10174 { 10175 struct hclge_vlan_info *vlan_info; 10176 struct hclge_vport *vport; 10177 u16 vlan_proto; 10178 u16 vlan_id; 10179 u16 state; 10180 int vf_id; 10181 int ret; 10182 10183 /* PF should restore all vfs port base vlan */ 10184 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { 10185 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; 10186 vlan_info = vport->port_base_vlan_cfg.tbl_sta ? 
10187 &vport->port_base_vlan_cfg.vlan_info : 10188 &vport->port_base_vlan_cfg.old_vlan_info; 10189 10190 vlan_id = vlan_info->vlan_tag; 10191 vlan_proto = vlan_info->vlan_proto; 10192 state = vport->port_base_vlan_cfg.state; 10193 10194 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { 10195 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); 10196 ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), 10197 vport->vport_id, 10198 vlan_id, false); 10199 vport->port_base_vlan_cfg.tbl_sta = ret == 0; 10200 } 10201 } 10202 } 10203 10204 void hclge_restore_vport_vlan_table(struct hclge_vport *vport) 10205 { 10206 struct hclge_vport_vlan_cfg *vlan, *tmp; 10207 struct hclge_dev *hdev = vport->back; 10208 int ret; 10209 10210 mutex_lock(&hdev->vport_lock); 10211 10212 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10213 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10214 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10215 vport->vport_id, 10216 vlan->vlan_id, false); 10217 if (ret) 10218 break; 10219 vlan->hd_tbl_status = true; 10220 } 10221 } 10222 10223 mutex_unlock(&hdev->vport_lock); 10224 } 10225 10226 /* For global reset and imp reset, hardware will clear the mac table, 10227 * so we change the mac address state from ACTIVE to TO_ADD, then they 10228 * can be restored in the service task after reset complete. Furtherly, 10229 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to 10230 * be restored after reset, so just remove these mac nodes from mac_list. 10231 */ 10232 static void hclge_mac_node_convert_for_reset(struct list_head *list) 10233 { 10234 struct hclge_mac_node *mac_node, *tmp; 10235 10236 list_for_each_entry_safe(mac_node, tmp, list, node) { 10237 if (mac_node->state == HCLGE_MAC_ACTIVE) { 10238 mac_node->state = HCLGE_MAC_TO_ADD; 10239 } else if (mac_node->state == HCLGE_MAC_TO_DEL) { 10240 list_del(&mac_node->node); 10241 kfree(mac_node); 10242 } 10243 } 10244 } 10245 10246 void hclge_restore_mac_table_common(struct hclge_vport *vport) 10247 { 10248 spin_lock_bh(&vport->mac_list_lock); 10249 10250 hclge_mac_node_convert_for_reset(&vport->uc_mac_list); 10251 hclge_mac_node_convert_for_reset(&vport->mc_mac_list); 10252 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 10253 10254 spin_unlock_bh(&vport->mac_list_lock); 10255 } 10256 10257 static void hclge_restore_hw_table(struct hclge_dev *hdev) 10258 { 10259 struct hclge_vport *vport = &hdev->vport[0]; 10260 struct hnae3_handle *handle = &vport->nic; 10261 10262 hclge_restore_mac_table_common(vport); 10263 hclge_restore_vport_port_base_vlan_config(hdev); 10264 hclge_restore_vport_vlan_table(vport); 10265 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 10266 hclge_restore_fd_entries(handle); 10267 } 10268 10269 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 10270 { 10271 struct hclge_vport *vport = hclge_get_vport(handle); 10272 10273 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10274 vport->rxvlan_cfg.strip_tag1_en = false; 10275 vport->rxvlan_cfg.strip_tag2_en = enable; 10276 vport->rxvlan_cfg.strip_tag2_discard_en = false; 10277 } else { 10278 vport->rxvlan_cfg.strip_tag1_en = enable; 10279 vport->rxvlan_cfg.strip_tag2_en = true; 10280 vport->rxvlan_cfg.strip_tag2_discard_en = true; 10281 } 10282 10283 vport->rxvlan_cfg.strip_tag1_discard_en = false; 10284 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 10285 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 10286 vport->rxvlan_cfg.rx_vlan_offload_en 
= enable; 10287 10288 return hclge_set_vlan_rx_offload_cfg(vport); 10289 } 10290 10291 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport) 10292 { 10293 struct hclge_dev *hdev = vport->back; 10294 10295 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) 10296 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); 10297 } 10298 10299 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, 10300 u16 port_base_vlan_state, 10301 struct hclge_vlan_info *new_info, 10302 struct hclge_vlan_info *old_info) 10303 { 10304 struct hclge_dev *hdev = vport->back; 10305 int ret; 10306 10307 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { 10308 hclge_rm_vport_all_vlan_table(vport, false); 10309 /* force clear VLAN 0 */ 10310 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); 10311 if (ret) 10312 return ret; 10313 return hclge_set_vlan_filter_hw(hdev, 10314 htons(new_info->vlan_proto), 10315 vport->vport_id, 10316 new_info->vlan_tag, 10317 false); 10318 } 10319 10320 vport->port_base_vlan_cfg.tbl_sta = false; 10321 10322 /* force add VLAN 0 */ 10323 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); 10324 if (ret) 10325 return ret; 10326 10327 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), 10328 vport->vport_id, old_info->vlan_tag, 10329 true); 10330 if (ret) 10331 return ret; 10332 10333 return hclge_add_vport_all_vlan_table(vport); 10334 } 10335 10336 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, 10337 const struct hclge_vlan_info *old_cfg) 10338 { 10339 if (new_cfg->vlan_tag != old_cfg->vlan_tag) 10340 return true; 10341 10342 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) 10343 return true; 10344 10345 return false; 10346 } 10347 10348 static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, 10349 struct hclge_vlan_info *new_info, 10350 struct hclge_vlan_info *old_info) 10351 { 10352 struct hclge_dev *hdev = vport->back; 10353 int ret; 10354 10355 /* add new VLAN tag */ 10356 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), 10357 vport->vport_id, new_info->vlan_tag, 10358 false); 10359 if (ret) 10360 return ret; 10361 10362 vport->port_base_vlan_cfg.tbl_sta = false; 10363 /* remove old VLAN tag */ 10364 if (old_info->vlan_tag == 0) 10365 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, 10366 true, 0); 10367 else 10368 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10369 vport->vport_id, 10370 old_info->vlan_tag, true); 10371 if (ret) 10372 dev_err(&hdev->pdev->dev, 10373 "failed to clear vport%u port base vlan %u, ret = %d.\n", 10374 vport->vport_id, old_info->vlan_tag, ret); 10375 10376 return ret; 10377 } 10378 10379 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, 10380 struct hclge_vlan_info *vlan_info) 10381 { 10382 struct hnae3_handle *nic = &vport->nic; 10383 struct hclge_vlan_info *old_vlan_info; 10384 int ret; 10385 10386 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; 10387 10388 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag, 10389 vlan_info->qos); 10390 if (ret) 10391 return ret; 10392 10393 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) 10394 goto out; 10395 10396 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) 10397 ret = hclge_modify_port_base_vlan_tag(vport, vlan_info, 10398 old_vlan_info); 10399 else 10400 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, 10401 old_vlan_info); 10402 if (ret) 10403 
return ret; 10404 10405 out: 10406 vport->port_base_vlan_cfg.state = state; 10407 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 10408 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; 10409 else 10410 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 10411 10412 vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; 10413 vport->port_base_vlan_cfg.vlan_info = *vlan_info; 10414 vport->port_base_vlan_cfg.tbl_sta = true; 10415 hclge_set_vport_vlan_fltr_change(vport); 10416 10417 return 0; 10418 } 10419 10420 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, 10421 enum hnae3_port_base_vlan_state state, 10422 u16 vlan, u8 qos) 10423 { 10424 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10425 if (!vlan && !qos) 10426 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 10427 10428 return HNAE3_PORT_BASE_VLAN_ENABLE; 10429 } 10430 10431 if (!vlan && !qos) 10432 return HNAE3_PORT_BASE_VLAN_DISABLE; 10433 10434 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && 10435 vport->port_base_vlan_cfg.vlan_info.qos == qos) 10436 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 10437 10438 return HNAE3_PORT_BASE_VLAN_MODIFY; 10439 } 10440 10441 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 10442 u16 vlan, u8 qos, __be16 proto) 10443 { 10444 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 10445 struct hclge_vport *vport = hclge_get_vport(handle); 10446 struct hclge_dev *hdev = vport->back; 10447 struct hclge_vlan_info vlan_info; 10448 u16 state; 10449 int ret; 10450 10451 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 10452 return -EOPNOTSUPP; 10453 10454 vport = hclge_get_vf_vport(hdev, vfid); 10455 if (!vport) 10456 return -EINVAL; 10457 10458 /* qos is a 3 bits value, so can not be bigger than 7 */ 10459 if (vlan > VLAN_N_VID - 1 || qos > 7) 10460 return -EINVAL; 10461 if (proto != htons(ETH_P_8021Q)) 10462 return -EPROTONOSUPPORT; 10463 10464 state = hclge_get_port_base_vlan_state(vport, 10465 vport->port_base_vlan_cfg.state, 10466 vlan, qos); 10467 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) 10468 return 0; 10469 10470 vlan_info.vlan_tag = vlan; 10471 vlan_info.qos = qos; 10472 vlan_info.vlan_proto = ntohs(proto); 10473 10474 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); 10475 if (ret) { 10476 dev_err(&hdev->pdev->dev, 10477 "failed to update port base vlan for vf %d, ret = %d\n", 10478 vfid, ret); 10479 return ret; 10480 } 10481 10482 /* there is a timewindow for PF to know VF unalive, it may 10483 * cause send mailbox fail, but it doesn't matter, VF will 10484 * query it when reinit. 10485 * for DEVICE_VERSION_V3, vf doesn't need to know about the port based 10486 * VLAN state. 
10487 */ 10488 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 10489 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 10490 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], 10491 vport->vport_id, 10492 state, 10493 &vlan_info); 10494 else 10495 set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, 10496 &vport->need_notify); 10497 } 10498 return 0; 10499 } 10500 10501 static void hclge_clear_vf_vlan(struct hclge_dev *hdev) 10502 { 10503 struct hclge_vlan_info *vlan_info; 10504 struct hclge_vport *vport; 10505 int ret; 10506 int vf; 10507 10508 /* clear port base vlan for all vf */ 10509 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { 10510 vport = &hdev->vport[vf]; 10511 vlan_info = &vport->port_base_vlan_cfg.vlan_info; 10512 10513 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10514 vport->vport_id, 10515 vlan_info->vlan_tag, true); 10516 if (ret) 10517 dev_err(&hdev->pdev->dev, 10518 "failed to clear vf vlan for vf%d, ret = %d\n", 10519 vf - HCLGE_VF_VPORT_START_NUM, ret); 10520 } 10521 } 10522 10523 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 10524 u16 vlan_id, bool is_kill) 10525 { 10526 struct hclge_vport *vport = hclge_get_vport(handle); 10527 struct hclge_dev *hdev = vport->back; 10528 bool writen_to_tbl = false; 10529 int ret = 0; 10530 10531 /* When device is resetting or reset failed, firmware is unable to 10532 * handle mailbox. Just record the vlan id, and remove it after 10533 * reset finished. 10534 */ 10535 mutex_lock(&hdev->vport_lock); 10536 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 10537 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { 10538 set_bit(vlan_id, vport->vlan_del_fail_bmap); 10539 mutex_unlock(&hdev->vport_lock); 10540 return -EBUSY; 10541 } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { 10542 clear_bit(vlan_id, vport->vlan_del_fail_bmap); 10543 } 10544 mutex_unlock(&hdev->vport_lock); 10545 10546 /* when port base vlan enabled, we use port base vlan as the vlan 10547 * filter entry. In this case, we don't update vlan filter table 10548 * when user add new vlan or remove exist vlan, just update the vport 10549 * vlan list. 
The vlan id in vlan list will be writen in vlan filter 10550 * table until port base vlan disabled 10551 */ 10552 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10553 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, 10554 vlan_id, is_kill); 10555 writen_to_tbl = true; 10556 } 10557 10558 if (!ret) { 10559 if (!is_kill) { 10560 hclge_add_vport_vlan_table(vport, vlan_id, 10561 writen_to_tbl); 10562 } else if (is_kill && vlan_id != 0) { 10563 mutex_lock(&hdev->vport_lock); 10564 hclge_rm_vport_vlan_table(vport, vlan_id, false); 10565 mutex_unlock(&hdev->vport_lock); 10566 } 10567 } else if (is_kill) { 10568 /* when remove hw vlan filter failed, record the vlan id, 10569 * and try to remove it from hw later, to be consistence 10570 * with stack 10571 */ 10572 mutex_lock(&hdev->vport_lock); 10573 set_bit(vlan_id, vport->vlan_del_fail_bmap); 10574 mutex_unlock(&hdev->vport_lock); 10575 } 10576 10577 hclge_set_vport_vlan_fltr_change(vport); 10578 10579 return ret; 10580 } 10581 10582 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) 10583 { 10584 struct hclge_vport *vport; 10585 int ret; 10586 u16 i; 10587 10588 for (i = 0; i < hdev->num_alloc_vport; i++) { 10589 vport = &hdev->vport[i]; 10590 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 10591 &vport->state)) 10592 continue; 10593 10594 ret = hclge_enable_vport_vlan_filter(vport, 10595 vport->req_vlan_fltr_en); 10596 if (ret) { 10597 dev_err(&hdev->pdev->dev, 10598 "failed to sync vlan filter state for vport%u, ret = %d\n", 10599 vport->vport_id, ret); 10600 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 10601 &vport->state); 10602 return; 10603 } 10604 } 10605 } 10606 10607 static void hclge_sync_vlan_filter(struct hclge_dev *hdev) 10608 { 10609 #define HCLGE_MAX_SYNC_COUNT 60 10610 10611 int i, ret, sync_cnt = 0; 10612 u16 vlan_id; 10613 10614 mutex_lock(&hdev->vport_lock); 10615 /* start from vport 1 for PF is always alive */ 10616 for (i = 0; i < hdev->num_alloc_vport; i++) { 10617 struct hclge_vport *vport = &hdev->vport[i]; 10618 10619 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 10620 VLAN_N_VID); 10621 while (vlan_id != VLAN_N_VID) { 10622 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10623 vport->vport_id, vlan_id, 10624 true); 10625 if (ret && ret != -EINVAL) { 10626 mutex_unlock(&hdev->vport_lock); 10627 return; 10628 } 10629 10630 clear_bit(vlan_id, vport->vlan_del_fail_bmap); 10631 hclge_rm_vport_vlan_table(vport, vlan_id, false); 10632 hclge_set_vport_vlan_fltr_change(vport); 10633 10634 sync_cnt++; 10635 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) { 10636 mutex_unlock(&hdev->vport_lock); 10637 return; 10638 } 10639 10640 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 10641 VLAN_N_VID); 10642 } 10643 } 10644 mutex_unlock(&hdev->vport_lock); 10645 10646 hclge_sync_vlan_fltr_state(hdev); 10647 } 10648 10649 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) 10650 { 10651 struct hclge_config_max_frm_size_cmd *req; 10652 struct hclge_desc desc; 10653 10654 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 10655 10656 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 10657 req->max_frm_size = cpu_to_le16(new_mps); 10658 req->min_frm_size = HCLGE_MAC_MIN_FRAME; 10659 10660 return hclge_cmd_send(&hdev->hw, &desc, 1); 10661 } 10662 10663 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 10664 { 10665 struct hclge_vport *vport = hclge_get_vport(handle); 10666 10667 return hclge_set_vport_mtu(vport, 
new_mtu); 10668 } 10669 10670 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) 10671 { 10672 struct hclge_dev *hdev = vport->back; 10673 int i, max_frm_size, ret; 10674 10675 /* HW supprt 2 layer vlan */ 10676 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; 10677 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 10678 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) 10679 return -EINVAL; 10680 10681 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 10682 mutex_lock(&hdev->vport_lock); 10683 /* VF's mps must fit within hdev->mps */ 10684 if (vport->vport_id && max_frm_size > hdev->mps) { 10685 mutex_unlock(&hdev->vport_lock); 10686 return -EINVAL; 10687 } else if (vport->vport_id) { 10688 vport->mps = max_frm_size; 10689 mutex_unlock(&hdev->vport_lock); 10690 return 0; 10691 } 10692 10693 /* PF's mps must be greater then VF's mps */ 10694 for (i = 1; i < hdev->num_alloc_vport; i++) 10695 if (max_frm_size < hdev->vport[i].mps) { 10696 dev_err(&hdev->pdev->dev, 10697 "failed to set pf mtu for less than vport %d, mps = %u.\n", 10698 i, hdev->vport[i].mps); 10699 mutex_unlock(&hdev->vport_lock); 10700 return -EINVAL; 10701 } 10702 10703 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 10704 10705 ret = hclge_set_mac_mtu(hdev, max_frm_size); 10706 if (ret) { 10707 dev_err(&hdev->pdev->dev, 10708 "Change mtu fail, ret =%d\n", ret); 10709 goto out; 10710 } 10711 10712 hdev->mps = max_frm_size; 10713 vport->mps = max_frm_size; 10714 10715 ret = hclge_buffer_alloc(hdev); 10716 if (ret) 10717 dev_err(&hdev->pdev->dev, 10718 "Allocate buffer fail, ret =%d\n", ret); 10719 10720 out: 10721 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 10722 mutex_unlock(&hdev->vport_lock); 10723 return ret; 10724 } 10725 10726 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, 10727 bool enable) 10728 { 10729 struct hclge_reset_tqp_queue_cmd *req; 10730 struct hclge_desc desc; 10731 int ret; 10732 10733 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 10734 10735 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 10736 req->tqp_id = cpu_to_le16(queue_id); 10737 if (enable) 10738 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); 10739 10740 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10741 if (ret) { 10742 dev_err(&hdev->pdev->dev, 10743 "Send tqp reset cmd error, status =%d\n", ret); 10744 return ret; 10745 } 10746 10747 return 0; 10748 } 10749 10750 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, 10751 u8 *reset_status) 10752 { 10753 struct hclge_reset_tqp_queue_cmd *req; 10754 struct hclge_desc desc; 10755 int ret; 10756 10757 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 10758 10759 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 10760 req->tqp_id = cpu_to_le16(queue_id); 10761 10762 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10763 if (ret) { 10764 dev_err(&hdev->pdev->dev, 10765 "Get reset status error, status =%d\n", ret); 10766 return ret; 10767 } 10768 10769 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 10770 10771 return 0; 10772 } 10773 10774 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) 10775 { 10776 struct hclge_comm_tqp *tqp; 10777 struct hnae3_queue *queue; 10778 10779 queue = handle->kinfo.tqp[queue_id]; 10780 tqp = container_of(queue, struct hclge_comm_tqp, q); 10781 10782 return tqp->index; 10783 } 10784 10785 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle) 10786 { 10787 struct hclge_vport *vport = 
hclge_get_vport(handle); 10788 struct hclge_dev *hdev = vport->back; 10789 u16 reset_try_times = 0; 10790 u8 reset_status; 10791 u16 queue_gid; 10792 int ret; 10793 u16 i; 10794 10795 for (i = 0; i < handle->kinfo.num_tqps; i++) { 10796 queue_gid = hclge_covert_handle_qid_global(handle, i); 10797 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); 10798 if (ret) { 10799 dev_err(&hdev->pdev->dev, 10800 "failed to send reset tqp cmd, ret = %d\n", 10801 ret); 10802 return ret; 10803 } 10804 10805 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 10806 ret = hclge_get_reset_status(hdev, queue_gid, 10807 &reset_status); 10808 if (ret) 10809 return ret; 10810 10811 if (reset_status) 10812 break; 10813 10814 /* Wait for tqp hw reset */ 10815 usleep_range(1000, 1200); 10816 } 10817 10818 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 10819 dev_err(&hdev->pdev->dev, 10820 "wait for tqp hw reset timeout\n"); 10821 return -ETIME; 10822 } 10823 10824 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); 10825 if (ret) { 10826 dev_err(&hdev->pdev->dev, 10827 "failed to deassert soft reset, ret = %d\n", 10828 ret); 10829 return ret; 10830 } 10831 reset_try_times = 0; 10832 } 10833 return 0; 10834 } 10835 10836 static int hclge_reset_rcb(struct hnae3_handle *handle) 10837 { 10838 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U 10839 #define HCLGE_RESET_RCB_SUCCESS 1U 10840 10841 struct hclge_vport *vport = hclge_get_vport(handle); 10842 struct hclge_dev *hdev = vport->back; 10843 struct hclge_reset_cmd *req; 10844 struct hclge_desc desc; 10845 u8 return_status; 10846 u16 queue_gid; 10847 int ret; 10848 10849 queue_gid = hclge_covert_handle_qid_global(handle, 0); 10850 10851 req = (struct hclge_reset_cmd *)desc.data; 10852 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 10853 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); 10854 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); 10855 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); 10856 10857 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10858 if (ret) { 10859 dev_err(&hdev->pdev->dev, 10860 "failed to send rcb reset cmd, ret = %d\n", ret); 10861 return ret; 10862 } 10863 10864 return_status = req->fun_reset_rcb_return_status; 10865 if (return_status == HCLGE_RESET_RCB_SUCCESS) 10866 return 0; 10867 10868 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) { 10869 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", 10870 return_status); 10871 return -EIO; 10872 } 10873 10874 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd 10875 * again to reset all tqps 10876 */ 10877 return hclge_reset_tqp_cmd(handle); 10878 } 10879 10880 int hclge_reset_tqp(struct hnae3_handle *handle) 10881 { 10882 struct hclge_vport *vport = hclge_get_vport(handle); 10883 struct hclge_dev *hdev = vport->back; 10884 int ret; 10885 10886 /* only need to disable PF's tqp */ 10887 if (!vport->vport_id) { 10888 ret = hclge_tqp_enable(handle, false); 10889 if (ret) { 10890 dev_err(&hdev->pdev->dev, 10891 "failed to disable tqp, ret = %d\n", ret); 10892 return ret; 10893 } 10894 } 10895 10896 return hclge_reset_rcb(handle); 10897 } 10898 10899 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 10900 { 10901 struct hclge_vport *vport = hclge_get_vport(handle); 10902 struct hclge_dev *hdev = vport->back; 10903 10904 return hdev->fw_version; 10905 } 10906 10907 int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version) 10908 { 10909 struct hclge_comm_query_scc_cmd *resp; 10910 struct 
hclge_desc desc; 10911 int ret; 10912 10913 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, 1); 10914 resp = (struct hclge_comm_query_scc_cmd *)desc.data; 10915 10916 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10917 if (ret) 10918 return ret; 10919 10920 *scc_version = le32_to_cpu(resp->scc_version); 10921 10922 return 0; 10923 } 10924 10925 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 10926 { 10927 struct phy_device *phydev = hdev->hw.mac.phydev; 10928 10929 if (!phydev) 10930 return; 10931 10932 phy_set_asym_pause(phydev, rx_en, tx_en); 10933 } 10934 10935 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 10936 { 10937 int ret; 10938 10939 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 10940 return 0; 10941 10942 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 10943 if (ret) 10944 dev_err(&hdev->pdev->dev, 10945 "configure pauseparam error, ret = %d.\n", ret); 10946 10947 return ret; 10948 } 10949 10950 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 10951 { 10952 struct phy_device *phydev = hdev->hw.mac.phydev; 10953 u16 remote_advertising = 0; 10954 u16 local_advertising; 10955 u32 rx_pause, tx_pause; 10956 u8 flowctl; 10957 10958 if (!phydev->link) 10959 return 0; 10960 10961 if (!phydev->autoneg) 10962 return hclge_mac_pause_setup_hw(hdev); 10963 10964 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); 10965 10966 if (phydev->pause) 10967 remote_advertising = LPA_PAUSE_CAP; 10968 10969 if (phydev->asym_pause) 10970 remote_advertising |= LPA_PAUSE_ASYM; 10971 10972 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 10973 remote_advertising); 10974 tx_pause = flowctl & FLOW_CTRL_TX; 10975 rx_pause = flowctl & FLOW_CTRL_RX; 10976 10977 if (phydev->duplex == HCLGE_MAC_HALF) { 10978 tx_pause = 0; 10979 rx_pause = 0; 10980 } 10981 10982 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 10983 } 10984 10985 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 10986 u32 *rx_en, u32 *tx_en) 10987 { 10988 struct hclge_vport *vport = hclge_get_vport(handle); 10989 struct hclge_dev *hdev = vport->back; 10990 u8 media_type = hdev->hw.mac.media_type; 10991 10992 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ? 
10993 hclge_get_autoneg(handle) : 0; 10994 10995 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 10996 *rx_en = 0; 10997 *tx_en = 0; 10998 return; 10999 } 11000 11001 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 11002 *rx_en = 1; 11003 *tx_en = 0; 11004 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 11005 *tx_en = 1; 11006 *rx_en = 0; 11007 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 11008 *rx_en = 1; 11009 *tx_en = 1; 11010 } else { 11011 *rx_en = 0; 11012 *tx_en = 0; 11013 } 11014 } 11015 11016 static void hclge_record_user_pauseparam(struct hclge_dev *hdev, 11017 u32 rx_en, u32 tx_en) 11018 { 11019 if (rx_en && tx_en) 11020 hdev->fc_mode_last_time = HCLGE_FC_FULL; 11021 else if (rx_en && !tx_en) 11022 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 11023 else if (!rx_en && tx_en) 11024 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 11025 else 11026 hdev->fc_mode_last_time = HCLGE_FC_NONE; 11027 11028 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 11029 } 11030 11031 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 11032 u32 rx_en, u32 tx_en) 11033 { 11034 struct hclge_vport *vport = hclge_get_vport(handle); 11035 struct hclge_dev *hdev = vport->back; 11036 struct phy_device *phydev = hdev->hw.mac.phydev; 11037 u32 fc_autoneg; 11038 11039 if (phydev || hnae3_dev_phy_imp_supported(hdev)) { 11040 fc_autoneg = hclge_get_autoneg(handle); 11041 if (auto_neg != fc_autoneg) { 11042 dev_info(&hdev->pdev->dev, 11043 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 11044 return -EOPNOTSUPP; 11045 } 11046 } 11047 11048 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 11049 dev_info(&hdev->pdev->dev, 11050 "Priority flow control enabled. Cannot set link flow control.\n"); 11051 return -EOPNOTSUPP; 11052 } 11053 11054 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 11055 11056 hclge_record_user_pauseparam(hdev, rx_en, tx_en); 11057 11058 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) 11059 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 11060 11061 if (phydev) 11062 return phy_start_aneg(phydev); 11063 11064 return -EOPNOTSUPP; 11065 } 11066 11067 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 11068 u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num) 11069 { 11070 struct hclge_vport *vport = hclge_get_vport(handle); 11071 struct hclge_dev *hdev = vport->back; 11072 11073 if (speed) 11074 *speed = hdev->hw.mac.speed; 11075 if (duplex) 11076 *duplex = hdev->hw.mac.duplex; 11077 if (auto_neg) 11078 *auto_neg = hdev->hw.mac.autoneg; 11079 if (lane_num) 11080 *lane_num = hdev->hw.mac.lane_num; 11081 } 11082 11083 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, 11084 u8 *module_type) 11085 { 11086 struct hclge_vport *vport = hclge_get_vport(handle); 11087 struct hclge_dev *hdev = vport->back; 11088 11089 /* When nic is down, the service task is not running, doesn't update 11090 * the port information per second. Query the port information before 11091 * return the media type, ensure getting the correct media information. 
11092 */ 11093 hclge_update_port_info(hdev); 11094 11095 if (media_type) 11096 *media_type = hdev->hw.mac.media_type; 11097 11098 if (module_type) 11099 *module_type = hdev->hw.mac.module_type; 11100 } 11101 11102 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 11103 u8 *tp_mdix_ctrl, u8 *tp_mdix) 11104 { 11105 struct hclge_vport *vport = hclge_get_vport(handle); 11106 struct hclge_dev *hdev = vport->back; 11107 struct phy_device *phydev = hdev->hw.mac.phydev; 11108 int mdix_ctrl, mdix, is_resolved; 11109 unsigned int retval; 11110 11111 if (!phydev) { 11112 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 11113 *tp_mdix = ETH_TP_MDI_INVALID; 11114 return; 11115 } 11116 11117 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 11118 11119 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 11120 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 11121 HCLGE_PHY_MDIX_CTRL_S); 11122 11123 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 11124 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 11125 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 11126 11127 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 11128 11129 switch (mdix_ctrl) { 11130 case 0x0: 11131 *tp_mdix_ctrl = ETH_TP_MDI; 11132 break; 11133 case 0x1: 11134 *tp_mdix_ctrl = ETH_TP_MDI_X; 11135 break; 11136 case 0x3: 11137 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 11138 break; 11139 default: 11140 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 11141 break; 11142 } 11143 11144 if (!is_resolved) 11145 *tp_mdix = ETH_TP_MDI_INVALID; 11146 else if (mdix) 11147 *tp_mdix = ETH_TP_MDI_X; 11148 else 11149 *tp_mdix = ETH_TP_MDI; 11150 } 11151 11152 static void hclge_info_show(struct hclge_dev *hdev) 11153 { 11154 struct hnae3_handle *handle = &hdev->vport->nic; 11155 struct device *dev = &hdev->pdev->dev; 11156 11157 dev_info(dev, "PF info begin:\n"); 11158 11159 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 11160 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 11161 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 11162 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 11163 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); 11164 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 11165 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); 11166 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); 11167 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); 11168 dev_info(dev, "This is %s PF\n", 11169 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); 11170 dev_info(dev, "DCB %s\n", 11171 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable"); 11172 dev_info(dev, "MQPRIO %s\n", 11173 handle->kinfo.tc_info.mqprio_active ? 
"enable" : "disable"); 11174 dev_info(dev, "Default tx spare buffer size: %u\n", 11175 hdev->tx_spare_buf_size); 11176 11177 dev_info(dev, "PF info end.\n"); 11178 } 11179 11180 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 11181 struct hclge_vport *vport) 11182 { 11183 struct hnae3_client *client = vport->nic.client; 11184 struct hclge_dev *hdev = ae_dev->priv; 11185 int rst_cnt = hdev->rst_stats.reset_cnt; 11186 int ret; 11187 11188 ret = client->ops->init_instance(&vport->nic); 11189 if (ret) 11190 return ret; 11191 11192 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11193 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 11194 rst_cnt != hdev->rst_stats.reset_cnt) { 11195 ret = -EBUSY; 11196 goto init_nic_err; 11197 } 11198 11199 /* Enable nic hw error interrupts */ 11200 ret = hclge_config_nic_hw_error(hdev, true); 11201 if (ret) { 11202 dev_err(&ae_dev->pdev->dev, 11203 "fail(%d) to enable hw error interrupts\n", ret); 11204 goto init_nic_err; 11205 } 11206 11207 hnae3_set_client_init_flag(client, ae_dev, 1); 11208 11209 if (netif_msg_drv(&hdev->vport->nic)) 11210 hclge_info_show(hdev); 11211 11212 return ret; 11213 11214 init_nic_err: 11215 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11216 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11217 msleep(HCLGE_WAIT_RESET_DONE); 11218 11219 client->ops->uninit_instance(&vport->nic, 0); 11220 11221 return ret; 11222 } 11223 11224 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 11225 struct hclge_vport *vport) 11226 { 11227 struct hclge_dev *hdev = ae_dev->priv; 11228 struct hnae3_client *client; 11229 int rst_cnt; 11230 int ret; 11231 11232 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 11233 !hdev->nic_client) 11234 return 0; 11235 11236 client = hdev->roce_client; 11237 ret = hclge_init_roce_base_info(vport); 11238 if (ret) 11239 return ret; 11240 11241 rst_cnt = hdev->rst_stats.reset_cnt; 11242 ret = client->ops->init_instance(&vport->roce); 11243 if (ret) 11244 return ret; 11245 11246 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11247 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 11248 rst_cnt != hdev->rst_stats.reset_cnt) { 11249 ret = -EBUSY; 11250 goto init_roce_err; 11251 } 11252 11253 /* Enable roce ras interrupts */ 11254 ret = hclge_config_rocee_ras_interrupt(hdev, true); 11255 if (ret) { 11256 dev_err(&ae_dev->pdev->dev, 11257 "fail(%d) to enable roce ras interrupts\n", ret); 11258 goto init_roce_err; 11259 } 11260 11261 hnae3_set_client_init_flag(client, ae_dev, 1); 11262 11263 return 0; 11264 11265 init_roce_err: 11266 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11267 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11268 msleep(HCLGE_WAIT_RESET_DONE); 11269 11270 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); 11271 11272 return ret; 11273 } 11274 11275 static int hclge_init_client_instance(struct hnae3_client *client, 11276 struct hnae3_ae_dev *ae_dev) 11277 { 11278 struct hclge_dev *hdev = ae_dev->priv; 11279 struct hclge_vport *vport = &hdev->vport[0]; 11280 int ret; 11281 11282 switch (client->type) { 11283 case HNAE3_CLIENT_KNIC: 11284 hdev->nic_client = client; 11285 vport->nic.client = client; 11286 ret = hclge_init_nic_client_instance(ae_dev, vport); 11287 if (ret) 11288 goto clear_nic; 11289 11290 ret = hclge_init_roce_client_instance(ae_dev, vport); 11291 if (ret) 11292 goto clear_roce; 11293 11294 break; 11295 case HNAE3_CLIENT_ROCE: 11296 if (hnae3_dev_roce_supported(hdev)) { 
11297 hdev->roce_client = client; 11298 vport->roce.client = client; 11299 } 11300 11301 ret = hclge_init_roce_client_instance(ae_dev, vport); 11302 if (ret) 11303 goto clear_roce; 11304 11305 break; 11306 default: 11307 return -EINVAL; 11308 } 11309 11310 return 0; 11311 11312 clear_nic: 11313 hdev->nic_client = NULL; 11314 vport->nic.client = NULL; 11315 return ret; 11316 clear_roce: 11317 hdev->roce_client = NULL; 11318 vport->roce.client = NULL; 11319 return ret; 11320 } 11321 11322 static void hclge_uninit_client_instance(struct hnae3_client *client, 11323 struct hnae3_ae_dev *ae_dev) 11324 { 11325 struct hclge_dev *hdev = ae_dev->priv; 11326 struct hclge_vport *vport = &hdev->vport[0]; 11327 11328 if (hdev->roce_client) { 11329 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11330 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11331 msleep(HCLGE_WAIT_RESET_DONE); 11332 11333 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); 11334 hdev->roce_client = NULL; 11335 vport->roce.client = NULL; 11336 } 11337 if (client->type == HNAE3_CLIENT_ROCE) 11338 return; 11339 if (hdev->nic_client && client->ops->uninit_instance) { 11340 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11341 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11342 msleep(HCLGE_WAIT_RESET_DONE); 11343 11344 client->ops->uninit_instance(&vport->nic, 0); 11345 hdev->nic_client = NULL; 11346 vport->nic.client = NULL; 11347 } 11348 } 11349 11350 static int hclge_dev_mem_map(struct hclge_dev *hdev) 11351 { 11352 struct pci_dev *pdev = hdev->pdev; 11353 struct hclge_hw *hw = &hdev->hw; 11354 11355 /* for device does not have device memory, return directly */ 11356 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR))) 11357 return 0; 11358 11359 hw->hw.mem_base = 11360 devm_ioremap_wc(&pdev->dev, 11361 pci_resource_start(pdev, HCLGE_MEM_BAR), 11362 pci_resource_len(pdev, HCLGE_MEM_BAR)); 11363 if (!hw->hw.mem_base) { 11364 dev_err(&pdev->dev, "failed to map device memory\n"); 11365 return -EFAULT; 11366 } 11367 11368 return 0; 11369 } 11370 11371 static int hclge_pci_init(struct hclge_dev *hdev) 11372 { 11373 struct pci_dev *pdev = hdev->pdev; 11374 struct hclge_hw *hw; 11375 int ret; 11376 11377 ret = pci_enable_device(pdev); 11378 if (ret) { 11379 dev_err(&pdev->dev, "failed to enable PCI device\n"); 11380 return ret; 11381 } 11382 11383 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11384 if (ret) { 11385 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11386 if (ret) { 11387 dev_err(&pdev->dev, 11388 "can't set consistent PCI DMA"); 11389 goto err_disable_device; 11390 } 11391 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 11392 } 11393 11394 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 11395 if (ret) { 11396 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 11397 goto err_disable_device; 11398 } 11399 11400 pci_set_master(pdev); 11401 hw = &hdev->hw; 11402 hw->hw.io_base = pcim_iomap(pdev, 2, 0); 11403 if (!hw->hw.io_base) { 11404 dev_err(&pdev->dev, "Can't map configuration register space\n"); 11405 ret = -ENOMEM; 11406 goto err_release_regions; 11407 } 11408 11409 ret = hclge_dev_mem_map(hdev); 11410 if (ret) 11411 goto err_unmap_io_base; 11412 11413 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 11414 11415 return 0; 11416 11417 err_unmap_io_base: 11418 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11419 err_release_regions: 11420 pci_release_regions(pdev); 11421 err_disable_device: 11422 pci_disable_device(pdev); 11423 
11424 return ret; 11425 } 11426 11427 static void hclge_pci_uninit(struct hclge_dev *hdev) 11428 { 11429 struct pci_dev *pdev = hdev->pdev; 11430 11431 if (hdev->hw.hw.mem_base) 11432 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 11433 11434 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11435 pci_free_irq_vectors(pdev); 11436 pci_release_mem_regions(pdev); 11437 pci_disable_device(pdev); 11438 } 11439 11440 static void hclge_state_init(struct hclge_dev *hdev) 11441 { 11442 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 11443 set_bit(HCLGE_STATE_DOWN, &hdev->state); 11444 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 11445 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11446 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 11447 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 11448 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 11449 } 11450 11451 static void hclge_state_uninit(struct hclge_dev *hdev) 11452 { 11453 set_bit(HCLGE_STATE_DOWN, &hdev->state); 11454 set_bit(HCLGE_STATE_REMOVING, &hdev->state); 11455 11456 if (hdev->reset_timer.function) 11457 del_timer_sync(&hdev->reset_timer); 11458 if (hdev->service_task.work.func) 11459 cancel_delayed_work_sync(&hdev->service_task); 11460 } 11461 11462 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 11463 enum hnae3_reset_type rst_type) 11464 { 11465 #define HCLGE_RESET_RETRY_WAIT_MS 500 11466 #define HCLGE_RESET_RETRY_CNT 5 11467 11468 struct hclge_dev *hdev = ae_dev->priv; 11469 int retry_cnt = 0; 11470 int ret; 11471 11472 while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) { 11473 down(&hdev->reset_sem); 11474 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11475 hdev->reset_type = rst_type; 11476 ret = hclge_reset_prepare(hdev); 11477 if (!ret && !hdev->reset_pending) 11478 break; 11479 11480 dev_err(&hdev->pdev->dev, 11481 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 11482 ret, hdev->reset_pending, retry_cnt); 11483 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11484 up(&hdev->reset_sem); 11485 msleep(HCLGE_RESET_RETRY_WAIT_MS); 11486 } 11487 11488 /* disable misc vector before reset done */ 11489 hclge_enable_vector(&hdev->misc_vector, false); 11490 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 11491 11492 if (hdev->reset_type == HNAE3_FLR_RESET) 11493 hdev->rst_stats.flr_rst_cnt++; 11494 } 11495 11496 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) 11497 { 11498 struct hclge_dev *hdev = ae_dev->priv; 11499 int ret; 11500 11501 hclge_enable_vector(&hdev->misc_vector, true); 11502 11503 ret = hclge_reset_rebuild(hdev); 11504 if (ret) 11505 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); 11506 11507 hdev->reset_type = HNAE3_NONE_RESET; 11508 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11509 up(&hdev->reset_sem); 11510 } 11511 11512 static void hclge_clear_resetting_state(struct hclge_dev *hdev) 11513 { 11514 u16 i; 11515 11516 for (i = 0; i < hdev->num_alloc_vport; i++) { 11517 struct hclge_vport *vport = &hdev->vport[i]; 11518 int ret; 11519 11520 /* Send cmd to clear vport's FUNC_RST_ING */ 11521 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); 11522 if (ret) 11523 dev_warn(&hdev->pdev->dev, 11524 "clear vport(%u) rst failed %d!\n", 11525 vport->vport_id, ret); 11526 } 11527 } 11528 11529 static int hclge_clear_hw_resource(struct hclge_dev *hdev) 11530 { 11531 struct hclge_desc desc; 11532 int ret; 11533 11534 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); 11535 11536 ret = 
hclge_cmd_send(&hdev->hw, &desc, 1); 11537 /* This new command is only supported by new firmware; it will 11538 * fail with older firmware. The error value -EOPNOTSUPP can only be 11539 * returned by older firmware running this command, so to keep the code 11540 * backward compatible we override this value and return 11541 * success. 11542 */ 11543 if (ret && ret != -EOPNOTSUPP) { 11544 dev_err(&hdev->pdev->dev, 11545 "failed to clear hw resource, ret = %d\n", ret); 11546 return ret; 11547 } 11548 return 0; 11549 } 11550 11551 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) 11552 { 11553 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 11554 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1); 11555 } 11556 11557 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev) 11558 { 11559 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 11560 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0); 11561 } 11562 11563 static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle) 11564 { 11565 struct hclge_vport *vport = hclge_get_vport(handle); 11566 11567 return &vport->back->hw.mac.wol; 11568 } 11569 11570 static int hclge_get_wol_supported_mode(struct hclge_dev *hdev, 11571 u32 *wol_supported) 11572 { 11573 struct hclge_query_wol_supported_cmd *wol_supported_cmd; 11574 struct hclge_desc desc; 11575 int ret; 11576 11577 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE, 11578 true); 11579 wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data; 11580 11581 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 11582 if (ret) { 11583 dev_err(&hdev->pdev->dev, 11584 "failed to query wol supported, ret = %d\n", ret); 11585 return ret; 11586 } 11587 11588 *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode); 11589 11590 return 0; 11591 } 11592 11593 static int hclge_set_wol_cfg(struct hclge_dev *hdev, 11594 struct hclge_wol_info *wol_info) 11595 { 11596 struct hclge_wol_cfg_cmd *wol_cfg_cmd; 11597 struct hclge_desc desc; 11598 int ret; 11599 11600 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false); 11601 wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data; 11602 wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode); 11603 wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size; 11604 memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX); 11605 11606 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 11607 if (ret) 11608 dev_err(&hdev->pdev->dev, 11609 "failed to set wol config, ret = %d\n", ret); 11610 11611 return ret; 11612 } 11613 11614 static int hclge_update_wol(struct hclge_dev *hdev) 11615 { 11616 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; 11617 11618 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) 11619 return 0; 11620 11621 return hclge_set_wol_cfg(hdev, wol_info); 11622 } 11623 11624 static int hclge_init_wol(struct hclge_dev *hdev) 11625 { 11626 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; 11627 int ret; 11628 11629 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) 11630 return 0; 11631 11632 memset(wol_info, 0, sizeof(struct hclge_wol_info)); 11633 ret = hclge_get_wol_supported_mode(hdev, 11634 &wol_info->wol_support_mode); 11635 if (ret) { 11636 wol_info->wol_support_mode = 0; 11637 return ret; 11638 } 11639 11640 return hclge_update_wol(hdev); 11641 } 11642 11643 static void hclge_get_wol(struct hnae3_handle *handle, 11644 struct ethtool_wolinfo *wol) 11645 { 11646 struct hclge_wol_info *wol_info = hclge_get_wol_info(handle); 11647 11648 wol->supported = 
wol_info->wol_support_mode; 11649 wol->wolopts = wol_info->wol_current_mode; 11650 if (wol_info->wol_current_mode & WAKE_MAGICSECURE) 11651 memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX); 11652 } 11653 11654 static int hclge_set_wol(struct hnae3_handle *handle, 11655 struct ethtool_wolinfo *wol) 11656 { 11657 struct hclge_wol_info *wol_info = hclge_get_wol_info(handle); 11658 struct hclge_vport *vport = hclge_get_vport(handle); 11659 u32 wol_mode; 11660 int ret; 11661 11662 wol_mode = wol->wolopts; 11663 if (wol_mode & ~wol_info->wol_support_mode) 11664 return -EINVAL; 11665 11666 wol_info->wol_current_mode = wol_mode; 11667 if (wol_mode & WAKE_MAGICSECURE) { 11668 memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX); 11669 wol_info->wol_sopass_size = SOPASS_MAX; 11670 } else { 11671 wol_info->wol_sopass_size = 0; 11672 } 11673 11674 ret = hclge_set_wol_cfg(vport->back, wol_info); 11675 if (ret) 11676 wol_info->wol_current_mode = 0; 11677 11678 return ret; 11679 } 11680 11681 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) 11682 { 11683 struct pci_dev *pdev = ae_dev->pdev; 11684 struct hclge_dev *hdev; 11685 int ret; 11686 11687 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 11688 if (!hdev) 11689 return -ENOMEM; 11690 11691 hdev->pdev = pdev; 11692 hdev->ae_dev = ae_dev; 11693 hdev->reset_type = HNAE3_NONE_RESET; 11694 hdev->reset_level = HNAE3_FUNC_RESET; 11695 ae_dev->priv = hdev; 11696 11697 /* HW supports 2-layer VLAN */ 11698 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; 11699 11700 mutex_init(&hdev->vport_lock); 11701 spin_lock_init(&hdev->fd_rule_lock); 11702 sema_init(&hdev->reset_sem, 1); 11703 11704 ret = hclge_pci_init(hdev); 11705 if (ret) 11706 goto out; 11707 11708 /* Firmware command queue initialize */ 11709 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); 11710 if (ret) 11711 goto err_pci_uninit; 11712 11713 /* Firmware command initialize */ 11714 hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops); 11715 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, 11716 true, hdev->reset_pending); 11717 if (ret) 11718 goto err_cmd_uninit; 11719 11720 ret = hclge_clear_hw_resource(hdev); 11721 if (ret) 11722 goto err_cmd_uninit; 11723 11724 ret = hclge_get_cap(hdev); 11725 if (ret) 11726 goto err_cmd_uninit; 11727 11728 ret = hclge_query_dev_specs(hdev); 11729 if (ret) { 11730 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n", 11731 ret); 11732 goto err_cmd_uninit; 11733 } 11734 11735 ret = hclge_configure(hdev); 11736 if (ret) { 11737 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); 11738 goto err_cmd_uninit; 11739 } 11740 11741 ret = hclge_init_msi(hdev); 11742 if (ret) { 11743 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); 11744 goto err_cmd_uninit; 11745 } 11746 11747 ret = hclge_misc_irq_init(hdev); 11748 if (ret) 11749 goto err_msi_uninit; 11750 11751 ret = hclge_alloc_tqps(hdev); 11752 if (ret) { 11753 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 11754 goto err_msi_irq_uninit; 11755 } 11756 11757 ret = hclge_alloc_vport(hdev); 11758 if (ret) 11759 goto err_msi_irq_uninit; 11760 11761 ret = hclge_map_tqp(hdev); 11762 if (ret) 11763 goto err_msi_irq_uninit; 11764 11765 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 11766 clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); 11767 if (hnae3_dev_phy_imp_supported(hdev)) 11768 ret = hclge_update_tp_port_info(hdev); 11769 else 11770 ret = hclge_mac_mdio_config(hdev); 11771 11772 if 
(ret) 11773 goto err_msi_irq_uninit; 11774 } 11775 11776 ret = hclge_init_umv_space(hdev); 11777 if (ret) 11778 goto err_mdiobus_unreg; 11779 11780 ret = hclge_mac_init(hdev); 11781 if (ret) { 11782 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 11783 goto err_mdiobus_unreg; 11784 } 11785 11786 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 11787 if (ret) { 11788 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 11789 goto err_mdiobus_unreg; 11790 } 11791 11792 ret = hclge_config_gro(hdev); 11793 if (ret) 11794 goto err_mdiobus_unreg; 11795 11796 ret = hclge_init_vlan_config(hdev); 11797 if (ret) { 11798 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 11799 goto err_mdiobus_unreg; 11800 } 11801 11802 ret = hclge_tm_schd_init(hdev); 11803 if (ret) { 11804 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 11805 goto err_mdiobus_unreg; 11806 } 11807 11808 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, 11809 &hdev->rss_cfg); 11810 if (ret) { 11811 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 11812 goto err_mdiobus_unreg; 11813 } 11814 11815 ret = hclge_rss_init_hw(hdev); 11816 if (ret) { 11817 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 11818 goto err_mdiobus_unreg; 11819 } 11820 11821 ret = init_mgr_tbl(hdev); 11822 if (ret) { 11823 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 11824 goto err_mdiobus_unreg; 11825 } 11826 11827 ret = hclge_init_fd_config(hdev); 11828 if (ret) { 11829 dev_err(&pdev->dev, 11830 "fd table init fail, ret=%d\n", ret); 11831 goto err_mdiobus_unreg; 11832 } 11833 11834 ret = hclge_ptp_init(hdev); 11835 if (ret) 11836 goto err_mdiobus_unreg; 11837 11838 ret = hclge_update_port_info(hdev); 11839 if (ret) 11840 goto err_ptp_uninit; 11841 11842 INIT_KFIFO(hdev->mac_tnl_log); 11843 11844 hclge_dcb_ops_set(hdev); 11845 11846 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); 11847 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); 11848 11849 hclge_clear_all_event_cause(hdev); 11850 hclge_clear_resetting_state(hdev); 11851 11852 /* Log and clear the hw errors those already occurred */ 11853 if (hnae3_dev_ras_imp_supported(hdev)) 11854 hclge_handle_occurred_error(hdev); 11855 else 11856 hclge_handle_all_hns_hw_errors(ae_dev); 11857 11858 /* request delayed reset for the error recovery because an immediate 11859 * global reset on a PF affecting pending initialization of other PFs 11860 */ 11861 if (ae_dev->hw_err_reset_req) { 11862 enum hnae3_reset_type reset_level; 11863 11864 reset_level = hclge_get_reset_level(ae_dev, 11865 &ae_dev->hw_err_reset_req); 11866 hclge_set_def_reset_request(ae_dev, reset_level); 11867 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); 11868 } 11869 11870 hclge_init_rxd_adv_layout(hdev); 11871 11872 /* Enable MISC vector(vector0) */ 11873 hclge_enable_vector(&hdev->misc_vector, true); 11874 11875 ret = hclge_init_wol(hdev); 11876 if (ret) 11877 dev_warn(&pdev->dev, 11878 "failed to wake on lan init, ret = %d\n", ret); 11879 11880 ret = hclge_devlink_init(hdev); 11881 if (ret) 11882 goto err_ptp_uninit; 11883 11884 hclge_state_init(hdev); 11885 hdev->last_reset_time = jiffies; 11886 11887 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", 11888 HCLGE_DRIVER_NAME); 11889 11890 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); 11891 return 0; 11892 11893 err_ptp_uninit: 11894 hclge_ptp_uninit(hdev); 11895 err_mdiobus_unreg: 11896 if (hdev->hw.mac.phydev) 11897 
mdiobus_unregister(hdev->hw.mac.mdio_bus); 11898 err_msi_irq_uninit: 11899 hclge_misc_irq_uninit(hdev); 11900 err_msi_uninit: 11901 pci_free_irq_vectors(pdev); 11902 err_cmd_uninit: 11903 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 11904 err_pci_uninit: 11905 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11906 pci_release_regions(pdev); 11907 pci_disable_device(pdev); 11908 out: 11909 mutex_destroy(&hdev->vport_lock); 11910 return ret; 11911 } 11912 11913 static void hclge_stats_clear(struct hclge_dev *hdev) 11914 { 11915 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); 11916 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); 11917 } 11918 11919 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 11920 { 11921 return hclge_config_switch_param(hdev, vf, enable, 11922 HCLGE_SWITCH_ANTI_SPOOF_MASK); 11923 } 11924 11925 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 11926 { 11927 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 11928 HCLGE_FILTER_FE_NIC_INGRESS_B, 11929 enable, vf); 11930 } 11931 11932 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) 11933 { 11934 int ret; 11935 11936 ret = hclge_set_mac_spoofchk(hdev, vf, enable); 11937 if (ret) { 11938 dev_err(&hdev->pdev->dev, 11939 "Set vf %d mac spoof check %s failed, ret=%d\n", 11940 vf, enable ? "on" : "off", ret); 11941 return ret; 11942 } 11943 11944 ret = hclge_set_vlan_spoofchk(hdev, vf, enable); 11945 if (ret) 11946 dev_err(&hdev->pdev->dev, 11947 "Set vf %d vlan spoof check %s failed, ret=%d\n", 11948 vf, enable ? "on" : "off", ret); 11949 11950 return ret; 11951 } 11952 11953 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, 11954 bool enable) 11955 { 11956 struct hclge_vport *vport = hclge_get_vport(handle); 11957 struct hclge_dev *hdev = vport->back; 11958 u32 new_spoofchk = enable ? 1 : 0; 11959 int ret; 11960 11961 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 11962 return -EOPNOTSUPP; 11963 11964 vport = hclge_get_vf_vport(hdev, vf); 11965 if (!vport) 11966 return -EINVAL; 11967 11968 if (vport->vf_info.spoofchk == new_spoofchk) 11969 return 0; 11970 11971 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) 11972 dev_warn(&hdev->pdev->dev, 11973 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", 11974 vf); 11975 else if (enable && hclge_is_umv_space_full(vport, true)) 11976 dev_warn(&hdev->pdev->dev, 11977 "vf %d mac table is full, enable spoof check may cause its packet send fail\n", 11978 vf); 11979 11980 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); 11981 if (ret) 11982 return ret; 11983 11984 vport->vf_info.spoofchk = new_spoofchk; 11985 return 0; 11986 } 11987 11988 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) 11989 { 11990 struct hclge_vport *vport = hdev->vport; 11991 int ret; 11992 int i; 11993 11994 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 11995 return 0; 11996 11997 /* resume the vf spoof check state after reset */ 11998 for (i = 0; i < hdev->num_alloc_vport; i++) { 11999 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, 12000 vport->vf_info.spoofchk); 12001 if (ret) 12002 return ret; 12003 12004 vport++; 12005 } 12006 12007 return 0; 12008 } 12009 12010 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) 12011 { 12012 struct hclge_vport *vport = hclge_get_vport(handle); 12013 struct hclge_dev *hdev = vport->back; 12014 u32 new_trusted = enable ? 
1 : 0; 12015 12016 vport = hclge_get_vf_vport(hdev, vf); 12017 if (!vport) 12018 return -EINVAL; 12019 12020 if (vport->vf_info.trusted == new_trusted) 12021 return 0; 12022 12023 vport->vf_info.trusted = new_trusted; 12024 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12025 hclge_task_schedule(hdev, 0); 12026 12027 return 0; 12028 } 12029 12030 static void hclge_reset_vf_rate(struct hclge_dev *hdev) 12031 { 12032 int ret; 12033 int vf; 12034 12035 /* reset vf rate to default value */ 12036 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { 12037 struct hclge_vport *vport = &hdev->vport[vf]; 12038 12039 vport->vf_info.max_tx_rate = 0; 12040 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); 12041 if (ret) 12042 dev_err(&hdev->pdev->dev, 12043 "vf%d failed to reset to default, ret=%d\n", 12044 vf - HCLGE_VF_VPORT_START_NUM, ret); 12045 } 12046 } 12047 12048 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, 12049 int min_tx_rate, int max_tx_rate) 12050 { 12051 if (min_tx_rate != 0 || 12052 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { 12053 dev_err(&hdev->pdev->dev, 12054 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n", 12055 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); 12056 return -EINVAL; 12057 } 12058 12059 return 0; 12060 } 12061 12062 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, 12063 int min_tx_rate, int max_tx_rate, bool force) 12064 { 12065 struct hclge_vport *vport = hclge_get_vport(handle); 12066 struct hclge_dev *hdev = vport->back; 12067 int ret; 12068 12069 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); 12070 if (ret) 12071 return ret; 12072 12073 vport = hclge_get_vf_vport(hdev, vf); 12074 if (!vport) 12075 return -EINVAL; 12076 12077 if (!force && max_tx_rate == vport->vf_info.max_tx_rate) 12078 return 0; 12079 12080 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate); 12081 if (ret) 12082 return ret; 12083 12084 vport->vf_info.max_tx_rate = max_tx_rate; 12085 12086 return 0; 12087 } 12088 12089 static int hclge_resume_vf_rate(struct hclge_dev *hdev) 12090 { 12091 struct hnae3_handle *handle = &hdev->vport->nic; 12092 struct hclge_vport *vport; 12093 int ret; 12094 int vf; 12095 12096 /* resume the vf max_tx_rate after reset */ 12097 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { 12098 vport = hclge_get_vf_vport(hdev, vf); 12099 if (!vport) 12100 return -EINVAL; 12101 12102 /* zero means max rate, after reset, firmware already set it to 12103 * max rate, so just continue. 12104 */ 12105 if (!vport->vf_info.max_tx_rate) 12106 continue; 12107 12108 ret = hclge_set_vf_rate(handle, vf, 0, 12109 vport->vf_info.max_tx_rate, true); 12110 if (ret) { 12111 dev_err(&hdev->pdev->dev, 12112 "vf%d failed to resume tx_rate:%u, ret=%d\n", 12113 vf, vport->vf_info.max_tx_rate, ret); 12114 return ret; 12115 } 12116 } 12117 12118 return 0; 12119 } 12120 12121 static void hclge_reset_vport_state(struct hclge_dev *hdev) 12122 { 12123 struct hclge_vport *vport = hdev->vport; 12124 int i; 12125 12126 for (i = 0; i < hdev->num_alloc_vport; i++) { 12127 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 12128 vport++; 12129 } 12130 } 12131 12132 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) 12133 { 12134 struct hclge_dev *hdev = ae_dev->priv; 12135 struct pci_dev *pdev = ae_dev->pdev; 12136 int ret; 12137 12138 set_bit(HCLGE_STATE_DOWN, &hdev->state); 12139 12140 hclge_stats_clear(hdev); 12141 /* NOTE: pf reset needn't to clear or restore pf and vf table entry. 
12142 * so here should not clean table in memory. 12143 */ 12144 if (hdev->reset_type == HNAE3_IMP_RESET || 12145 hdev->reset_type == HNAE3_GLOBAL_RESET) { 12146 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); 12147 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); 12148 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); 12149 hclge_reset_umv_space(hdev); 12150 } 12151 12152 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, 12153 true, hdev->reset_pending); 12154 if (ret) { 12155 dev_err(&pdev->dev, "Cmd queue init failed\n"); 12156 return ret; 12157 } 12158 12159 ret = hclge_map_tqp(hdev); 12160 if (ret) { 12161 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); 12162 return ret; 12163 } 12164 12165 ret = hclge_mac_init(hdev); 12166 if (ret) { 12167 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 12168 return ret; 12169 } 12170 12171 ret = hclge_tp_port_init(hdev); 12172 if (ret) { 12173 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n", 12174 ret); 12175 return ret; 12176 } 12177 12178 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 12179 if (ret) { 12180 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 12181 return ret; 12182 } 12183 12184 ret = hclge_config_gro(hdev); 12185 if (ret) 12186 return ret; 12187 12188 ret = hclge_init_vlan_config(hdev); 12189 if (ret) { 12190 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 12191 return ret; 12192 } 12193 12194 hclge_reset_tc_config(hdev); 12195 12196 ret = hclge_tm_init_hw(hdev, true); 12197 if (ret) { 12198 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); 12199 return ret; 12200 } 12201 12202 ret = hclge_rss_init_hw(hdev); 12203 if (ret) { 12204 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 12205 return ret; 12206 } 12207 12208 ret = init_mgr_tbl(hdev); 12209 if (ret) { 12210 dev_err(&pdev->dev, 12211 "failed to reinit manager table, ret = %d\n", ret); 12212 return ret; 12213 } 12214 12215 ret = hclge_init_fd_config(hdev); 12216 if (ret) { 12217 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); 12218 return ret; 12219 } 12220 12221 ret = hclge_ptp_init(hdev); 12222 if (ret) 12223 return ret; 12224 12225 /* Log and clear the hw errors those already occurred */ 12226 if (hnae3_dev_ras_imp_supported(hdev)) 12227 hclge_handle_occurred_error(hdev); 12228 else 12229 hclge_handle_all_hns_hw_errors(ae_dev); 12230 12231 /* Re-enable the hw error interrupts because 12232 * the interrupts get disabled on global reset. 
12233 */ 12234 ret = hclge_config_nic_hw_error(hdev, true); 12235 if (ret) { 12236 dev_err(&pdev->dev, 12237 "fail(%d) to re-enable NIC hw error interrupts\n", 12238 ret); 12239 return ret; 12240 } 12241 12242 if (hdev->roce_client) { 12243 ret = hclge_config_rocee_ras_interrupt(hdev, true); 12244 if (ret) { 12245 dev_err(&pdev->dev, 12246 "fail(%d) to re-enable roce ras interrupts\n", 12247 ret); 12248 return ret; 12249 } 12250 } 12251 12252 hclge_reset_vport_state(hdev); 12253 ret = hclge_reset_vport_spoofchk(hdev); 12254 if (ret) 12255 return ret; 12256 12257 ret = hclge_resume_vf_rate(hdev); 12258 if (ret) 12259 return ret; 12260 12261 hclge_init_rxd_adv_layout(hdev); 12262 12263 ret = hclge_update_wol(hdev); 12264 if (ret) 12265 dev_warn(&pdev->dev, 12266 "failed to update wol config, ret = %d\n", ret); 12267 12268 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 12269 HCLGE_DRIVER_NAME); 12270 12271 return 0; 12272 } 12273 12274 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 12275 { 12276 struct hclge_dev *hdev = ae_dev->priv; 12277 struct hclge_mac *mac = &hdev->hw.mac; 12278 12279 hclge_reset_vf_rate(hdev); 12280 hclge_clear_vf_vlan(hdev); 12281 hclge_state_uninit(hdev); 12282 hclge_ptp_uninit(hdev); 12283 hclge_uninit_rxd_adv_layout(hdev); 12284 hclge_uninit_mac_table(hdev); 12285 hclge_del_all_fd_entries(hdev); 12286 12287 if (mac->phydev) 12288 mdiobus_unregister(mac->mdio_bus); 12289 12290 /* Disable MISC vector(vector0) */ 12291 hclge_enable_vector(&hdev->misc_vector, false); 12292 synchronize_irq(hdev->misc_vector.vector_irq); 12293 12294 /* Disable all hw interrupts */ 12295 hclge_config_mac_tnl_int(hdev, false); 12296 hclge_config_nic_hw_error(hdev, false); 12297 hclge_config_rocee_ras_interrupt(hdev, false); 12298 12299 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 12300 hclge_misc_irq_uninit(hdev); 12301 hclge_devlink_uninit(hdev); 12302 hclge_pci_uninit(hdev); 12303 hclge_uninit_vport_vlan_table(hdev); 12304 mutex_destroy(&hdev->vport_lock); 12305 ae_dev->priv = NULL; 12306 } 12307 12308 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 12309 { 12310 struct hclge_vport *vport = hclge_get_vport(handle); 12311 struct hclge_dev *hdev = vport->back; 12312 12313 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); 12314 } 12315 12316 static void hclge_get_channels(struct hnae3_handle *handle, 12317 struct ethtool_channels *ch) 12318 { 12319 ch->max_combined = hclge_get_max_channels(handle); 12320 ch->other_count = 1; 12321 ch->max_other = 1; 12322 ch->combined_count = handle->kinfo.rss_size; 12323 } 12324 12325 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 12326 u16 *alloc_tqps, u16 *max_rss_size) 12327 { 12328 struct hclge_vport *vport = hclge_get_vport(handle); 12329 struct hclge_dev *hdev = vport->back; 12330 12331 *alloc_tqps = vport->alloc_tqps; 12332 *max_rss_size = hdev->pf_rss_size_max; 12333 } 12334 12335 static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle) 12336 { 12337 struct hclge_vport *vport = hclge_get_vport(handle); 12338 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 12339 struct hclge_dev *hdev = vport->back; 12340 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 12341 u16 tc_valid[HCLGE_MAX_TC_NUM]; 12342 u16 roundup_size; 12343 unsigned int i; 12344 12345 roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); 12346 roundup_size = ilog2(roundup_size); 12347 /* Set the RSS TC mode according to the new RSS size */ 12348 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 12349 
tc_valid[i] = 0; 12350 12351 if (!(hdev->hw_tc_map & BIT(i))) 12352 continue; 12353 12354 tc_valid[i] = 1; 12355 tc_size[i] = roundup_size; 12356 tc_offset[i] = vport->nic.kinfo.rss_size * i; 12357 } 12358 12359 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, 12360 tc_size); 12361 } 12362 12363 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 12364 bool rxfh_configured) 12365 { 12366 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 12367 struct hclge_vport *vport = hclge_get_vport(handle); 12368 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 12369 struct hclge_dev *hdev = vport->back; 12370 u16 cur_rss_size = kinfo->rss_size; 12371 u16 cur_tqps = kinfo->num_tqps; 12372 u32 *rss_indir; 12373 unsigned int i; 12374 int ret; 12375 12376 kinfo->req_rss_size = new_tqps_num; 12377 12378 ret = hclge_tm_vport_map_update(hdev); 12379 if (ret) { 12380 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); 12381 return ret; 12382 } 12383 12384 ret = hclge_set_rss_tc_mode_cfg(handle); 12385 if (ret) 12386 return ret; 12387 12388 /* RSS indirection table has been configured by user */ 12389 if (rxfh_configured) 12390 goto out; 12391 12392 /* Reinitializes the rss indirect table according to the new RSS size */ 12393 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32), 12394 GFP_KERNEL); 12395 if (!rss_indir) 12396 return -ENOMEM; 12397 12398 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) 12399 rss_indir[i] = i % kinfo->rss_size; 12400 12401 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 12402 if (ret) 12403 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 12404 ret); 12405 12406 kfree(rss_indir); 12407 12408 out: 12409 if (!ret) 12410 dev_info(&hdev->pdev->dev, 12411 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 12412 cur_rss_size, kinfo->rss_size, 12413 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 12414 12415 return ret; 12416 } 12417 12418 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) 12419 { 12420 struct hclge_set_led_state_cmd *req; 12421 struct hclge_desc desc; 12422 int ret; 12423 12424 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); 12425 12426 req = (struct hclge_set_led_state_cmd *)desc.data; 12427 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, 12428 HCLGE_LED_LOCATE_STATE_S, locate_led_status); 12429 12430 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12431 if (ret) 12432 dev_err(&hdev->pdev->dev, 12433 "Send set led state cmd error, ret =%d\n", ret); 12434 12435 return ret; 12436 } 12437 12438 enum hclge_led_status { 12439 HCLGE_LED_OFF, 12440 HCLGE_LED_ON, 12441 HCLGE_LED_NO_CHANGE = 0xFF, 12442 }; 12443 12444 static int hclge_set_led_id(struct hnae3_handle *handle, 12445 enum ethtool_phys_id_state status) 12446 { 12447 struct hclge_vport *vport = hclge_get_vport(handle); 12448 struct hclge_dev *hdev = vport->back; 12449 12450 switch (status) { 12451 case ETHTOOL_ID_ACTIVE: 12452 return hclge_set_led_status(hdev, HCLGE_LED_ON); 12453 case ETHTOOL_ID_INACTIVE: 12454 return hclge_set_led_status(hdev, HCLGE_LED_OFF); 12455 default: 12456 return -EINVAL; 12457 } 12458 } 12459 12460 static void hclge_get_link_mode(struct hnae3_handle *handle, 12461 unsigned long *supported, 12462 unsigned long *advertising) 12463 { 12464 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); 12465 struct hclge_vport *vport = hclge_get_vport(handle); 12466 struct hclge_dev *hdev = 
vport->back; 12467 unsigned int idx = 0; 12468 12469 for (; idx < size; idx++) { 12470 supported[idx] = hdev->hw.mac.supported[idx]; 12471 advertising[idx] = hdev->hw.mac.advertising[idx]; 12472 } 12473 } 12474 12475 static int hclge_gro_en(struct hnae3_handle *handle, bool enable) 12476 { 12477 struct hclge_vport *vport = hclge_get_vport(handle); 12478 struct hclge_dev *hdev = vport->back; 12479 bool gro_en_old = hdev->gro_en; 12480 int ret; 12481 12482 hdev->gro_en = enable; 12483 ret = hclge_config_gro(hdev); 12484 if (ret) 12485 hdev->gro_en = gro_en_old; 12486 12487 return ret; 12488 } 12489 12490 static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport) 12491 { 12492 struct hnae3_handle *handle = &vport->nic; 12493 struct hclge_dev *hdev = vport->back; 12494 bool uc_en = false; 12495 bool mc_en = false; 12496 u8 tmp_flags; 12497 bool bc_en; 12498 int ret; 12499 12500 if (vport->last_promisc_flags != vport->overflow_promisc_flags) { 12501 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12502 vport->last_promisc_flags = vport->overflow_promisc_flags; 12503 } 12504 12505 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, 12506 &vport->state)) 12507 return 0; 12508 12509 /* for PF */ 12510 if (!vport->vport_id) { 12511 tmp_flags = handle->netdev_flags | vport->last_promisc_flags; 12512 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, 12513 tmp_flags & HNAE3_MPE); 12514 if (!ret) 12515 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 12516 &vport->state); 12517 else 12518 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, 12519 &vport->state); 12520 return ret; 12521 } 12522 12523 /* for VF */ 12524 if (vport->vf_info.trusted) { 12525 uc_en = vport->vf_info.request_uc_en > 0 || 12526 vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; 12527 mc_en = vport->vf_info.request_mc_en > 0 || 12528 vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; 12529 } 12530 bc_en = vport->vf_info.request_bc_en > 0; 12531 12532 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, 12533 mc_en, bc_en); 12534 if (ret) { 12535 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12536 return ret; 12537 } 12538 hclge_set_vport_vlan_fltr_change(vport); 12539 12540 return 0; 12541 } 12542 12543 static void hclge_sync_promisc_mode(struct hclge_dev *hdev) 12544 { 12545 struct hclge_vport *vport; 12546 int ret; 12547 u16 i; 12548 12549 for (i = 0; i < hdev->num_alloc_vport; i++) { 12550 vport = &hdev->vport[i]; 12551 12552 ret = hclge_sync_vport_promisc_mode(vport); 12553 if (ret) 12554 return; 12555 } 12556 } 12557 12558 static bool hclge_module_existed(struct hclge_dev *hdev) 12559 { 12560 struct hclge_desc desc; 12561 u32 existed; 12562 int ret; 12563 12564 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true); 12565 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12566 if (ret) { 12567 dev_err(&hdev->pdev->dev, 12568 "failed to get SFP exist state, ret = %d\n", ret); 12569 return false; 12570 } 12571 12572 existed = le32_to_cpu(desc.data[0]); 12573 12574 return existed != 0; 12575 } 12576 12577 /* need 6 bds(total 140 bytes) in one reading 12578 * return the number of bytes actually read, 0 means read failed. 12579 */ 12580 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, 12581 u32 len, u8 *data) 12582 { 12583 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM]; 12584 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0; 12585 u16 read_len; 12586 u16 copy_len; 12587 int ret; 12588 int i; 12589 12590 /* setup all 6 bds to read module eeprom info. 
*/ 12591 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) { 12592 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM, 12593 true); 12594 12595 /* bd0~bd4 need next flag */ 12596 if (i < HCLGE_SFP_INFO_CMD_NUM - 1) 12597 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 12598 } 12599 12600 /* setup bd0, this bd contains offset and read length. */ 12601 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data; 12602 sfp_info_bd0->offset = cpu_to_le16((u16)offset); 12603 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN); 12604 sfp_info_bd0->read_len = cpu_to_le16(read_len); 12605 12606 ret = hclge_cmd_send(&hdev->hw, desc, i); 12607 if (ret) { 12608 dev_err(&hdev->pdev->dev, 12609 "failed to get SFP eeprom info, ret = %d\n", ret); 12610 return 0; 12611 } 12612 12613 /* copy sfp info from bd0 to out buffer. */ 12614 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN); 12615 memcpy(data, sfp_info_bd0->data, copy_len); 12616 read_len = copy_len; 12617 12618 /* copy sfp info from bd1~bd5 to out buffer if needed. */ 12619 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) { 12620 if (read_len >= len) 12621 return read_len; 12622 12623 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); 12624 memcpy(data + read_len, desc[i].data, copy_len); 12625 read_len += copy_len; 12626 } 12627 12628 return read_len; 12629 } 12630 12631 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, 12632 u32 len, u8 *data) 12633 { 12634 struct hclge_vport *vport = hclge_get_vport(handle); 12635 struct hclge_dev *hdev = vport->back; 12636 u32 read_len = 0; 12637 u16 data_len; 12638 12639 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) 12640 return -EOPNOTSUPP; 12641 12642 if (!hclge_module_existed(hdev)) 12643 return -ENXIO; 12644 12645 while (read_len < len) { 12646 data_len = hclge_get_sfp_eeprom_info(hdev, 12647 offset + read_len, 12648 len - read_len, 12649 data + read_len); 12650 if (!data_len) 12651 return -EIO; 12652 12653 read_len += data_len; 12654 } 12655 12656 return 0; 12657 } 12658 12659 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, 12660 u32 *status_code) 12661 { 12662 struct hclge_vport *vport = hclge_get_vport(handle); 12663 struct hclge_dev *hdev = vport->back; 12664 struct hclge_desc desc; 12665 int ret; 12666 12667 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) 12668 return -EOPNOTSUPP; 12669 12670 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); 12671 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12672 if (ret) { 12673 dev_err(&hdev->pdev->dev, 12674 "failed to query link diagnosis info, ret = %d\n", ret); 12675 return ret; 12676 } 12677 12678 *status_code = le32_to_cpu(desc.data[0]); 12679 return 0; 12680 } 12681 12682 /* After SR-IOV is disabled, the VF still has some config and info that 12683 * was configured by the PF and needs to be cleaned up. 
12684 */ 12685 static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid) 12686 { 12687 struct hclge_dev *hdev = vport->back; 12688 struct hclge_vlan_info vlan_info; 12689 int ret; 12690 12691 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 12692 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 12693 vport->need_notify = 0; 12694 vport->mps = 0; 12695 12696 /* after disable sriov, clean VF rate configured by PF */ 12697 ret = hclge_tm_qs_shaper_cfg(vport, 0); 12698 if (ret) 12699 dev_err(&hdev->pdev->dev, 12700 "failed to clean vf%d rate config, ret = %d\n", 12701 vfid, ret); 12702 12703 vlan_info.vlan_tag = 0; 12704 vlan_info.qos = 0; 12705 vlan_info.vlan_proto = ETH_P_8021Q; 12706 ret = hclge_update_port_base_vlan_cfg(vport, 12707 HNAE3_PORT_BASE_VLAN_DISABLE, 12708 &vlan_info); 12709 if (ret) 12710 dev_err(&hdev->pdev->dev, 12711 "failed to clean vf%d port base vlan, ret = %d\n", 12712 vfid, ret); 12713 12714 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); 12715 if (ret) 12716 dev_err(&hdev->pdev->dev, 12717 "failed to clean vf%d spoof config, ret = %d\n", 12718 vfid, ret); 12719 12720 memset(&vport->vf_info, 0, sizeof(vport->vf_info)); 12721 } 12722 12723 static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) 12724 { 12725 struct hclge_dev *hdev = ae_dev->priv; 12726 struct hclge_vport *vport; 12727 int i; 12728 12729 for (i = 0; i < num_vfs; i++) { 12730 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; 12731 12732 hclge_clear_vport_vf_info(vport, i); 12733 } 12734 } 12735 12736 static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode, 12737 u8 *priority) 12738 { 12739 struct hclge_vport *vport = hclge_get_vport(h); 12740 12741 if (dscp >= HNAE3_MAX_DSCP) 12742 return -EINVAL; 12743 12744 if (tc_mode) 12745 *tc_mode = vport->nic.kinfo.tc_map_mode; 12746 if (priority) 12747 *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 
0 : 12748 vport->nic.kinfo.dscp_prio[dscp]; 12749 12750 return 0; 12751 } 12752 12753 static const struct hnae3_ae_ops hclge_ops = { 12754 .init_ae_dev = hclge_init_ae_dev, 12755 .uninit_ae_dev = hclge_uninit_ae_dev, 12756 .reset_prepare = hclge_reset_prepare_general, 12757 .reset_done = hclge_reset_done, 12758 .init_client_instance = hclge_init_client_instance, 12759 .uninit_client_instance = hclge_uninit_client_instance, 12760 .map_ring_to_vector = hclge_map_ring_to_vector, 12761 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, 12762 .get_vector = hclge_get_vector, 12763 .put_vector = hclge_put_vector, 12764 .set_promisc_mode = hclge_set_promisc_mode, 12765 .request_update_promisc_mode = hclge_request_update_promisc_mode, 12766 .set_loopback = hclge_set_loopback, 12767 .start = hclge_ae_start, 12768 .stop = hclge_ae_stop, 12769 .client_start = hclge_client_start, 12770 .client_stop = hclge_client_stop, 12771 .get_status = hclge_get_status, 12772 .get_ksettings_an_result = hclge_get_ksettings_an_result, 12773 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, 12774 .get_media_type = hclge_get_media_type, 12775 .check_port_speed = hclge_check_port_speed, 12776 .get_fec_stats = hclge_get_fec_stats, 12777 .get_fec = hclge_get_fec, 12778 .set_fec = hclge_set_fec, 12779 .get_rss_key_size = hclge_comm_get_rss_key_size, 12780 .get_rss = hclge_get_rss, 12781 .set_rss = hclge_set_rss, 12782 .set_rss_tuple = hclge_set_rss_tuple, 12783 .get_rss_tuple = hclge_get_rss_tuple, 12784 .get_tc_size = hclge_get_tc_size, 12785 .get_mac_addr = hclge_get_mac_addr, 12786 .set_mac_addr = hclge_set_mac_addr, 12787 .do_ioctl = hclge_do_ioctl, 12788 .add_uc_addr = hclge_add_uc_addr, 12789 .rm_uc_addr = hclge_rm_uc_addr, 12790 .add_mc_addr = hclge_add_mc_addr, 12791 .rm_mc_addr = hclge_rm_mc_addr, 12792 .set_autoneg = hclge_set_autoneg, 12793 .get_autoneg = hclge_get_autoneg, 12794 .restart_autoneg = hclge_restart_autoneg, 12795 .halt_autoneg = hclge_halt_autoneg, 12796 .get_pauseparam = hclge_get_pauseparam, 12797 .set_pauseparam = hclge_set_pauseparam, 12798 .set_mtu = hclge_set_mtu, 12799 .reset_queue = hclge_reset_tqp, 12800 .get_stats = hclge_get_stats, 12801 .get_mac_stats = hclge_get_mac_stat, 12802 .update_stats = hclge_update_stats, 12803 .get_strings = hclge_get_strings, 12804 .get_sset_count = hclge_get_sset_count, 12805 .get_fw_version = hclge_get_fw_version, 12806 .get_mdix_mode = hclge_get_mdix_mode, 12807 .enable_vlan_filter = hclge_enable_vlan_filter, 12808 .set_vlan_filter = hclge_set_vlan_filter, 12809 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 12810 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 12811 .reset_event = hclge_reset_event, 12812 .get_reset_level = hclge_get_reset_level, 12813 .set_default_reset_request = hclge_set_def_reset_request, 12814 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, 12815 .set_channels = hclge_set_channels, 12816 .get_channels = hclge_get_channels, 12817 .get_regs_len = hclge_get_regs_len, 12818 .get_regs = hclge_get_regs, 12819 .set_led_id = hclge_set_led_id, 12820 .get_link_mode = hclge_get_link_mode, 12821 .add_fd_entry = hclge_add_fd_entry, 12822 .del_fd_entry = hclge_del_fd_entry, 12823 .get_fd_rule_cnt = hclge_get_fd_rule_cnt, 12824 .get_fd_rule_info = hclge_get_fd_rule_info, 12825 .get_fd_all_rules = hclge_get_all_rules, 12826 .enable_fd = hclge_enable_fd, 12827 .add_arfs_entry = hclge_add_fd_entry_by_arfs, 12828 .dbg_read_cmd = hclge_dbg_read_cmd, 12829 .handle_hw_ras_error = hclge_handle_hw_ras_error, 12830 .get_hw_reset_stat = 
hclge_get_hw_reset_stat, 12831 .ae_dev_resetting = hclge_ae_dev_resetting, 12832 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, 12833 .set_gro_en = hclge_gro_en, 12834 .get_global_queue_id = hclge_covert_handle_qid_global, 12835 .set_timer_task = hclge_set_timer_task, 12836 .mac_connect_phy = hclge_mac_connect_phy, 12837 .mac_disconnect_phy = hclge_mac_disconnect_phy, 12838 .get_vf_config = hclge_get_vf_config, 12839 .set_vf_link_state = hclge_set_vf_link_state, 12840 .set_vf_spoofchk = hclge_set_vf_spoofchk, 12841 .set_vf_trust = hclge_set_vf_trust, 12842 .set_vf_rate = hclge_set_vf_rate, 12843 .set_vf_mac = hclge_set_vf_mac, 12844 .get_module_eeprom = hclge_get_module_eeprom, 12845 .get_cmdq_stat = hclge_get_cmdq_stat, 12846 .add_cls_flower = hclge_add_cls_flower, 12847 .del_cls_flower = hclge_del_cls_flower, 12848 .cls_flower_active = hclge_is_cls_flower_active, 12849 .get_phy_link_ksettings = hclge_get_phy_link_ksettings, 12850 .set_phy_link_ksettings = hclge_set_phy_link_ksettings, 12851 .set_tx_hwts_info = hclge_ptp_set_tx_info, 12852 .get_rx_hwts = hclge_ptp_get_rx_hwts, 12853 .get_ts_info = hclge_ptp_get_ts_info, 12854 .get_link_diagnosis_info = hclge_get_link_diagnosis_info, 12855 .clean_vf_config = hclge_clean_vport_config, 12856 .get_dscp_prio = hclge_get_dscp_prio, 12857 .get_wol = hclge_get_wol, 12858 .set_wol = hclge_set_wol, 12859 }; 12860 12861 static struct hnae3_ae_algo ae_algo = { 12862 .ops = &hclge_ops, 12863 .pdev_id_table = ae_algo_pci_tbl, 12864 }; 12865 12866 static int __init hclge_init(void) 12867 { 12868 pr_info("%s is initializing\n", HCLGE_NAME); 12869 12870 hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); 12871 if (!hclge_wq) { 12872 pr_err("%s: failed to create workqueue\n", HCLGE_NAME); 12873 return -ENOMEM; 12874 } 12875 12876 hnae3_register_ae_algo(&ae_algo); 12877 12878 return 0; 12879 } 12880 12881 static void __exit hclge_exit(void) 12882 { 12883 hnae3_unregister_ae_algo_prepare(&ae_algo); 12884 hnae3_unregister_ae_algo(&ae_algo); 12885 destroy_workqueue(hclge_wq); 12886 } 12887 module_init(hclge_init); 12888 module_exit(hclge_exit); 12889 12890 MODULE_LICENSE("GPL"); 12891 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 12892 MODULE_DESCRIPTION("HCLGE Driver"); 12893 MODULE_VERSION(HCLGE_MOD_VERSION); 12894