// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"

#define HCLGE_NAME			"hclge"

#define HCLGE_BUF_SIZE_UNIT		256U
#define HCLGE_BUF_MUL_BY		2
#define HCLGE_BUF_DIV_BY		2
#define NEED_RESERVE_TC_NUM		2
#define BUF_MAX_PERCENT			100
#define BUF_RESERVE_PERCENT		90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

#define HCLGE_LINK_STATUS_MS		10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
				      int wait_cnt);
static int hclge_update_port_info(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"External Loopback test",
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pause_xoff_time",
HCLGE_MAC_STATS_MAX_NUM_V2, 99 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)}, 100 {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 101 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)}, 102 {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 103 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, 104 {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 105 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, 106 {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 107 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, 108 {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 109 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, 110 {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 111 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, 112 {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 113 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, 114 {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 115 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, 116 {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 117 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, 118 {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 119 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, 120 {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 121 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, 122 {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 123 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, 124 {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 125 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)}, 126 {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 127 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)}, 128 {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 129 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)}, 130 {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 131 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)}, 132 {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 133 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)}, 134 {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 135 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)}, 136 {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 137 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)}, 138 {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 139 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)}, 140 {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 141 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, 142 {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 143 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, 144 {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 145 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, 146 {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 147 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, 148 {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 149 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, 150 {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 151 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, 152 {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 153 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, 154 {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 155 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, 156 {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 157 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, 158 {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 159 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)}, 160 {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 161 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)}, 162 {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 163 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)}, 164 {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 165 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)}, 166 {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 167 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)}, 168 {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 169 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)}, 170 {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 171 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)}, 172 {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, 173 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)}, 174 {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 175 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, 176 {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 177 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, 178 {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 179 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, 180 {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 181 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, 182 {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 183 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, 184 {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 185 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, 186 {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 187 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, 188 {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 189 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, 190 {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 191 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, 192 {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 193 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, 194 {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 195 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, 196 {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 197 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, 198 {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 199 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, 200 {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 201 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, 202 {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 203 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, 204 {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 205 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, 206 {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 207 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, 208 {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 209 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, 210 {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 211 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, 212 {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 213 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, 214 {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 215 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, 216 {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 217 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, 218 {"mac_tx_12288_16383_oct_pkt_num", 
HCLGE_MAC_STATS_MAX_NUM_V1, 219 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, 220 {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 221 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, 222 {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 223 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, 224 {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 225 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, 226 {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 227 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, 228 {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 229 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, 230 {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 231 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, 232 {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 233 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, 234 {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, 235 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, 236 {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 237 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, 238 {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 239 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, 240 {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 241 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, 242 {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 243 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, 244 {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 245 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, 246 {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 247 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, 248 {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 249 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, 250 {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 251 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, 252 {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 253 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, 254 {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 255 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, 256 {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 257 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, 258 {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 259 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, 260 {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 261 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, 262 {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 263 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, 264 {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 265 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, 266 {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 267 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, 268 {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 269 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, 270 {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 271 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, 272 {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 273 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, 274 275 {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 276 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, 277 {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 278 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, 279 {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 280 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, 281 {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 282 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, 283 {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 284 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, 285 {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 286 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, 287 {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, 289 {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, 291 {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, 293 {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, 295 {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, 297 {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} 299 }; 300 301 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { 302 { 303 .flags = HCLGE_MAC_MGR_MASK_VLAN_B, 304 .ethter_type = cpu_to_le16(ETH_P_LLDP), 305 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e}, 306 .i_port_bitmap = 0x1, 307 }, 308 }; 309 310 static const struct key_info meta_data_key_info[] = { 311 { PACKET_TYPE_ID, 6 }, 312 { IP_FRAGEMENT, 1 }, 313 { ROCE_TYPE, 1 }, 314 { NEXT_KEY, 5 }, 315 { VLAN_NUMBER, 2 }, 316 { SRC_VPORT, 12 }, 317 { DST_VPORT, 12 }, 318 { TUNNEL_PACKET, 1 }, 319 }; 320 321 static const struct key_info tuple_key_info[] = { 322 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 }, 323 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 }, 324 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 }, 325 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, 326 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 }, 327 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 }, 328 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 }, 329 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 }, 330 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 }, 331 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 }, 332 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 }, 333 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 }, 334 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 }, 335 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 }, 336 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 }, 337 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 }, 338 { INNER_DST_MAC, 48, KEY_OPT_MAC, 339 offsetof(struct hclge_fd_rule, tuples.dst_mac), 340 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) }, 341 { INNER_SRC_MAC, 48, KEY_OPT_MAC, 342 offsetof(struct hclge_fd_rule, tuples.src_mac), 343 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) }, 344 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16, 345 offsetof(struct hclge_fd_rule, tuples.vlan_tag1), 346 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) }, 347 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 }, 348 { INNER_ETH_TYPE, 16, KEY_OPT_LE16, 349 offsetof(struct hclge_fd_rule, tuples.ether_proto), 350 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) }, 351 { INNER_L2_RSV, 16, KEY_OPT_LE16, 352 offsetof(struct hclge_fd_rule, tuples.l2_user_def), 353 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) }, 354 { INNER_IP_TOS, 8, KEY_OPT_U8, 355 offsetof(struct hclge_fd_rule, tuples.ip_tos), 356 offsetof(struct 
hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it
 * sends the queue, cleans the queue, etc.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	/* The first desc has a 64-bit header, so the data size is one less */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC 4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so one extra desc is needed */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/*
data memory is continuous becase only the first desc has a 465 * header in this command 466 */ 467 *data += le64_to_cpu(*desc_data); 468 data++; 469 desc_data++; 470 } 471 472 kfree(desc); 473 474 return 0; 475 } 476 477 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num) 478 { 479 struct hclge_desc desc; 480 int ret; 481 482 /* Driver needs total register number of both valid registers and 483 * reserved registers, but the old firmware only returns number 484 * of valid registers in device V2. To be compatible with these 485 * devices, driver uses a fixed value. 486 */ 487 if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { 488 *reg_num = HCLGE_MAC_STATS_MAX_NUM_V1; 489 return 0; 490 } 491 492 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); 493 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 494 if (ret) { 495 dev_err(&hdev->pdev->dev, 496 "failed to query mac statistic reg number, ret = %d\n", 497 ret); 498 return ret; 499 } 500 501 *reg_num = le32_to_cpu(desc.data[0]); 502 if (*reg_num == 0) { 503 dev_err(&hdev->pdev->dev, 504 "mac statistic reg number is invalid!\n"); 505 return -ENODATA; 506 } 507 508 return 0; 509 } 510 511 int hclge_mac_update_stats(struct hclge_dev *hdev) 512 { 513 /* The firmware supports the new statistics acquisition method */ 514 if (hdev->ae_dev->dev_specs.mac_stats_num) 515 return hclge_mac_update_stats_complete(hdev); 516 else 517 return hclge_mac_update_stats_defective(hdev); 518 } 519 520 static int hclge_comm_get_count(struct hclge_dev *hdev, 521 const struct hclge_comm_stats_str strs[], 522 u32 size) 523 { 524 int count = 0; 525 u32 i; 526 527 for (i = 0; i < size; i++) 528 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) 529 count++; 530 531 return count; 532 } 533 534 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, 535 const struct hclge_comm_stats_str strs[], 536 int size, u64 *data) 537 { 538 u64 *buf = data; 539 u32 i; 540 541 for (i = 0; i < size; i++) { 542 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) 543 continue; 544 545 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); 546 buf++; 547 } 548 549 return buf; 550 } 551 552 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, 553 const struct hclge_comm_stats_str strs[], 554 int size, u8 *data) 555 { 556 char *buff = (char *)data; 557 u32 i; 558 559 if (stringset != ETH_SS_STATS) 560 return buff; 561 562 for (i = 0; i < size; i++) { 563 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) 564 continue; 565 566 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); 567 buff = buff + ETH_GSTRING_LEN; 568 } 569 570 return (u8 *)buff; 571 } 572 573 static void hclge_update_stats_for_all(struct hclge_dev *hdev) 574 { 575 struct hnae3_handle *handle; 576 int status; 577 578 handle = &hdev->vport[0].nic; 579 if (handle->client) { 580 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 581 if (status) { 582 dev_err(&hdev->pdev->dev, 583 "Update TQPS stats fail, status = %d.\n", 584 status); 585 } 586 } 587 588 hclge_update_fec_stats(hdev); 589 590 status = hclge_mac_update_stats(hdev); 591 if (status) 592 dev_err(&hdev->pdev->dev, 593 "Update MAC stats fail, status = %d.\n", status); 594 } 595 596 static void hclge_update_stats(struct hnae3_handle *handle) 597 { 598 struct hclge_vport *vport = hclge_get_vport(handle); 599 struct hclge_dev *hdev = vport->back; 600 int status; 601 602 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) 603 return; 604 605 
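	/* Descriptive note (added): HCLGE_STATE_STATISTICS_UPDATING is used as
	 * a simple re-entrancy guard. test_and_set_bit() above atomically
	 * claims the flag, so a second caller racing in here just returns and
	 * leaves the counters to the update already in flight; the flag is
	 * released by the clear_bit() at the end of this function.
	 */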
status = hclge_mac_update_stats(hdev); 606 if (status) 607 dev_err(&hdev->pdev->dev, 608 "Update MAC stats fail, status = %d.\n", 609 status); 610 611 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 612 if (status) 613 dev_err(&hdev->pdev->dev, 614 "Update TQPS stats fail, status = %d.\n", 615 status); 616 617 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); 618 } 619 620 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) 621 { 622 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \ 623 HNAE3_SUPPORT_PHY_LOOPBACK | \ 624 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \ 625 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \ 626 HNAE3_SUPPORT_EXTERNAL_LOOPBACK) 627 628 struct hclge_vport *vport = hclge_get_vport(handle); 629 struct hclge_dev *hdev = vport->back; 630 int count = 0; 631 632 /* Loopback test support rules: 633 * mac: only GE mode support 634 * serdes: all mac mode will support include GE/XGE/LGE/CGE 635 * phy: only support when phy device exist on board 636 */ 637 if (stringset == ETH_SS_TEST) { 638 /* clear loopback bit flags at first */ 639 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); 640 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || 641 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || 642 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || 643 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { 644 count += 1; 645 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; 646 } 647 648 count += 1; 649 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; 650 count += 1; 651 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; 652 count += 1; 653 handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK; 654 655 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && 656 hdev->hw.mac.phydev->drv->set_loopback) || 657 hnae3_dev_phy_imp_supported(hdev)) { 658 count += 1; 659 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; 660 } 661 } else if (stringset == ETH_SS_STATS) { 662 count = hclge_comm_get_count(hdev, g_mac_stats_string, 663 ARRAY_SIZE(g_mac_stats_string)) + 664 hclge_comm_tqps_get_sset_count(handle); 665 } 666 667 return count; 668 } 669 670 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, 671 u8 *data) 672 { 673 struct hclge_vport *vport = hclge_get_vport(handle); 674 struct hclge_dev *hdev = vport->back; 675 u8 *p = (char *)data; 676 int size; 677 678 if (stringset == ETH_SS_STATS) { 679 size = ARRAY_SIZE(g_mac_stats_string); 680 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, 681 size, p); 682 p = hclge_comm_tqps_get_strings(handle, p); 683 } else if (stringset == ETH_SS_TEST) { 684 if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) { 685 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL], 686 ETH_GSTRING_LEN); 687 p += ETH_GSTRING_LEN; 688 } 689 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) { 690 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP], 691 ETH_GSTRING_LEN); 692 p += ETH_GSTRING_LEN; 693 } 694 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) { 695 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES], 696 ETH_GSTRING_LEN); 697 p += ETH_GSTRING_LEN; 698 } 699 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) { 700 memcpy(p, 701 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES], 702 ETH_GSTRING_LEN); 703 p += ETH_GSTRING_LEN; 704 } 705 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { 706 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY], 707 ETH_GSTRING_LEN); 708 p += ETH_GSTRING_LEN; 709 } 710 } 711 } 712 713 static void hclge_get_stats(struct hnae3_handle *handle, 
u64 *data) 714 { 715 struct hclge_vport *vport = hclge_get_vport(handle); 716 struct hclge_dev *hdev = vport->back; 717 u64 *p; 718 719 p = hclge_comm_get_stats(hdev, g_mac_stats_string, 720 ARRAY_SIZE(g_mac_stats_string), data); 721 p = hclge_comm_tqps_get_stats(handle, p); 722 } 723 724 static void hclge_get_mac_stat(struct hnae3_handle *handle, 725 struct hns3_mac_stats *mac_stats) 726 { 727 struct hclge_vport *vport = hclge_get_vport(handle); 728 struct hclge_dev *hdev = vport->back; 729 730 hclge_update_stats(handle); 731 732 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; 733 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; 734 } 735 736 static int hclge_parse_func_status(struct hclge_dev *hdev, 737 struct hclge_func_status_cmd *status) 738 { 739 #define HCLGE_MAC_ID_MASK 0xF 740 741 if (!(status->pf_state & HCLGE_PF_STATE_DONE)) 742 return -EINVAL; 743 744 /* Set the pf to main pf */ 745 if (status->pf_state & HCLGE_PF_STATE_MAIN) 746 hdev->flag |= HCLGE_FLAG_MAIN; 747 else 748 hdev->flag &= ~HCLGE_FLAG_MAIN; 749 750 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; 751 return 0; 752 } 753 754 static int hclge_query_function_status(struct hclge_dev *hdev) 755 { 756 #define HCLGE_QUERY_MAX_CNT 5 757 758 struct hclge_func_status_cmd *req; 759 struct hclge_desc desc; 760 int timeout = 0; 761 int ret; 762 763 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); 764 req = (struct hclge_func_status_cmd *)desc.data; 765 766 do { 767 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 768 if (ret) { 769 dev_err(&hdev->pdev->dev, 770 "query function status failed %d.\n", ret); 771 return ret; 772 } 773 774 /* Check pf reset is done */ 775 if (req->pf_state) 776 break; 777 usleep_range(1000, 2000); 778 } while (timeout++ < HCLGE_QUERY_MAX_CNT); 779 780 return hclge_parse_func_status(hdev, req); 781 } 782 783 static int hclge_query_pf_resource(struct hclge_dev *hdev) 784 { 785 struct hclge_pf_res_cmd *req; 786 struct hclge_desc desc; 787 int ret; 788 789 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); 790 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 791 if (ret) { 792 dev_err(&hdev->pdev->dev, 793 "query pf resource failed %d.\n", ret); 794 return ret; 795 } 796 797 req = (struct hclge_pf_res_cmd *)desc.data; 798 hdev->num_tqps = le16_to_cpu(req->tqp_num) + 799 le16_to_cpu(req->ext_tqp_num); 800 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 801 802 if (req->tx_buf_size) 803 hdev->tx_buf_size = 804 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; 805 else 806 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; 807 808 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); 809 810 if (req->dv_buf_size) 811 hdev->dv_buf_size = 812 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; 813 else 814 hdev->dv_buf_size = HCLGE_DEFAULT_DV; 815 816 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); 817 818 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); 819 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { 820 dev_err(&hdev->pdev->dev, 821 "only %u msi resources available, not enough for pf(min:2).\n", 822 hdev->num_nic_msi); 823 return -EINVAL; 824 } 825 826 if (hnae3_dev_roce_supported(hdev)) { 827 hdev->num_roce_msi = 828 le16_to_cpu(req->pf_intr_vector_number_roce); 829 830 /* PF should have NIC vectors and Roce vectors, 831 * NIC vectors are queued before Roce vectors. 
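		 * As an illustration only (these are not values any particular
		 * device reports): if the firmware returned num_nic_msi = 65
		 * and num_roce_msi = 64, num_msi below would be 129, with the
		 * RoCE vectors starting right after the last NIC vector.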
832 */ 833 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; 834 } else { 835 hdev->num_msi = hdev->num_nic_msi; 836 } 837 838 return 0; 839 } 840 841 static int hclge_parse_speed(u8 speed_cmd, u32 *speed) 842 { 843 switch (speed_cmd) { 844 case HCLGE_FW_MAC_SPEED_10M: 845 *speed = HCLGE_MAC_SPEED_10M; 846 break; 847 case HCLGE_FW_MAC_SPEED_100M: 848 *speed = HCLGE_MAC_SPEED_100M; 849 break; 850 case HCLGE_FW_MAC_SPEED_1G: 851 *speed = HCLGE_MAC_SPEED_1G; 852 break; 853 case HCLGE_FW_MAC_SPEED_10G: 854 *speed = HCLGE_MAC_SPEED_10G; 855 break; 856 case HCLGE_FW_MAC_SPEED_25G: 857 *speed = HCLGE_MAC_SPEED_25G; 858 break; 859 case HCLGE_FW_MAC_SPEED_40G: 860 *speed = HCLGE_MAC_SPEED_40G; 861 break; 862 case HCLGE_FW_MAC_SPEED_50G: 863 *speed = HCLGE_MAC_SPEED_50G; 864 break; 865 case HCLGE_FW_MAC_SPEED_100G: 866 *speed = HCLGE_MAC_SPEED_100G; 867 break; 868 case HCLGE_FW_MAC_SPEED_200G: 869 *speed = HCLGE_MAC_SPEED_200G; 870 break; 871 default: 872 return -EINVAL; 873 } 874 875 return 0; 876 } 877 878 static const struct hclge_speed_bit_map speed_bit_map[] = { 879 {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT}, 880 {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT}, 881 {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT}, 882 {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, 883 {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, 884 {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, 885 {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS}, 886 {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS}, 887 {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT}, 888 }; 889 890 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) 891 { 892 u16 i; 893 894 for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { 895 if (speed == speed_bit_map[i].speed) { 896 *speed_bit = speed_bit_map[i].speed_bit; 897 return 0; 898 } 899 } 900 901 return -EINVAL; 902 } 903 904 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) 905 { 906 struct hclge_vport *vport = hclge_get_vport(handle); 907 struct hclge_dev *hdev = vport->back; 908 u32 speed_ability = hdev->hw.mac.speed_ability; 909 u32 speed_bit = 0; 910 int ret; 911 912 ret = hclge_get_speed_bit(speed, &speed_bit); 913 if (ret) 914 return ret; 915 916 if (speed_bit & speed_ability) 917 return 0; 918 919 return -EINVAL; 920 } 921 922 static void hclge_update_fec_support(struct hclge_mac *mac) 923 { 924 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); 925 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); 926 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported); 927 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 928 929 if (mac->fec_ability & BIT(HNAE3_FEC_BASER)) 930 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 931 mac->supported); 932 if (mac->fec_ability & BIT(HNAE3_FEC_RS)) 933 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 934 mac->supported); 935 if (mac->fec_ability & BIT(HNAE3_FEC_LLRS)) 936 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 937 mac->supported); 938 if (mac->fec_ability & BIT(HNAE3_FEC_NONE)) 939 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 940 mac->supported); 941 } 942 943 static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = { 944 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, 945 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, 946 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, 947 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, 948 {HCLGE_SUPPORT_50G_R1_BIT, 
ETHTOOL_LINK_MODE_50000baseSR_Full_BIT}, 949 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, 950 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT}, 951 {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, 952 }; 953 954 static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = { 955 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, 956 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, 957 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT}, 958 {HCLGE_SUPPORT_100G_R4_BIT, 959 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, 960 {HCLGE_SUPPORT_100G_R2_BIT, 961 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT}, 962 {HCLGE_SUPPORT_200G_BIT, 963 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, 964 }; 965 966 static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = { 967 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, 968 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, 969 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, 970 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, 971 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT}, 972 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, 973 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT}, 974 {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, 975 }; 976 977 static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = { 978 {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, 979 {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, 980 {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, 981 {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, 982 {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, 983 {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT}, 984 {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, 985 {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT}, 986 {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, 987 }; 988 989 static void hclge_convert_setting_sr(u16 speed_ability, 990 unsigned long *link_mode) 991 { 992 int i; 993 994 for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) { 995 if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit) 996 linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode, 997 link_mode); 998 } 999 } 1000 1001 static void hclge_convert_setting_lr(u16 speed_ability, 1002 unsigned long *link_mode) 1003 { 1004 int i; 1005 1006 for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) { 1007 if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit) 1008 linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode, 1009 link_mode); 1010 } 1011 } 1012 1013 static void hclge_convert_setting_cr(u16 speed_ability, 1014 unsigned long *link_mode) 1015 { 1016 int i; 1017 1018 for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) { 1019 if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit) 1020 linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode, 1021 link_mode); 1022 } 1023 } 1024 1025 static void hclge_convert_setting_kr(u16 speed_ability, 1026 unsigned long *link_mode) 1027 { 1028 int i; 1029 1030 for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) { 1031 if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit) 1032 linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode, 1033 
link_mode); 1034 } 1035 } 1036 1037 static void hclge_convert_setting_fec(struct hclge_mac *mac) 1038 { 1039 /* If firmware has reported fec_ability, don't need to convert by speed */ 1040 if (mac->fec_ability) 1041 goto out; 1042 1043 switch (mac->speed) { 1044 case HCLGE_MAC_SPEED_10G: 1045 case HCLGE_MAC_SPEED_40G: 1046 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) | 1047 BIT(HNAE3_FEC_NONE); 1048 break; 1049 case HCLGE_MAC_SPEED_25G: 1050 case HCLGE_MAC_SPEED_50G: 1051 mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | 1052 BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE); 1053 break; 1054 case HCLGE_MAC_SPEED_100G: 1055 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | 1056 BIT(HNAE3_FEC_NONE); 1057 break; 1058 case HCLGE_MAC_SPEED_200G: 1059 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) | 1060 BIT(HNAE3_FEC_LLRS); 1061 break; 1062 default: 1063 mac->fec_ability = 0; 1064 break; 1065 } 1066 1067 out: 1068 hclge_update_fec_support(mac); 1069 } 1070 1071 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, 1072 u16 speed_ability) 1073 { 1074 struct hclge_mac *mac = &hdev->hw.mac; 1075 1076 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1077 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 1078 mac->supported); 1079 1080 hclge_convert_setting_sr(speed_ability, mac->supported); 1081 hclge_convert_setting_lr(speed_ability, mac->supported); 1082 hclge_convert_setting_cr(speed_ability, mac->supported); 1083 if (hnae3_dev_fec_supported(hdev)) 1084 hclge_convert_setting_fec(mac); 1085 1086 if (hnae3_dev_pause_supported(hdev)) 1087 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1088 1089 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); 1090 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1091 } 1092 1093 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, 1094 u16 speed_ability) 1095 { 1096 struct hclge_mac *mac = &hdev->hw.mac; 1097 1098 hclge_convert_setting_kr(speed_ability, mac->supported); 1099 if (hnae3_dev_fec_supported(hdev)) 1100 hclge_convert_setting_fec(mac); 1101 1102 if (hnae3_dev_pause_supported(hdev)) 1103 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); 1104 1105 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); 1106 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); 1107 } 1108 1109 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, 1110 u16 speed_ability) 1111 { 1112 unsigned long *supported = hdev->hw.mac.supported; 1113 1114 /* default to support all speed for GE port */ 1115 if (!speed_ability) 1116 speed_ability = HCLGE_SUPPORT_GE; 1117 1118 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1119 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1120 supported); 1121 1122 if (speed_ability & HCLGE_SUPPORT_100M_BIT) { 1123 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 1124 supported); 1125 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, 1126 supported); 1127 } 1128 1129 if (speed_ability & HCLGE_SUPPORT_10M_BIT) { 1130 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); 1131 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); 1132 } 1133 1134 if (hnae3_dev_pause_supported(hdev)) { 1135 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); 1136 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); 1137 } 1138 1139 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); 1140 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); 1141 } 1142 1143 
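/* Illustrative sketch, not part of the driver: how the helpers above combine
 * for one media type. Assuming a fiber port with FEC and pause support whose
 * firmware reports speed_ability = HCLGE_SUPPORT_25G_BIT |
 * HCLGE_SUPPORT_100G_R4_BIT (a made-up combination),
 * hclge_parse_fiber_link_mode() above, reached through
 * hclge_parse_link_mode() below, would roughly do:
 *
 *	hclge_convert_setting_sr(speed_ability, mac->supported);
 *		// sets 25000baseSR_Full and 100000baseSR4_Full
 *	hclge_convert_setting_lr(speed_ability, mac->supported);
 *		// sets 100000baseLR4_ER4_Full (the LR map has no 25G entry)
 *	hclge_convert_setting_cr(speed_ability, mac->supported);
 *		// sets 25000baseCR_Full and 100000baseCR4_Full
 *	hclge_convert_setting_fec(mac);
 *		// fills in the FEC modes from fec_ability or, if the firmware
 *		// did not report any, from the current speed
 *
 * before setting the FIBRE and FEC_NONE bits (plus Pause when the device
 * supports it).
 */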
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) 1144 { 1145 u8 media_type = hdev->hw.mac.media_type; 1146 1147 if (media_type == HNAE3_MEDIA_TYPE_FIBER) 1148 hclge_parse_fiber_link_mode(hdev, speed_ability); 1149 else if (media_type == HNAE3_MEDIA_TYPE_COPPER) 1150 hclge_parse_copper_link_mode(hdev, speed_ability); 1151 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) 1152 hclge_parse_backplane_link_mode(hdev, speed_ability); 1153 } 1154 1155 static u32 hclge_get_max_speed(u16 speed_ability) 1156 { 1157 if (speed_ability & HCLGE_SUPPORT_200G_BIT) 1158 return HCLGE_MAC_SPEED_200G; 1159 1160 if (speed_ability & HCLGE_SUPPORT_100G_BITS) 1161 return HCLGE_MAC_SPEED_100G; 1162 1163 if (speed_ability & HCLGE_SUPPORT_50G_BITS) 1164 return HCLGE_MAC_SPEED_50G; 1165 1166 if (speed_ability & HCLGE_SUPPORT_40G_BIT) 1167 return HCLGE_MAC_SPEED_40G; 1168 1169 if (speed_ability & HCLGE_SUPPORT_25G_BIT) 1170 return HCLGE_MAC_SPEED_25G; 1171 1172 if (speed_ability & HCLGE_SUPPORT_10G_BIT) 1173 return HCLGE_MAC_SPEED_10G; 1174 1175 if (speed_ability & HCLGE_SUPPORT_1G_BIT) 1176 return HCLGE_MAC_SPEED_1G; 1177 1178 if (speed_ability & HCLGE_SUPPORT_100M_BIT) 1179 return HCLGE_MAC_SPEED_100M; 1180 1181 if (speed_ability & HCLGE_SUPPORT_10M_BIT) 1182 return HCLGE_MAC_SPEED_10M; 1183 1184 return HCLGE_MAC_SPEED_1G; 1185 } 1186 1187 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) 1188 { 1189 #define HCLGE_TX_SPARE_SIZE_UNIT 4096 1190 #define SPEED_ABILITY_EXT_SHIFT 8 1191 1192 struct hclge_cfg_param_cmd *req; 1193 u64 mac_addr_tmp_high; 1194 u16 speed_ability_ext; 1195 u64 mac_addr_tmp; 1196 unsigned int i; 1197 1198 req = (struct hclge_cfg_param_cmd *)desc[0].data; 1199 1200 /* get the configuration */ 1201 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1202 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); 1203 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]), 1204 HCLGE_CFG_TQP_DESC_N_M, 1205 HCLGE_CFG_TQP_DESC_N_S); 1206 1207 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]), 1208 HCLGE_CFG_PHY_ADDR_M, 1209 HCLGE_CFG_PHY_ADDR_S); 1210 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]), 1211 HCLGE_CFG_MEDIA_TP_M, 1212 HCLGE_CFG_MEDIA_TP_S); 1213 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]), 1214 HCLGE_CFG_RX_BUF_LEN_M, 1215 HCLGE_CFG_RX_BUF_LEN_S); 1216 /* get mac_address */ 1217 mac_addr_tmp = __le32_to_cpu(req->param[2]); 1218 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]), 1219 HCLGE_CFG_MAC_ADDR_H_M, 1220 HCLGE_CFG_MAC_ADDR_H_S); 1221 1222 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; 1223 1224 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), 1225 HCLGE_CFG_DEFAULT_SPEED_M, 1226 HCLGE_CFG_DEFAULT_SPEED_S); 1227 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), 1228 HCLGE_CFG_RSS_SIZE_M, 1229 HCLGE_CFG_RSS_SIZE_S); 1230 1231 for (i = 0; i < ETH_ALEN; i++) 1232 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; 1233 1234 req = (struct hclge_cfg_param_cmd *)desc[1].data; 1235 cfg->numa_node_map = __le32_to_cpu(req->param[0]); 1236 1237 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]), 1238 HCLGE_CFG_SPEED_ABILITY_M, 1239 HCLGE_CFG_SPEED_ABILITY_S); 1240 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]), 1241 HCLGE_CFG_SPEED_ABILITY_EXT_M, 1242 HCLGE_CFG_SPEED_ABILITY_EXT_S); 1243 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT; 1244 1245 cfg->vlan_fliter_cap = 
hnae3_get_field(__le32_to_cpu(req->param[1]),
						HCLGE_CFG_VLAN_FLTR_CAP_M,
						HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
	 * exponent rather than the size itself, which is more flexible for
	 * future changes and expansions. A value of 0 in this field does not
	 * make sense, so in that case PF and VF share the same max rss size
	 * field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from the configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The read length must be given in 4-byte units when sent to
		 * the hardware.
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM 8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd
*)desc[1].data; 1340 1341 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 1342 ae_dev->dev_specs.rss_ind_tbl_size = 1343 le16_to_cpu(req0->rss_ind_tbl_size); 1344 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 1345 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 1346 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); 1347 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); 1348 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 1349 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 1350 ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); 1351 ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); 1352 ae_dev->dev_specs.tnl_num = req1->tnl_num; 1353 } 1354 1355 static void hclge_check_dev_specs(struct hclge_dev *hdev) 1356 { 1357 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 1358 1359 if (!dev_specs->max_non_tso_bd_num) 1360 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM; 1361 if (!dev_specs->rss_ind_tbl_size) 1362 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; 1363 if (!dev_specs->rss_key_size) 1364 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 1365 if (!dev_specs->max_tm_rate) 1366 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; 1367 if (!dev_specs->max_qset_num) 1368 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM; 1369 if (!dev_specs->max_int_gl) 1370 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; 1371 if (!dev_specs->max_frm_size) 1372 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; 1373 if (!dev_specs->umv_size) 1374 dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; 1375 } 1376 1377 static int hclge_query_mac_stats_num(struct hclge_dev *hdev) 1378 { 1379 u32 reg_num = 0; 1380 int ret; 1381 1382 ret = hclge_mac_query_reg_num(hdev, ®_num); 1383 if (ret && ret != -EOPNOTSUPP) 1384 return ret; 1385 1386 hdev->ae_dev->dev_specs.mac_stats_num = reg_num; 1387 return 0; 1388 } 1389 1390 static int hclge_query_dev_specs(struct hclge_dev *hdev) 1391 { 1392 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM]; 1393 int ret; 1394 int i; 1395 1396 ret = hclge_query_mac_stats_num(hdev); 1397 if (ret) 1398 return ret; 1399 1400 /* set default specifications as devices lower than version V3 do not 1401 * support querying specifications from firmware. 
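	 * Even on V3 and newer firmware, hclge_check_dev_specs(), called after
	 * the query further down, still falls back to the same default values
	 * for most fields the firmware leaves as zero, so a partially filled
	 * reply is handled.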
1402 */ 1403 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 1404 hclge_set_default_dev_specs(hdev); 1405 return 0; 1406 } 1407 1408 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 1409 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, 1410 true); 1411 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 1412 } 1413 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); 1414 1415 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); 1416 if (ret) 1417 return ret; 1418 1419 hclge_parse_dev_specs(hdev, desc); 1420 hclge_check_dev_specs(hdev); 1421 1422 return 0; 1423 } 1424 1425 static int hclge_get_cap(struct hclge_dev *hdev) 1426 { 1427 int ret; 1428 1429 ret = hclge_query_function_status(hdev); 1430 if (ret) { 1431 dev_err(&hdev->pdev->dev, 1432 "query function status error %d.\n", ret); 1433 return ret; 1434 } 1435 1436 /* get pf resource */ 1437 return hclge_query_pf_resource(hdev); 1438 } 1439 1440 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) 1441 { 1442 #define HCLGE_MIN_TX_DESC 64 1443 #define HCLGE_MIN_RX_DESC 64 1444 1445 if (!is_kdump_kernel()) 1446 return; 1447 1448 dev_info(&hdev->pdev->dev, 1449 "Running kdump kernel. Using minimal resources\n"); 1450 1451 /* minimal queue pairs equals to the number of vports */ 1452 hdev->num_tqps = hdev->num_req_vfs + 1; 1453 hdev->num_tx_desc = HCLGE_MIN_TX_DESC; 1454 hdev->num_rx_desc = HCLGE_MIN_RX_DESC; 1455 } 1456 1457 static void hclge_init_tc_config(struct hclge_dev *hdev) 1458 { 1459 unsigned int i; 1460 1461 if (hdev->tc_max > HNAE3_MAX_TC || 1462 hdev->tc_max < 1) { 1463 dev_warn(&hdev->pdev->dev, "TC num = %u.\n", 1464 hdev->tc_max); 1465 hdev->tc_max = 1; 1466 } 1467 1468 /* Dev does not support DCB */ 1469 if (!hnae3_dev_dcb_supported(hdev)) { 1470 hdev->tc_max = 1; 1471 hdev->pfc_max = 0; 1472 } else { 1473 hdev->pfc_max = hdev->tc_max; 1474 } 1475 1476 hdev->tm_info.num_tc = 1; 1477 1478 /* Currently not support uncontiuous tc */ 1479 for (i = 0; i < hdev->tm_info.num_tc; i++) 1480 hnae3_set_bit(hdev->hw_tc_map, i, 1); 1481 1482 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; 1483 } 1484 1485 static int hclge_configure(struct hclge_dev *hdev) 1486 { 1487 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1488 struct hclge_cfg cfg; 1489 int ret; 1490 1491 ret = hclge_get_cfg(hdev, &cfg); 1492 if (ret) 1493 return ret; 1494 1495 hdev->base_tqp_pid = 0; 1496 hdev->vf_rss_size_max = cfg.vf_rss_size_max; 1497 hdev->pf_rss_size_max = cfg.pf_rss_size_max; 1498 hdev->rx_buf_len = cfg.rx_buf_len; 1499 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); 1500 hdev->hw.mac.media_type = cfg.media_type; 1501 hdev->hw.mac.phy_addr = cfg.phy_addr; 1502 hdev->num_tx_desc = cfg.tqp_desc_num; 1503 hdev->num_rx_desc = cfg.tqp_desc_num; 1504 hdev->tm_info.num_pg = 1; 1505 hdev->tc_max = cfg.tc_num; 1506 hdev->tm_info.hw_pfc_map = 0; 1507 if (cfg.umv_space) 1508 hdev->wanted_umv_size = cfg.umv_space; 1509 else 1510 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; 1511 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; 1512 hdev->gro_en = true; 1513 if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) 1514 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); 1515 1516 if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) { 1517 hdev->fd_en = true; 1518 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 1519 } 1520 1521 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); 1522 if (ret) { 1523 dev_err(&hdev->pdev->dev, "failed to parse speed %u, 
ret = %d\n", 1524 cfg.default_speed, ret); 1525 return ret; 1526 } 1527 1528 hclge_parse_link_mode(hdev, cfg.speed_ability); 1529 1530 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); 1531 1532 hclge_init_tc_config(hdev); 1533 hclge_init_kdump_kernel_config(hdev); 1534 1535 return ret; 1536 } 1537 1538 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, 1539 u16 tso_mss_max) 1540 { 1541 struct hclge_cfg_tso_status_cmd *req; 1542 struct hclge_desc desc; 1543 1544 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); 1545 1546 req = (struct hclge_cfg_tso_status_cmd *)desc.data; 1547 req->tso_mss_min = cpu_to_le16(tso_mss_min); 1548 req->tso_mss_max = cpu_to_le16(tso_mss_max); 1549 1550 return hclge_cmd_send(&hdev->hw, &desc, 1); 1551 } 1552 1553 static int hclge_config_gro(struct hclge_dev *hdev) 1554 { 1555 struct hclge_cfg_gro_status_cmd *req; 1556 struct hclge_desc desc; 1557 int ret; 1558 1559 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) 1560 return 0; 1561 1562 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); 1563 req = (struct hclge_cfg_gro_status_cmd *)desc.data; 1564 1565 req->gro_en = hdev->gro_en ? 1 : 0; 1566 1567 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1568 if (ret) 1569 dev_err(&hdev->pdev->dev, 1570 "GRO hardware config cmd failed, ret = %d\n", ret); 1571 1572 return ret; 1573 } 1574 1575 static int hclge_alloc_tqps(struct hclge_dev *hdev) 1576 { 1577 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 1578 struct hclge_comm_tqp *tqp; 1579 int i; 1580 1581 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 1582 sizeof(struct hclge_comm_tqp), GFP_KERNEL); 1583 if (!hdev->htqp) 1584 return -ENOMEM; 1585 1586 tqp = hdev->htqp; 1587 1588 for (i = 0; i < hdev->num_tqps; i++) { 1589 tqp->dev = &hdev->pdev->dev; 1590 tqp->index = i; 1591 1592 tqp->q.ae_algo = &ae_algo; 1593 tqp->q.buf_size = hdev->rx_buf_len; 1594 tqp->q.tx_desc_num = hdev->num_tx_desc; 1595 tqp->q.rx_desc_num = hdev->num_rx_desc; 1596 1597 /* need an extended offset to configure queues >= 1598 * HCLGE_TQP_MAX_SIZE_DEV_V2 1599 */ 1600 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) 1601 tqp->q.io_base = hdev->hw.hw.io_base + 1602 HCLGE_TQP_REG_OFFSET + 1603 i * HCLGE_TQP_REG_SIZE; 1604 else 1605 tqp->q.io_base = hdev->hw.hw.io_base + 1606 HCLGE_TQP_REG_OFFSET + 1607 HCLGE_TQP_EXT_REG_OFFSET + 1608 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * 1609 HCLGE_TQP_REG_SIZE; 1610 1611 /* when device supports tx push and has device memory, 1612 * the queue can execute push mode or doorbell mode on 1613 * device memory. 
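		 * When HNAE3_DEV_SUPPORT_TX_PUSH_B is not set, mem_base simply
		 * stays NULL, since the htqp array allocated above comes from
		 * a zeroing devm_kcalloc().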
1614 */ 1615 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) 1616 tqp->q.mem_base = hdev->hw.hw.mem_base + 1617 HCLGE_TQP_MEM_OFFSET(hdev, i); 1618 1619 tqp++; 1620 } 1621 1622 return 0; 1623 } 1624 1625 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, 1626 u16 tqp_pid, u16 tqp_vid, bool is_pf) 1627 { 1628 struct hclge_tqp_map_cmd *req; 1629 struct hclge_desc desc; 1630 int ret; 1631 1632 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); 1633 1634 req = (struct hclge_tqp_map_cmd *)desc.data; 1635 req->tqp_id = cpu_to_le16(tqp_pid); 1636 req->tqp_vf = func_id; 1637 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B; 1638 if (!is_pf) 1639 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B; 1640 req->tqp_vid = cpu_to_le16(tqp_vid); 1641 1642 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1643 if (ret) 1644 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); 1645 1646 return ret; 1647 } 1648 1649 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) 1650 { 1651 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 1652 struct hclge_dev *hdev = vport->back; 1653 int i, alloced; 1654 1655 for (i = 0, alloced = 0; i < hdev->num_tqps && 1656 alloced < num_tqps; i++) { 1657 if (!hdev->htqp[i].alloced) { 1658 hdev->htqp[i].q.handle = &vport->nic; 1659 hdev->htqp[i].q.tqp_index = alloced; 1660 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; 1661 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; 1662 kinfo->tqp[alloced] = &hdev->htqp[i].q; 1663 hdev->htqp[i].alloced = true; 1664 alloced++; 1665 } 1666 } 1667 vport->alloc_tqps = alloced; 1668 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, 1669 vport->alloc_tqps / hdev->tm_info.num_tc); 1670 1671 /* ensure one to one mapping between irq and queue at default */ 1672 kinfo->rss_size = min_t(u16, kinfo->rss_size, 1673 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); 1674 1675 return 0; 1676 } 1677 1678 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, 1679 u16 num_tx_desc, u16 num_rx_desc) 1680 1681 { 1682 struct hnae3_handle *nic = &vport->nic; 1683 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 1684 struct hclge_dev *hdev = vport->back; 1685 int ret; 1686 1687 kinfo->num_tx_desc = num_tx_desc; 1688 kinfo->num_rx_desc = num_rx_desc; 1689 1690 kinfo->rx_buf_len = hdev->rx_buf_len; 1691 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; 1692 1693 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, 1694 sizeof(struct hnae3_queue *), GFP_KERNEL); 1695 if (!kinfo->tqp) 1696 return -ENOMEM; 1697 1698 ret = hclge_assign_tqp(vport, num_tqps); 1699 if (ret) 1700 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); 1701 1702 return ret; 1703 } 1704 1705 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, 1706 struct hclge_vport *vport) 1707 { 1708 struct hnae3_handle *nic = &vport->nic; 1709 struct hnae3_knic_private_info *kinfo; 1710 u16 i; 1711 1712 kinfo = &nic->kinfo; 1713 for (i = 0; i < vport->alloc_tqps; i++) { 1714 struct hclge_comm_tqp *q = 1715 container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); 1716 bool is_pf; 1717 int ret; 1718 1719 is_pf = !(vport->vport_id); 1720 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, 1721 i, is_pf); 1722 if (ret) 1723 return ret; 1724 } 1725 1726 return 0; 1727 } 1728 1729 static int hclge_map_tqp(struct hclge_dev *hdev) 1730 { 1731 struct hclge_vport *vport = hdev->vport; 1732 u16 i, num_vport; 1733 1734 num_vport = hdev->num_req_vfs + 1; 1735 for (i = 0; i < num_vport; i++) { 1736 int ret; 1737 1738 ret = 
hclge_map_tqp_to_vport(hdev, vport); 1739 if (ret) 1740 return ret; 1741 1742 vport++; 1743 } 1744 1745 return 0; 1746 } 1747 1748 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) 1749 { 1750 struct hnae3_handle *nic = &vport->nic; 1751 struct hclge_dev *hdev = vport->back; 1752 int ret; 1753 1754 nic->pdev = hdev->pdev; 1755 nic->ae_algo = &ae_algo; 1756 nic->numa_node_mask = hdev->numa_node_mask; 1757 nic->kinfo.io_base = hdev->hw.hw.io_base; 1758 1759 ret = hclge_knic_setup(vport, num_tqps, 1760 hdev->num_tx_desc, hdev->num_rx_desc); 1761 if (ret) 1762 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); 1763 1764 return ret; 1765 } 1766 1767 static int hclge_alloc_vport(struct hclge_dev *hdev) 1768 { 1769 struct pci_dev *pdev = hdev->pdev; 1770 struct hclge_vport *vport; 1771 u32 tqp_main_vport; 1772 u32 tqp_per_vport; 1773 int num_vport, i; 1774 int ret; 1775 1776 /* We need to alloc a vport for main NIC of PF */ 1777 num_vport = hdev->num_req_vfs + 1; 1778 1779 if (hdev->num_tqps < num_vport) { 1780 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", 1781 hdev->num_tqps, num_vport); 1782 return -EINVAL; 1783 } 1784 1785 /* Alloc the same number of TQPs for every vport */ 1786 tqp_per_vport = hdev->num_tqps / num_vport; 1787 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; 1788 1789 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport), 1790 GFP_KERNEL); 1791 if (!vport) 1792 return -ENOMEM; 1793 1794 hdev->vport = vport; 1795 hdev->num_alloc_vport = num_vport; 1796 1797 if (IS_ENABLED(CONFIG_PCI_IOV)) 1798 hdev->num_alloc_vfs = hdev->num_req_vfs; 1799 1800 for (i = 0; i < num_vport; i++) { 1801 vport->back = hdev; 1802 vport->vport_id = i; 1803 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; 1804 vport->mps = HCLGE_MAC_DEFAULT_FRAME; 1805 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; 1806 vport->port_base_vlan_cfg.tbl_sta = true; 1807 vport->rxvlan_cfg.rx_vlan_offload_en = true; 1808 vport->req_vlan_fltr_en = true; 1809 INIT_LIST_HEAD(&vport->vlan_list); 1810 INIT_LIST_HEAD(&vport->uc_mac_list); 1811 INIT_LIST_HEAD(&vport->mc_mac_list); 1812 spin_lock_init(&vport->mac_list_lock); 1813 1814 if (i == 0) 1815 ret = hclge_vport_setup(vport, tqp_main_vport); 1816 else 1817 ret = hclge_vport_setup(vport, tqp_per_vport); 1818 if (ret) { 1819 dev_err(&pdev->dev, 1820 "vport setup failed for vport %d, %d\n", 1821 i, ret); 1822 return ret; 1823 } 1824 1825 vport++; 1826 } 1827 1828 return 0; 1829 } 1830 1831 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, 1832 struct hclge_pkt_buf_alloc *buf_alloc) 1833 { 1834 /* TX buffer size is unit by 128 byte */ 1835 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 1836 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) 1837 struct hclge_tx_buff_alloc_cmd *req; 1838 struct hclge_desc desc; 1839 int ret; 1840 u8 i; 1841 1842 req = (struct hclge_tx_buff_alloc_cmd *)desc.data; 1843 1844 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); 1845 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1846 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; 1847 1848 req->tx_pkt_buff[i] = 1849 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | 1850 HCLGE_BUF_SIZE_UPDATE_EN_MSK); 1851 } 1852 1853 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1854 if (ret) 1855 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", 1856 ret); 1857 1858 return ret; 1859 } 1860 1861 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, 1862 struct hclge_pkt_buf_alloc *buf_alloc) 1863 { 1864 int ret 
= hclge_cmd_alloc_tx_buff(hdev, buf_alloc); 1865 1866 if (ret) 1867 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); 1868 1869 return ret; 1870 } 1871 1872 static u32 hclge_get_tc_num(struct hclge_dev *hdev) 1873 { 1874 unsigned int i; 1875 u32 cnt = 0; 1876 1877 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1878 if (hdev->hw_tc_map & BIT(i)) 1879 cnt++; 1880 return cnt; 1881 } 1882 1883 /* Get the number of pfc enabled TCs, which have private buffer */ 1884 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, 1885 struct hclge_pkt_buf_alloc *buf_alloc) 1886 { 1887 struct hclge_priv_buf *priv; 1888 unsigned int i; 1889 int cnt = 0; 1890 1891 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1892 priv = &buf_alloc->priv_buf[i]; 1893 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && 1894 priv->enable) 1895 cnt++; 1896 } 1897 1898 return cnt; 1899 } 1900 1901 /* Get the number of pfc disabled TCs, which have private buffer */ 1902 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, 1903 struct hclge_pkt_buf_alloc *buf_alloc) 1904 { 1905 struct hclge_priv_buf *priv; 1906 unsigned int i; 1907 int cnt = 0; 1908 1909 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1910 priv = &buf_alloc->priv_buf[i]; 1911 if (hdev->hw_tc_map & BIT(i) && 1912 !(hdev->tm_info.hw_pfc_map & BIT(i)) && 1913 priv->enable) 1914 cnt++; 1915 } 1916 1917 return cnt; 1918 } 1919 1920 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1921 { 1922 struct hclge_priv_buf *priv; 1923 u32 rx_priv = 0; 1924 int i; 1925 1926 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1927 priv = &buf_alloc->priv_buf[i]; 1928 if (priv->enable) 1929 rx_priv += priv->buf_size; 1930 } 1931 return rx_priv; 1932 } 1933 1934 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) 1935 { 1936 u32 i, total_tx_size = 0; 1937 1938 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 1939 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; 1940 1941 return total_tx_size; 1942 } 1943 1944 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, 1945 struct hclge_pkt_buf_alloc *buf_alloc, 1946 u32 rx_all) 1947 { 1948 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; 1949 u32 tc_num = hclge_get_tc_num(hdev); 1950 u32 shared_buf, aligned_mps; 1951 u32 rx_priv; 1952 int i; 1953 1954 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); 1955 1956 if (hnae3_dev_dcb_supported(hdev)) 1957 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + 1958 hdev->dv_buf_size; 1959 else 1960 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF 1961 + hdev->dv_buf_size; 1962 1963 shared_buf_tc = tc_num * aligned_mps + aligned_mps; 1964 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), 1965 HCLGE_BUF_SIZE_UNIT); 1966 1967 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); 1968 if (rx_all < rx_priv + shared_std) 1969 return false; 1970 1971 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); 1972 buf_alloc->s_buf.buf_size = shared_buf; 1973 if (hnae3_dev_dcb_supported(hdev)) { 1974 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; 1975 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high 1976 - roundup(aligned_mps / HCLGE_BUF_DIV_BY, 1977 HCLGE_BUF_SIZE_UNIT); 1978 } else { 1979 buf_alloc->s_buf.self.high = aligned_mps + 1980 HCLGE_NON_DCB_ADDITIONAL_BUF; 1981 buf_alloc->s_buf.self.low = aligned_mps; 1982 } 1983 1984 if (hnae3_dev_dcb_supported(hdev)) { 1985 hi_thrd = shared_buf - hdev->dv_buf_size; 1986 1987 if (tc_num <= NEED_RESERVE_TC_NUM) 1988 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT 1989 / 
BUF_MAX_PERCENT; 1990 1991 if (tc_num) 1992 hi_thrd = hi_thrd / tc_num; 1993 1994 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps); 1995 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); 1996 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY; 1997 } else { 1998 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; 1999 lo_thrd = aligned_mps; 2000 } 2001 2002 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2003 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; 2004 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; 2005 } 2006 2007 return true; 2008 } 2009 2010 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, 2011 struct hclge_pkt_buf_alloc *buf_alloc) 2012 { 2013 u32 i, total_size; 2014 2015 total_size = hdev->pkt_buf_size; 2016 2017 /* alloc tx buffer for all enabled tc */ 2018 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2019 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2020 2021 if (hdev->hw_tc_map & BIT(i)) { 2022 if (total_size < hdev->tx_buf_size) 2023 return -ENOMEM; 2024 2025 priv->tx_buf_size = hdev->tx_buf_size; 2026 } else { 2027 priv->tx_buf_size = 0; 2028 } 2029 2030 total_size -= priv->tx_buf_size; 2031 } 2032 2033 return 0; 2034 } 2035 2036 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, 2037 struct hclge_pkt_buf_alloc *buf_alloc) 2038 { 2039 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2040 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); 2041 unsigned int i; 2042 2043 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2044 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2045 2046 priv->enable = 0; 2047 priv->wl.low = 0; 2048 priv->wl.high = 0; 2049 priv->buf_size = 0; 2050 2051 if (!(hdev->hw_tc_map & BIT(i))) 2052 continue; 2053 2054 priv->enable = 1; 2055 2056 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 2057 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT; 2058 priv->wl.high = roundup(priv->wl.low + aligned_mps, 2059 HCLGE_BUF_SIZE_UNIT); 2060 } else { 2061 priv->wl.low = 0; 2062 priv->wl.high = max ? 
(aligned_mps * HCLGE_BUF_MUL_BY) : 2063 aligned_mps; 2064 } 2065 2066 priv->buf_size = priv->wl.high + hdev->dv_buf_size; 2067 } 2068 2069 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2070 } 2071 2072 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, 2073 struct hclge_pkt_buf_alloc *buf_alloc) 2074 { 2075 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2076 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); 2077 int i; 2078 2079 /* let the last to be cleared first */ 2080 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 2081 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2082 unsigned int mask = BIT((unsigned int)i); 2083 2084 if (hdev->hw_tc_map & mask && 2085 !(hdev->tm_info.hw_pfc_map & mask)) { 2086 /* Clear the no pfc TC private buffer */ 2087 priv->wl.low = 0; 2088 priv->wl.high = 0; 2089 priv->buf_size = 0; 2090 priv->enable = 0; 2091 no_pfc_priv_num--; 2092 } 2093 2094 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 2095 no_pfc_priv_num == 0) 2096 break; 2097 } 2098 2099 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2100 } 2101 2102 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, 2103 struct hclge_pkt_buf_alloc *buf_alloc) 2104 { 2105 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2106 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); 2107 int i; 2108 2109 /* let the last to be cleared first */ 2110 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { 2111 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2112 unsigned int mask = BIT((unsigned int)i); 2113 2114 if (hdev->hw_tc_map & mask && 2115 hdev->tm_info.hw_pfc_map & mask) { 2116 /* Reduce the number of pfc TC with private buffer */ 2117 priv->wl.low = 0; 2118 priv->enable = 0; 2119 priv->wl.high = 0; 2120 priv->buf_size = 0; 2121 pfc_priv_num--; 2122 } 2123 2124 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || 2125 pfc_priv_num == 0) 2126 break; 2127 } 2128 2129 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); 2130 } 2131 2132 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, 2133 struct hclge_pkt_buf_alloc *buf_alloc) 2134 { 2135 #define COMPENSATE_BUFFER 0x3C00 2136 #define COMPENSATE_HALF_MPS_NUM 5 2137 #define PRIV_WL_GAP 0x1800 2138 2139 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); 2140 u32 tc_num = hclge_get_tc_num(hdev); 2141 u32 half_mps = hdev->mps >> 1; 2142 u32 min_rx_priv; 2143 unsigned int i; 2144 2145 if (tc_num) 2146 rx_priv = rx_priv / tc_num; 2147 2148 if (tc_num <= NEED_RESERVE_TC_NUM) 2149 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT; 2150 2151 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + 2152 COMPENSATE_HALF_MPS_NUM * half_mps; 2153 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT); 2154 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT); 2155 if (rx_priv < min_rx_priv) 2156 return false; 2157 2158 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2159 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2160 2161 priv->enable = 0; 2162 priv->wl.low = 0; 2163 priv->wl.high = 0; 2164 priv->buf_size = 0; 2165 2166 if (!(hdev->hw_tc_map & BIT(i))) 2167 continue; 2168 2169 priv->enable = 1; 2170 priv->buf_size = rx_priv; 2171 priv->wl.high = rx_priv - hdev->dv_buf_size; 2172 priv->wl.low = priv->wl.high - PRIV_WL_GAP; 2173 } 2174 2175 buf_alloc->s_buf.buf_size = 0; 2176 2177 return true; 2178 } 2179 2180 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs 2181 * @hdev: pointer to struct hclge_dev 
2182 * @buf_alloc: pointer to buffer calculation data 2183 * @return: 0: calculate successful, negative: fail 2184 */ 2185 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, 2186 struct hclge_pkt_buf_alloc *buf_alloc) 2187 { 2188 /* When DCB is not supported, rx private buffer is not allocated. */ 2189 if (!hnae3_dev_dcb_supported(hdev)) { 2190 u32 rx_all = hdev->pkt_buf_size; 2191 2192 rx_all -= hclge_get_tx_buff_alloced(buf_alloc); 2193 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) 2194 return -ENOMEM; 2195 2196 return 0; 2197 } 2198 2199 if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) 2200 return 0; 2201 2202 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) 2203 return 0; 2204 2205 /* try to decrease the buffer size */ 2206 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) 2207 return 0; 2208 2209 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) 2210 return 0; 2211 2212 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) 2213 return 0; 2214 2215 return -ENOMEM; 2216 } 2217 2218 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, 2219 struct hclge_pkt_buf_alloc *buf_alloc) 2220 { 2221 struct hclge_rx_priv_buff_cmd *req; 2222 struct hclge_desc desc; 2223 int ret; 2224 int i; 2225 2226 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); 2227 req = (struct hclge_rx_priv_buff_cmd *)desc.data; 2228 2229 /* Alloc private buffer TCs */ 2230 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 2231 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; 2232 2233 req->buf_num[i] = 2234 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); 2235 req->buf_num[i] |= 2236 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); 2237 } 2238 2239 req->shared_buf = 2240 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | 2241 (1 << HCLGE_TC0_PRI_BUF_EN_B)); 2242 2243 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2244 if (ret) 2245 dev_err(&hdev->pdev->dev, 2246 "rx private buffer alloc cmd failed %d\n", ret); 2247 2248 return ret; 2249 } 2250 2251 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, 2252 struct hclge_pkt_buf_alloc *buf_alloc) 2253 { 2254 struct hclge_rx_priv_wl_buf *req; 2255 struct hclge_priv_buf *priv; 2256 struct hclge_desc desc[2]; 2257 int i, j; 2258 int ret; 2259 2260 for (i = 0; i < 2; i++) { 2261 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, 2262 false); 2263 req = (struct hclge_rx_priv_wl_buf *)desc[i].data; 2264 2265 /* The first descriptor set the NEXT bit to 1 */ 2266 if (i == 0) 2267 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2268 else 2269 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2270 2271 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2272 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; 2273 2274 priv = &buf_alloc->priv_buf[idx]; 2275 req->tc_wl[j].high = 2276 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); 2277 req->tc_wl[j].high |= 2278 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2279 req->tc_wl[j].low = 2280 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); 2281 req->tc_wl[j].low |= 2282 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2283 } 2284 } 2285 2286 /* Send 2 descriptor at one time */ 2287 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2288 if (ret) 2289 dev_err(&hdev->pdev->dev, 2290 "rx private waterline config cmd failed %d\n", 2291 ret); 2292 return ret; 2293 } 2294 2295 static int hclge_common_thrd_config(struct hclge_dev *hdev, 2296 struct hclge_pkt_buf_alloc *buf_alloc) 2297 { 2298 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; 2299 struct hclge_rx_com_thrd *req; 2300 struct hclge_desc desc[2]; 2301 struct 
hclge_tc_thrd *tc; 2302 int i, j; 2303 int ret; 2304 2305 for (i = 0; i < 2; i++) { 2306 hclge_cmd_setup_basic_desc(&desc[i], 2307 HCLGE_OPC_RX_COM_THRD_ALLOC, false); 2308 req = (struct hclge_rx_com_thrd *)&desc[i].data; 2309 2310 /* The first descriptor set the NEXT bit to 1 */ 2311 if (i == 0) 2312 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2313 else 2314 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2315 2316 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { 2317 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; 2318 2319 req->com_thrd[j].high = 2320 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); 2321 req->com_thrd[j].high |= 2322 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2323 req->com_thrd[j].low = 2324 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); 2325 req->com_thrd[j].low |= 2326 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2327 } 2328 } 2329 2330 /* Send 2 descriptors at one time */ 2331 ret = hclge_cmd_send(&hdev->hw, desc, 2); 2332 if (ret) 2333 dev_err(&hdev->pdev->dev, 2334 "common threshold config cmd failed %d\n", ret); 2335 return ret; 2336 } 2337 2338 static int hclge_common_wl_config(struct hclge_dev *hdev, 2339 struct hclge_pkt_buf_alloc *buf_alloc) 2340 { 2341 struct hclge_shared_buf *buf = &buf_alloc->s_buf; 2342 struct hclge_rx_com_wl *req; 2343 struct hclge_desc desc; 2344 int ret; 2345 2346 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); 2347 2348 req = (struct hclge_rx_com_wl *)desc.data; 2349 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); 2350 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2351 2352 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); 2353 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B)); 2354 2355 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2356 if (ret) 2357 dev_err(&hdev->pdev->dev, 2358 "common waterline config cmd failed %d\n", ret); 2359 2360 return ret; 2361 } 2362 2363 int hclge_buffer_alloc(struct hclge_dev *hdev) 2364 { 2365 struct hclge_pkt_buf_alloc *pkt_buf; 2366 int ret; 2367 2368 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); 2369 if (!pkt_buf) 2370 return -ENOMEM; 2371 2372 ret = hclge_tx_buffer_calc(hdev, pkt_buf); 2373 if (ret) { 2374 dev_err(&hdev->pdev->dev, 2375 "could not calc tx buffer size for all TCs %d\n", ret); 2376 goto out; 2377 } 2378 2379 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); 2380 if (ret) { 2381 dev_err(&hdev->pdev->dev, 2382 "could not alloc tx buffers %d\n", ret); 2383 goto out; 2384 } 2385 2386 ret = hclge_rx_buffer_calc(hdev, pkt_buf); 2387 if (ret) { 2388 dev_err(&hdev->pdev->dev, 2389 "could not calc rx priv buffer size for all TCs %d\n", 2390 ret); 2391 goto out; 2392 } 2393 2394 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); 2395 if (ret) { 2396 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", 2397 ret); 2398 goto out; 2399 } 2400 2401 if (hnae3_dev_dcb_supported(hdev)) { 2402 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); 2403 if (ret) { 2404 dev_err(&hdev->pdev->dev, 2405 "could not configure rx private waterline %d\n", 2406 ret); 2407 goto out; 2408 } 2409 2410 ret = hclge_common_thrd_config(hdev, pkt_buf); 2411 if (ret) { 2412 dev_err(&hdev->pdev->dev, 2413 "could not configure common threshold %d\n", 2414 ret); 2415 goto out; 2416 } 2417 } 2418 2419 ret = hclge_common_wl_config(hdev, pkt_buf); 2420 if (ret) 2421 dev_err(&hdev->pdev->dev, 2422 "could not configure common waterline %d\n", ret); 2423 2424 out: 2425 kfree(pkt_buf); 2426 return ret; 2427 } 2428 2429 static int hclge_init_roce_base_info(struct 
hclge_vport *vport) 2430 { 2431 struct hnae3_handle *roce = &vport->roce; 2432 struct hnae3_handle *nic = &vport->nic; 2433 struct hclge_dev *hdev = vport->back; 2434 2435 roce->rinfo.num_vectors = vport->back->num_roce_msi; 2436 2437 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) 2438 return -EINVAL; 2439 2440 roce->rinfo.base_vector = hdev->num_nic_msi; 2441 2442 roce->rinfo.netdev = nic->kinfo.netdev; 2443 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2444 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2445 2446 roce->pdev = nic->pdev; 2447 roce->ae_algo = nic->ae_algo; 2448 roce->numa_node_mask = nic->numa_node_mask; 2449 2450 return 0; 2451 } 2452 2453 static int hclge_init_msi(struct hclge_dev *hdev) 2454 { 2455 struct pci_dev *pdev = hdev->pdev; 2456 int vectors; 2457 int i; 2458 2459 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2460 hdev->num_msi, 2461 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2462 if (vectors < 0) { 2463 dev_err(&pdev->dev, 2464 "failed(%d) to allocate MSI/MSI-X vectors\n", 2465 vectors); 2466 return vectors; 2467 } 2468 if (vectors < hdev->num_msi) 2469 dev_warn(&hdev->pdev->dev, 2470 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2471 hdev->num_msi, vectors); 2472 2473 hdev->num_msi = vectors; 2474 hdev->num_msi_left = vectors; 2475 2476 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2477 sizeof(u16), GFP_KERNEL); 2478 if (!hdev->vector_status) { 2479 pci_free_irq_vectors(pdev); 2480 return -ENOMEM; 2481 } 2482 2483 for (i = 0; i < hdev->num_msi; i++) 2484 hdev->vector_status[i] = HCLGE_INVALID_VPORT; 2485 2486 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2487 sizeof(int), GFP_KERNEL); 2488 if (!hdev->vector_irq) { 2489 pci_free_irq_vectors(pdev); 2490 return -ENOMEM; 2491 } 2492 2493 return 0; 2494 } 2495 2496 static u8 hclge_check_speed_dup(u8 duplex, int speed) 2497 { 2498 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) 2499 duplex = HCLGE_MAC_FULL; 2500 2501 return duplex; 2502 } 2503 2504 static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = { 2505 {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M}, 2506 {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M}, 2507 {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G}, 2508 {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G}, 2509 {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G}, 2510 {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G}, 2511 {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G}, 2512 {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G}, 2513 {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G}, 2514 }; 2515 2516 static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw) 2517 { 2518 u16 i; 2519 2520 for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) { 2521 if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) { 2522 *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw; 2523 return 0; 2524 } 2525 } 2526 2527 return -EINVAL; 2528 } 2529 2530 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, 2531 u8 duplex, u8 lane_num) 2532 { 2533 struct hclge_config_mac_speed_dup_cmd *req; 2534 struct hclge_desc desc; 2535 u32 speed_fw; 2536 int ret; 2537 2538 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; 2539 2540 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); 2541 2542 if (duplex) 2543 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); 2544 2545 ret = hclge_convert_to_fw_speed(speed, &speed_fw); 2546 if (ret) { 2547 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); 2548 return ret; 2549 } 
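/* Editor's note (illustrative addition, not from the original source):
 * speed_fw above is produced by the linear hclge_mac_speed_map_to_fw
 * lookup in hclge_convert_to_fw_speed(). A hedged usage sketch:
 *
 *     u32 speed_fw;
 *     int err;
 *
 *     err = hclge_convert_to_fw_speed(HCLGE_MAC_SPEED_25G, &speed_fw);
 *     // err == 0, speed_fw == HCLGE_FW_MAC_SPEED_25G
 *
 *     err = hclge_convert_to_fw_speed(12345, &speed_fw);
 *     // 12345 is a made-up, unmapped value: err == -EINVAL and
 *     // speed_fw is left untouched
 *
 * so a driver speed missing from the table is rejected before the
 * HCLGE_OPC_CONFIG_SPEED_DUP command is built.
 */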
2550 2551 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, 2552 speed_fw); 2553 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 2554 1); 2555 req->lane_num = lane_num; 2556 2557 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2558 if (ret) { 2559 dev_err(&hdev->pdev->dev, 2560 "mac speed/duplex config cmd failed %d.\n", ret); 2561 return ret; 2562 } 2563 2564 return 0; 2565 } 2566 2567 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) 2568 { 2569 struct hclge_mac *mac = &hdev->hw.mac; 2570 int ret; 2571 2572 duplex = hclge_check_speed_dup(duplex, speed); 2573 if (!mac->support_autoneg && mac->speed == speed && 2574 mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0)) 2575 return 0; 2576 2577 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); 2578 if (ret) 2579 return ret; 2580 2581 hdev->hw.mac.speed = speed; 2582 hdev->hw.mac.duplex = duplex; 2583 if (!lane_num) 2584 hdev->hw.mac.lane_num = lane_num; 2585 2586 return 0; 2587 } 2588 2589 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, 2590 u8 duplex, u8 lane_num) 2591 { 2592 struct hclge_vport *vport = hclge_get_vport(handle); 2593 struct hclge_dev *hdev = vport->back; 2594 2595 return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); 2596 } 2597 2598 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) 2599 { 2600 struct hclge_config_auto_neg_cmd *req; 2601 struct hclge_desc desc; 2602 u32 flag = 0; 2603 int ret; 2604 2605 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); 2606 2607 req = (struct hclge_config_auto_neg_cmd *)desc.data; 2608 if (enable) 2609 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U); 2610 req->cfg_an_cmd_flag = cpu_to_le32(flag); 2611 2612 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2613 if (ret) 2614 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", 2615 ret); 2616 2617 return ret; 2618 } 2619 2620 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) 2621 { 2622 struct hclge_vport *vport = hclge_get_vport(handle); 2623 struct hclge_dev *hdev = vport->back; 2624 2625 if (!hdev->hw.mac.support_autoneg) { 2626 if (enable) { 2627 dev_err(&hdev->pdev->dev, 2628 "autoneg is not supported by current port\n"); 2629 return -EOPNOTSUPP; 2630 } else { 2631 return 0; 2632 } 2633 } 2634 2635 return hclge_set_autoneg_en(hdev, enable); 2636 } 2637 2638 static int hclge_get_autoneg(struct hnae3_handle *handle) 2639 { 2640 struct hclge_vport *vport = hclge_get_vport(handle); 2641 struct hclge_dev *hdev = vport->back; 2642 struct phy_device *phydev = hdev->hw.mac.phydev; 2643 2644 if (phydev) 2645 return phydev->autoneg; 2646 2647 return hdev->hw.mac.autoneg; 2648 } 2649 2650 static int hclge_restart_autoneg(struct hnae3_handle *handle) 2651 { 2652 struct hclge_vport *vport = hclge_get_vport(handle); 2653 struct hclge_dev *hdev = vport->back; 2654 int ret; 2655 2656 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); 2657 2658 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 2659 if (ret) 2660 return ret; 2661 return hclge_notify_client(hdev, HNAE3_UP_CLIENT); 2662 } 2663 2664 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt) 2665 { 2666 struct hclge_vport *vport = hclge_get_vport(handle); 2667 struct hclge_dev *hdev = vport->back; 2668 2669 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) 2670 return hclge_set_autoneg_en(hdev, !halt); 2671 2672 return 0; 2673 } 2674 2675 static void 
hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, 2676 struct hclge_desc *desc, u32 desc_len) 2677 { 2678 u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2; 2679 u32 desc_index = 0; 2680 u32 data_index = 0; 2681 u32 i; 2682 2683 for (i = 0; i < lane_size; i++) { 2684 if (data_index >= HCLGE_DESC_DATA_LEN) { 2685 desc_index++; 2686 data_index = 0; 2687 } 2688 2689 if (desc_index >= desc_len) 2690 return; 2691 2692 hdev->fec_stats.per_lanes[i] += 2693 le32_to_cpu(desc[desc_index].data[data_index]); 2694 data_index++; 2695 } 2696 } 2697 2698 static void hclge_parse_fec_stats(struct hclge_dev *hdev, 2699 struct hclge_desc *desc, u32 desc_len) 2700 { 2701 struct hclge_query_fec_stats_cmd *req; 2702 2703 req = (struct hclge_query_fec_stats_cmd *)desc[0].data; 2704 2705 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; 2706 hdev->fec_stats.rs_corr_blocks += 2707 le32_to_cpu(req->rs_fec_corr_blocks); 2708 hdev->fec_stats.rs_uncorr_blocks += 2709 le32_to_cpu(req->rs_fec_uncorr_blocks); 2710 hdev->fec_stats.rs_error_blocks += 2711 le32_to_cpu(req->rs_fec_error_blocks); 2712 hdev->fec_stats.base_r_corr_blocks += 2713 le32_to_cpu(req->base_r_fec_corr_blocks); 2714 hdev->fec_stats.base_r_uncorr_blocks += 2715 le32_to_cpu(req->base_r_fec_uncorr_blocks); 2716 2717 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); 2718 } 2719 2720 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) 2721 { 2722 struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM]; 2723 int ret; 2724 u32 i; 2725 2726 for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) { 2727 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS, 2728 true); 2729 if (i != (HCLGE_FEC_STATS_CMD_NUM - 1)) 2730 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2731 } 2732 2733 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); 2734 if (ret) 2735 return ret; 2736 2737 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); 2738 2739 return 0; 2740 } 2741 2742 static void hclge_update_fec_stats(struct hclge_dev *hdev) 2743 { 2744 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2745 int ret; 2746 2747 if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || 2748 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) 2749 return; 2750 2751 ret = hclge_update_fec_stats_hw(hdev); 2752 if (ret) 2753 dev_err(&hdev->pdev->dev, 2754 "failed to update fec stats, ret = %d\n", ret); 2755 2756 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); 2757 } 2758 2759 static void hclge_get_fec_stats_total(struct hclge_dev *hdev, 2760 struct ethtool_fec_stats *fec_stats) 2761 { 2762 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; 2763 fec_stats->uncorrectable_blocks.total = 2764 hdev->fec_stats.rs_uncorr_blocks; 2765 } 2766 2767 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, 2768 struct ethtool_fec_stats *fec_stats) 2769 { 2770 u32 i; 2771 2772 if (hdev->fec_stats.base_r_lane_num == 0 || 2773 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { 2774 dev_err(&hdev->pdev->dev, 2775 "fec stats lane number(%llu) is invalid\n", 2776 hdev->fec_stats.base_r_lane_num); 2777 return; 2778 } 2779 2780 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { 2781 fec_stats->corrected_blocks.lanes[i] = 2782 hdev->fec_stats.base_r_corr_per_lanes[i]; 2783 fec_stats->uncorrectable_blocks.lanes[i] = 2784 hdev->fec_stats.base_r_uncorr_per_lanes[i]; 2785 } 2786 } 2787 2788 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, 2789 struct ethtool_fec_stats *fec_stats) 2790 { 2791 u32 fec_mode = 
hdev->hw.mac.fec_mode; 2792 2793 switch (fec_mode) { 2794 case BIT(HNAE3_FEC_RS): 2795 case BIT(HNAE3_FEC_LLRS): 2796 hclge_get_fec_stats_total(hdev, fec_stats); 2797 break; 2798 case BIT(HNAE3_FEC_BASER): 2799 hclge_get_fec_stats_lanes(hdev, fec_stats); 2800 break; 2801 default: 2802 dev_err(&hdev->pdev->dev, 2803 "fec stats is not supported by current fec mode(0x%x)\n", 2804 fec_mode); 2805 break; 2806 } 2807 } 2808 2809 static void hclge_get_fec_stats(struct hnae3_handle *handle, 2810 struct ethtool_fec_stats *fec_stats) 2811 { 2812 struct hclge_vport *vport = hclge_get_vport(handle); 2813 struct hclge_dev *hdev = vport->back; 2814 u32 fec_mode = hdev->hw.mac.fec_mode; 2815 2816 if (fec_mode == BIT(HNAE3_FEC_NONE) || 2817 fec_mode == BIT(HNAE3_FEC_AUTO) || 2818 fec_mode == BIT(HNAE3_FEC_USER_DEF)) 2819 return; 2820 2821 hclge_update_fec_stats(hdev); 2822 2823 hclge_comm_get_fec_stats(hdev, fec_stats); 2824 } 2825 2826 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) 2827 { 2828 struct hclge_config_fec_cmd *req; 2829 struct hclge_desc desc; 2830 int ret; 2831 2832 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); 2833 2834 req = (struct hclge_config_fec_cmd *)desc.data; 2835 if (fec_mode & BIT(HNAE3_FEC_AUTO)) 2836 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); 2837 if (fec_mode & BIT(HNAE3_FEC_RS)) 2838 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2839 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); 2840 if (fec_mode & BIT(HNAE3_FEC_LLRS)) 2841 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2842 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS); 2843 if (fec_mode & BIT(HNAE3_FEC_BASER)) 2844 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, 2845 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); 2846 2847 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2848 if (ret) 2849 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); 2850 2851 return ret; 2852 } 2853 2854 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) 2855 { 2856 struct hclge_vport *vport = hclge_get_vport(handle); 2857 struct hclge_dev *hdev = vport->back; 2858 struct hclge_mac *mac = &hdev->hw.mac; 2859 int ret; 2860 2861 if (fec_mode && !(mac->fec_ability & fec_mode)) { 2862 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); 2863 return -EINVAL; 2864 } 2865 2866 ret = hclge_set_fec_hw(hdev, fec_mode); 2867 if (ret) 2868 return ret; 2869 2870 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); 2871 return 0; 2872 } 2873 2874 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, 2875 u8 *fec_mode) 2876 { 2877 struct hclge_vport *vport = hclge_get_vport(handle); 2878 struct hclge_dev *hdev = vport->back; 2879 struct hclge_mac *mac = &hdev->hw.mac; 2880 2881 if (fec_ability) 2882 *fec_ability = mac->fec_ability; 2883 if (fec_mode) 2884 *fec_mode = mac->fec_mode; 2885 } 2886 2887 static int hclge_mac_init(struct hclge_dev *hdev) 2888 { 2889 struct hclge_mac *mac = &hdev->hw.mac; 2890 int ret; 2891 2892 hdev->support_sfp_query = true; 2893 hdev->hw.mac.duplex = HCLGE_MAC_FULL; 2894 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, 2895 hdev->hw.mac.duplex, hdev->hw.mac.lane_num); 2896 if (ret) 2897 return ret; 2898 2899 if (hdev->hw.mac.support_autoneg) { 2900 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); 2901 if (ret) 2902 return ret; 2903 } 2904 2905 mac->link = 0; 2906 2907 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { 2908 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); 2909 if (ret) 
2910 return ret; 2911 } 2912 2913 ret = hclge_set_mac_mtu(hdev, hdev->mps); 2914 if (ret) { 2915 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); 2916 return ret; 2917 } 2918 2919 ret = hclge_set_default_loopback(hdev); 2920 if (ret) 2921 return ret; 2922 2923 ret = hclge_buffer_alloc(hdev); 2924 if (ret) 2925 dev_err(&hdev->pdev->dev, 2926 "allocate buffer fail, ret=%d\n", ret); 2927 2928 return ret; 2929 } 2930 2931 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) 2932 { 2933 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2934 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { 2935 hdev->last_mbx_scheduled = jiffies; 2936 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 2937 } 2938 } 2939 2940 static void hclge_reset_task_schedule(struct hclge_dev *hdev) 2941 { 2942 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2943 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && 2944 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { 2945 hdev->last_rst_scheduled = jiffies; 2946 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 2947 } 2948 } 2949 2950 static void hclge_errhand_task_schedule(struct hclge_dev *hdev) 2951 { 2952 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2953 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) 2954 mod_delayed_work(hclge_wq, &hdev->service_task, 0); 2955 } 2956 2957 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) 2958 { 2959 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && 2960 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) 2961 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); 2962 } 2963 2964 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) 2965 { 2966 struct hclge_link_status_cmd *req; 2967 struct hclge_desc desc; 2968 int ret; 2969 2970 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); 2971 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 2972 if (ret) { 2973 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", 2974 ret); 2975 return ret; 2976 } 2977 2978 req = (struct hclge_link_status_cmd *)desc.data; 2979 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? 
2980 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 2981 2982 return 0; 2983 } 2984 2985 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) 2986 { 2987 struct phy_device *phydev = hdev->hw.mac.phydev; 2988 2989 *link_status = HCLGE_LINK_STATUS_DOWN; 2990 2991 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) 2992 return 0; 2993 2994 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) 2995 return 0; 2996 2997 return hclge_get_mac_link_status(hdev, link_status); 2998 } 2999 3000 static void hclge_push_link_status(struct hclge_dev *hdev) 3001 { 3002 struct hclge_vport *vport; 3003 int ret; 3004 u16 i; 3005 3006 for (i = 0; i < pci_num_vf(hdev->pdev); i++) { 3007 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; 3008 3009 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) || 3010 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO) 3011 continue; 3012 3013 ret = hclge_push_vf_link_status(vport); 3014 if (ret) { 3015 dev_err(&hdev->pdev->dev, 3016 "failed to push link status to vf%u, ret = %d\n", 3017 i, ret); 3018 } 3019 } 3020 } 3021 3022 static void hclge_update_link_status(struct hclge_dev *hdev) 3023 { 3024 struct hnae3_handle *rhandle = &hdev->vport[0].roce; 3025 struct hnae3_handle *handle = &hdev->vport[0].nic; 3026 struct hnae3_client *rclient = hdev->roce_client; 3027 struct hnae3_client *client = hdev->nic_client; 3028 int state; 3029 int ret; 3030 3031 if (!client) 3032 return; 3033 3034 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) 3035 return; 3036 3037 ret = hclge_get_mac_phy_link(hdev, &state); 3038 if (ret) { 3039 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 3040 return; 3041 } 3042 3043 if (state != hdev->hw.mac.link) { 3044 hdev->hw.mac.link = state; 3045 if (state == HCLGE_LINK_STATUS_UP) 3046 hclge_update_port_info(hdev); 3047 3048 client->ops->link_status_change(handle, state); 3049 hclge_config_mac_tnl_int(hdev, state); 3050 if (rclient && rclient->ops->link_status_change) 3051 rclient->ops->link_status_change(rhandle, state); 3052 3053 hclge_push_link_status(hdev); 3054 } 3055 3056 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); 3057 } 3058 3059 static void hclge_update_speed_advertising(struct hclge_mac *mac) 3060 { 3061 u32 speed_ability; 3062 3063 if (hclge_get_speed_bit(mac->speed, &speed_ability)) 3064 return; 3065 3066 switch (mac->module_type) { 3067 case HNAE3_MODULE_TYPE_FIBRE_LR: 3068 hclge_convert_setting_lr(speed_ability, mac->advertising); 3069 break; 3070 case HNAE3_MODULE_TYPE_FIBRE_SR: 3071 case HNAE3_MODULE_TYPE_AOC: 3072 hclge_convert_setting_sr(speed_ability, mac->advertising); 3073 break; 3074 case HNAE3_MODULE_TYPE_CR: 3075 hclge_convert_setting_cr(speed_ability, mac->advertising); 3076 break; 3077 case HNAE3_MODULE_TYPE_KR: 3078 hclge_convert_setting_kr(speed_ability, mac->advertising); 3079 break; 3080 default: 3081 break; 3082 } 3083 } 3084 3085 static void hclge_update_fec_advertising(struct hclge_mac *mac) 3086 { 3087 if (mac->fec_mode & BIT(HNAE3_FEC_RS)) 3088 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, 3089 mac->advertising); 3090 else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS)) 3091 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, 3092 mac->advertising); 3093 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) 3094 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, 3095 mac->advertising); 3096 else 3097 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, 3098 mac->advertising); 3099 } 3100 3101 static void hclge_update_pause_advertising(struct hclge_dev *hdev) 3102 { 3103 struct 
hclge_mac *mac = &hdev->hw.mac; 3104 bool rx_en, tx_en; 3105 3106 switch (hdev->fc_mode_last_time) { 3107 case HCLGE_FC_RX_PAUSE: 3108 rx_en = true; 3109 tx_en = false; 3110 break; 3111 case HCLGE_FC_TX_PAUSE: 3112 rx_en = false; 3113 tx_en = true; 3114 break; 3115 case HCLGE_FC_FULL: 3116 rx_en = true; 3117 tx_en = true; 3118 break; 3119 default: 3120 rx_en = false; 3121 tx_en = false; 3122 break; 3123 } 3124 3125 linkmode_set_pause(mac->advertising, tx_en, rx_en); 3126 } 3127 3128 static void hclge_update_advertising(struct hclge_dev *hdev) 3129 { 3130 struct hclge_mac *mac = &hdev->hw.mac; 3131 3132 linkmode_zero(mac->advertising); 3133 hclge_update_speed_advertising(mac); 3134 hclge_update_fec_advertising(mac); 3135 hclge_update_pause_advertising(hdev); 3136 } 3137 3138 static void hclge_update_port_capability(struct hclge_dev *hdev, 3139 struct hclge_mac *mac) 3140 { 3141 if (hnae3_dev_fec_supported(hdev)) 3142 hclge_convert_setting_fec(mac); 3143 3144 /* firmware can not identify back plane type, the media type 3145 * read from configuration can help deal it 3146 */ 3147 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && 3148 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) 3149 mac->module_type = HNAE3_MODULE_TYPE_KR; 3150 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 3151 mac->module_type = HNAE3_MODULE_TYPE_TP; 3152 3153 if (mac->support_autoneg) { 3154 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); 3155 linkmode_copy(mac->advertising, mac->supported); 3156 } else { 3157 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, 3158 mac->supported); 3159 hclge_update_advertising(hdev); 3160 } 3161 } 3162 3163 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) 3164 { 3165 struct hclge_sfp_info_cmd *resp; 3166 struct hclge_desc desc; 3167 int ret; 3168 3169 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 3170 resp = (struct hclge_sfp_info_cmd *)desc.data; 3171 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3172 if (ret == -EOPNOTSUPP) { 3173 dev_warn(&hdev->pdev->dev, 3174 "IMP do not support get SFP speed %d\n", ret); 3175 return ret; 3176 } else if (ret) { 3177 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); 3178 return ret; 3179 } 3180 3181 *speed = le32_to_cpu(resp->speed); 3182 3183 return 0; 3184 } 3185 3186 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) 3187 { 3188 struct hclge_sfp_info_cmd *resp; 3189 struct hclge_desc desc; 3190 int ret; 3191 3192 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); 3193 resp = (struct hclge_sfp_info_cmd *)desc.data; 3194 3195 resp->query_type = QUERY_ACTIVE_SPEED; 3196 3197 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3198 if (ret == -EOPNOTSUPP) { 3199 dev_warn(&hdev->pdev->dev, 3200 "IMP does not support get SFP info %d\n", ret); 3201 return ret; 3202 } else if (ret) { 3203 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); 3204 return ret; 3205 } 3206 3207 /* In some case, mac speed get from IMP may be 0, it shouldn't be 3208 * set to mac->speed. 
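 *
 * Editor's note (illustrative addition, not from the original source):
 * the same reply also distinguishes firmware generations. Assuming a
 * reply that reports an active 25G link with RS-FEC and autoneg
 * capability (and the usual Mbit/s encoding, so 25G reads back as
 * 25000):
 *
 *     resp->speed = 25000            -> mac->speed = 25000
 *     resp->speed_ability != 0       -> new firmware: module_type,
 *                                       lane_num, autoneg and
 *                                       fec_ability are taken from the
 *                                       reply
 *     resp->active_fec = HNAE3_FEC_RS
 *                                    -> mac->fec_mode = BIT(HNAE3_FEC_RS)
 *
 * whereas speed_ability == 0 means old firmware, and only the raw SFP
 * speed (QUERY_SFP_SPEED path) is used.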
3209 */ 3210 if (!le32_to_cpu(resp->speed)) 3211 return 0; 3212 3213 mac->speed = le32_to_cpu(resp->speed); 3214 /* if resp->speed_ability is 0, it means it's an old version 3215 * firmware, do not update these params 3216 */ 3217 if (resp->speed_ability) { 3218 mac->module_type = le32_to_cpu(resp->module_type); 3219 mac->speed_ability = le32_to_cpu(resp->speed_ability); 3220 mac->autoneg = resp->autoneg; 3221 mac->support_autoneg = resp->autoneg_ability; 3222 mac->speed_type = QUERY_ACTIVE_SPEED; 3223 mac->lane_num = resp->lane_num; 3224 if (!resp->active_fec) 3225 mac->fec_mode = 0; 3226 else 3227 mac->fec_mode = BIT(resp->active_fec); 3228 mac->fec_ability = resp->fec_ability; 3229 } else { 3230 mac->speed_type = QUERY_SFP_SPEED; 3231 } 3232 3233 return 0; 3234 } 3235 3236 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, 3237 struct ethtool_link_ksettings *cmd) 3238 { 3239 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; 3240 struct hclge_vport *vport = hclge_get_vport(handle); 3241 struct hclge_phy_link_ksetting_0_cmd *req0; 3242 struct hclge_phy_link_ksetting_1_cmd *req1; 3243 u32 supported, advertising, lp_advertising; 3244 struct hclge_dev *hdev = vport->back; 3245 int ret; 3246 3247 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, 3248 true); 3249 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3250 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, 3251 true); 3252 3253 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); 3254 if (ret) { 3255 dev_err(&hdev->pdev->dev, 3256 "failed to get phy link ksetting, ret = %d.\n", ret); 3257 return ret; 3258 } 3259 3260 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; 3261 cmd->base.autoneg = req0->autoneg; 3262 cmd->base.speed = le32_to_cpu(req0->speed); 3263 cmd->base.duplex = req0->duplex; 3264 cmd->base.port = req0->port; 3265 cmd->base.transceiver = req0->transceiver; 3266 cmd->base.phy_address = req0->phy_address; 3267 cmd->base.eth_tp_mdix = req0->eth_tp_mdix; 3268 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; 3269 supported = le32_to_cpu(req0->supported); 3270 advertising = le32_to_cpu(req0->advertising); 3271 lp_advertising = le32_to_cpu(req0->lp_advertising); 3272 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 3273 supported); 3274 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 3275 advertising); 3276 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, 3277 lp_advertising); 3278 3279 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; 3280 cmd->base.master_slave_cfg = req1->master_slave_cfg; 3281 cmd->base.master_slave_state = req1->master_slave_state; 3282 3283 return 0; 3284 } 3285 3286 static int 3287 hclge_set_phy_link_ksettings(struct hnae3_handle *handle, 3288 const struct ethtool_link_ksettings *cmd) 3289 { 3290 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM]; 3291 struct hclge_vport *vport = hclge_get_vport(handle); 3292 struct hclge_phy_link_ksetting_0_cmd *req0; 3293 struct hclge_phy_link_ksetting_1_cmd *req1; 3294 struct hclge_dev *hdev = vport->back; 3295 u32 advertising; 3296 int ret; 3297 3298 if (cmd->base.autoneg == AUTONEG_DISABLE && 3299 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) || 3300 (cmd->base.duplex != DUPLEX_HALF && 3301 cmd->base.duplex != DUPLEX_FULL))) 3302 return -EINVAL; 3303 3304 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING, 3305 false); 3306 desc[0].flag |= 
cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 3307 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING, 3308 false); 3309 3310 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; 3311 req0->autoneg = cmd->base.autoneg; 3312 req0->speed = cpu_to_le32(cmd->base.speed); 3313 req0->duplex = cmd->base.duplex; 3314 ethtool_convert_link_mode_to_legacy_u32(&advertising, 3315 cmd->link_modes.advertising); 3316 req0->advertising = cpu_to_le32(advertising); 3317 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; 3318 3319 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; 3320 req1->master_slave_cfg = cmd->base.master_slave_cfg; 3321 3322 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); 3323 if (ret) { 3324 dev_err(&hdev->pdev->dev, 3325 "failed to set phy link ksettings, ret = %d.\n", ret); 3326 return ret; 3327 } 3328 3329 hdev->hw.mac.autoneg = cmd->base.autoneg; 3330 hdev->hw.mac.speed = cmd->base.speed; 3331 hdev->hw.mac.duplex = cmd->base.duplex; 3332 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); 3333 3334 return 0; 3335 } 3336 3337 static int hclge_update_tp_port_info(struct hclge_dev *hdev) 3338 { 3339 struct ethtool_link_ksettings cmd; 3340 int ret; 3341 3342 if (!hnae3_dev_phy_imp_supported(hdev)) 3343 return 0; 3344 3345 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); 3346 if (ret) 3347 return ret; 3348 3349 hdev->hw.mac.autoneg = cmd.base.autoneg; 3350 hdev->hw.mac.speed = cmd.base.speed; 3351 hdev->hw.mac.duplex = cmd.base.duplex; 3352 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); 3353 3354 return 0; 3355 } 3356 3357 static int hclge_tp_port_init(struct hclge_dev *hdev) 3358 { 3359 struct ethtool_link_ksettings cmd; 3360 3361 if (!hnae3_dev_phy_imp_supported(hdev)) 3362 return 0; 3363 3364 cmd.base.autoneg = hdev->hw.mac.autoneg; 3365 cmd.base.speed = hdev->hw.mac.speed; 3366 cmd.base.duplex = hdev->hw.mac.duplex; 3367 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); 3368 3369 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); 3370 } 3371 3372 static int hclge_update_port_info(struct hclge_dev *hdev) 3373 { 3374 struct hclge_mac *mac = &hdev->hw.mac; 3375 int speed; 3376 int ret; 3377 3378 /* get the port info from SFP cmd if not copper port */ 3379 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) 3380 return hclge_update_tp_port_info(hdev); 3381 3382 /* if IMP does not support get SFP/qSFP info, return directly */ 3383 if (!hdev->support_sfp_query) 3384 return 0; 3385 3386 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3387 speed = mac->speed; 3388 ret = hclge_get_sfp_info(hdev, mac); 3389 } else { 3390 speed = HCLGE_MAC_SPEED_UNKNOWN; 3391 ret = hclge_get_sfp_speed(hdev, &speed); 3392 } 3393 3394 if (ret == -EOPNOTSUPP) { 3395 hdev->support_sfp_query = false; 3396 return ret; 3397 } else if (ret) { 3398 return ret; 3399 } 3400 3401 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 3402 if (mac->speed_type == QUERY_ACTIVE_SPEED) { 3403 hclge_update_port_capability(hdev, mac); 3404 if (mac->speed != speed) 3405 (void)hclge_tm_port_shaper_cfg(hdev); 3406 return 0; 3407 } 3408 return hclge_cfg_mac_speed_dup(hdev, mac->speed, 3409 HCLGE_MAC_FULL, mac->lane_num); 3410 } else { 3411 if (speed == HCLGE_MAC_SPEED_UNKNOWN) 3412 return 0; /* do nothing if no SFP */ 3413 3414 /* must config full duplex for SFP */ 3415 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0); 3416 } 3417 } 3418 3419 static int hclge_get_status(struct 
hnae3_handle *handle) 3420 { 3421 struct hclge_vport *vport = hclge_get_vport(handle); 3422 struct hclge_dev *hdev = vport->back; 3423 3424 hclge_update_link_status(hdev); 3425 3426 return hdev->hw.mac.link; 3427 } 3428 3429 struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) 3430 { 3431 if (!pci_num_vf(hdev->pdev)) { 3432 dev_err(&hdev->pdev->dev, 3433 "SRIOV is disabled, can not get vport(%d) info.\n", vf); 3434 return NULL; 3435 } 3436 3437 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { 3438 dev_err(&hdev->pdev->dev, 3439 "vf id(%d) is out of range(0 <= vfid < %d)\n", 3440 vf, pci_num_vf(hdev->pdev)); 3441 return NULL; 3442 } 3443 3444 /* VF start from 1 in vport */ 3445 vf += HCLGE_VF_VPORT_START_NUM; 3446 return &hdev->vport[vf]; 3447 } 3448 3449 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf, 3450 struct ifla_vf_info *ivf) 3451 { 3452 struct hclge_vport *vport = hclge_get_vport(handle); 3453 struct hclge_dev *hdev = vport->back; 3454 3455 vport = hclge_get_vf_vport(hdev, vf); 3456 if (!vport) 3457 return -EINVAL; 3458 3459 ivf->vf = vf; 3460 ivf->linkstate = vport->vf_info.link_state; 3461 ivf->spoofchk = vport->vf_info.spoofchk; 3462 ivf->trusted = vport->vf_info.trusted; 3463 ivf->min_tx_rate = 0; 3464 ivf->max_tx_rate = vport->vf_info.max_tx_rate; 3465 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag; 3466 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto); 3467 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos; 3468 ether_addr_copy(ivf->mac, vport->vf_info.mac); 3469 3470 return 0; 3471 } 3472 3473 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, 3474 int link_state) 3475 { 3476 struct hclge_vport *vport = hclge_get_vport(handle); 3477 struct hclge_dev *hdev = vport->back; 3478 int link_state_old; 3479 int ret; 3480 3481 vport = hclge_get_vf_vport(hdev, vf); 3482 if (!vport) 3483 return -EINVAL; 3484 3485 link_state_old = vport->vf_info.link_state; 3486 vport->vf_info.link_state = link_state; 3487 3488 /* return success directly if the VF is unalive, VF will 3489 * query link state itself when it starts work. 3490 */ 3491 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 3492 return 0; 3493 3494 ret = hclge_push_vf_link_status(vport); 3495 if (ret) { 3496 vport->vf_info.link_state = link_state_old; 3497 dev_err(&hdev->pdev->dev, 3498 "failed to push vf%d link status, ret = %d\n", vf, ret); 3499 } 3500 3501 return ret; 3502 } 3503 3504 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) 3505 { 3506 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg; 3507 3508 /* fetch the events from their corresponding regs */ 3509 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); 3510 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); 3511 hw_err_src_reg = hclge_read_dev(&hdev->hw, 3512 HCLGE_RAS_PF_OTHER_INT_STS_REG); 3513 3514 /* Assumption: If by any chance reset and mailbox events are reported 3515 * together then we will only process reset event in this go and will 3516 * defer the processing of the mailbox events. Since, we would have not 3517 * cleared RX CMDQ event this time we would receive again another 3518 * interrupt from H/W just for the mailbox. 
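 *
 * Editor's note (illustrative addition, not from the original source):
 * the checks below therefore form a fixed priority order,
 *
 *     IMP reset > global reset > MSI-X/RAS error > PTP > mailbox,
 *
 * e.g. if an IMP reset bit and the RX CMDQ (mailbox) bit are raised in
 * the same interrupt, only BIT(HCLGE_VECTOR0_IMPRESET_INT_B) is written
 * to *clearval and HCLGE_VECTOR0_EVENT_RST is returned; the RX CMDQ
 * source stays uncleared, so the vector fires again later and the
 * mailbox work is deferred rather than lost.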
3519 * 3520 * check for vector0 reset event sources 3521 */ 3522 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) { 3523 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); 3524 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); 3525 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3526 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 3527 hdev->rst_stats.imp_rst_cnt++; 3528 return HCLGE_VECTOR0_EVENT_RST; 3529 } 3530 3531 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) { 3532 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); 3533 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3534 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); 3535 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 3536 hdev->rst_stats.global_rst_cnt++; 3537 return HCLGE_VECTOR0_EVENT_RST; 3538 } 3539 3540 /* check for vector0 msix event and hardware error event source */ 3541 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK || 3542 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK) 3543 return HCLGE_VECTOR0_EVENT_ERR; 3544 3545 /* check for vector0 ptp event source */ 3546 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) { 3547 *clearval = msix_src_reg; 3548 return HCLGE_VECTOR0_EVENT_PTP; 3549 } 3550 3551 /* check for vector0 mailbox(=CMDQ RX) event source */ 3552 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { 3553 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B); 3554 *clearval = cmdq_src_reg; 3555 return HCLGE_VECTOR0_EVENT_MBX; 3556 } 3557 3558 /* print other vector0 event source */ 3559 dev_info(&hdev->pdev->dev, 3560 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n", 3561 cmdq_src_reg, hw_err_src_reg, msix_src_reg); 3562 3563 return HCLGE_VECTOR0_EVENT_OTHER; 3564 } 3565 3566 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, 3567 u32 regclr) 3568 { 3569 #define HCLGE_IMP_RESET_DELAY 5 3570 3571 switch (event_type) { 3572 case HCLGE_VECTOR0_EVENT_PTP: 3573 case HCLGE_VECTOR0_EVENT_RST: 3574 if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B)) 3575 mdelay(HCLGE_IMP_RESET_DELAY); 3576 3577 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); 3578 break; 3579 case HCLGE_VECTOR0_EVENT_MBX: 3580 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); 3581 break; 3582 default: 3583 break; 3584 } 3585 } 3586 3587 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) 3588 { 3589 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, 3590 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | 3591 BIT(HCLGE_VECTOR0_CORERESET_INT_B) | 3592 BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); 3593 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); 3594 } 3595 3596 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) 3597 { 3598 writel(enable ? 1 : 0, vector->addr); 3599 } 3600 3601 static irqreturn_t hclge_misc_irq_handle(int irq, void *data) 3602 { 3603 struct hclge_dev *hdev = data; 3604 unsigned long flags; 3605 u32 clearval = 0; 3606 u32 event_cause; 3607 3608 hclge_enable_vector(&hdev->misc_vector, false); 3609 event_cause = hclge_check_event_cause(hdev, &clearval); 3610 3611 /* vector 0 interrupt is shared with reset and mailbox source events. 
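 *
 * Editor's note (illustrative addition, not from the original source):
 * the vector was masked via hclge_enable_vector(..., false) just above,
 * and the switch below both dispatches the cause and decides when to
 * unmask it again:
 *
 *     EVENT_ERR   -> hclge_errhand_task_schedule(), vector stays masked
 *     EVENT_RST   -> hclge_reset_task_schedule(),   vector stays masked
 *     EVENT_PTP   -> clean TX hwts under ptp->lock, vector re-enabled
 *     EVENT_MBX   -> hclge_mbx_task_schedule(),     vector re-enabled
 *     EVENT_OTHER -> warn only,                     vector re-enabled
 *
 * leaving the scheduled reset/error handling work to re-enable vector 0
 * once it has dealt with the event.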
*/ 3612 switch (event_cause) { 3613 case HCLGE_VECTOR0_EVENT_ERR: 3614 hclge_errhand_task_schedule(hdev); 3615 break; 3616 case HCLGE_VECTOR0_EVENT_RST: 3617 hclge_reset_task_schedule(hdev); 3618 break; 3619 case HCLGE_VECTOR0_EVENT_PTP: 3620 spin_lock_irqsave(&hdev->ptp->lock, flags); 3621 hclge_ptp_clean_tx_hwts(hdev); 3622 spin_unlock_irqrestore(&hdev->ptp->lock, flags); 3623 break; 3624 case HCLGE_VECTOR0_EVENT_MBX: 3625 /* If we are here then, 3626 * 1. Either we are not handling any mbx task and we are not 3627 * scheduled as well 3628 * OR 3629 * 2. We could be handling a mbx task but nothing more is 3630 * scheduled. 3631 * In both cases, we should schedule mbx task as there are more 3632 * mbx messages reported by this interrupt. 3633 */ 3634 hclge_mbx_task_schedule(hdev); 3635 break; 3636 default: 3637 dev_warn(&hdev->pdev->dev, 3638 "received unknown or unhandled event of vector0\n"); 3639 break; 3640 } 3641 3642 hclge_clear_event_cause(hdev, event_cause, clearval); 3643 3644 /* Enable interrupt if it is not caused by reset event or error event */ 3645 if (event_cause == HCLGE_VECTOR0_EVENT_PTP || 3646 event_cause == HCLGE_VECTOR0_EVENT_MBX || 3647 event_cause == HCLGE_VECTOR0_EVENT_OTHER) 3648 hclge_enable_vector(&hdev->misc_vector, true); 3649 3650 return IRQ_HANDLED; 3651 } 3652 3653 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) 3654 { 3655 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { 3656 dev_warn(&hdev->pdev->dev, 3657 "vector(vector_id %d) has been freed.\n", vector_id); 3658 return; 3659 } 3660 3661 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; 3662 hdev->num_msi_left += 1; 3663 hdev->num_msi_used -= 1; 3664 } 3665 3666 static void hclge_get_misc_vector(struct hclge_dev *hdev) 3667 { 3668 struct hclge_misc_vector *vector = &hdev->misc_vector; 3669 3670 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); 3671 3672 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; 3673 hdev->vector_status[0] = 0; 3674 3675 hdev->num_msi_left -= 1; 3676 hdev->num_msi_used += 1; 3677 } 3678 3679 static int hclge_misc_irq_init(struct hclge_dev *hdev) 3680 { 3681 int ret; 3682 3683 hclge_get_misc_vector(hdev); 3684 3685 /* this would be explicitly freed in the end */ 3686 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", 3687 HCLGE_NAME, pci_name(hdev->pdev)); 3688 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, 3689 0, hdev->misc_vector.name, hdev); 3690 if (ret) { 3691 hclge_free_vector(hdev, 0); 3692 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", 3693 hdev->misc_vector.vector_irq); 3694 } 3695 3696 return ret; 3697 } 3698 3699 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) 3700 { 3701 free_irq(hdev->misc_vector.vector_irq, hdev); 3702 hclge_free_vector(hdev, 0); 3703 } 3704 3705 int hclge_notify_client(struct hclge_dev *hdev, 3706 enum hnae3_reset_notify_type type) 3707 { 3708 struct hnae3_handle *handle = &hdev->vport[0].nic; 3709 struct hnae3_client *client = hdev->nic_client; 3710 int ret; 3711 3712 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) 3713 return 0; 3714 3715 if (!client->ops->reset_notify) 3716 return -EOPNOTSUPP; 3717 3718 ret = client->ops->reset_notify(handle, type); 3719 if (ret) 3720 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 3721 type, ret); 3722 3723 return ret; 3724 } 3725 3726 static int hclge_notify_roce_client(struct hclge_dev *hdev, 3727 enum hnae3_reset_notify_type type) 3728 { 3729 struct hnae3_handle 
*handle = &hdev->vport[0].roce; 3730 struct hnae3_client *client = hdev->roce_client; 3731 int ret; 3732 3733 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) 3734 return 0; 3735 3736 if (!client->ops->reset_notify) 3737 return -EOPNOTSUPP; 3738 3739 ret = client->ops->reset_notify(handle, type); 3740 if (ret) 3741 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 3742 type, ret); 3743 3744 return ret; 3745 } 3746 3747 static int hclge_reset_wait(struct hclge_dev *hdev) 3748 { 3749 #define HCLGE_RESET_WATI_MS 100 3750 #define HCLGE_RESET_WAIT_CNT 350 3751 3752 u32 val, reg, reg_bit; 3753 u32 cnt = 0; 3754 3755 switch (hdev->reset_type) { 3756 case HNAE3_IMP_RESET: 3757 reg = HCLGE_GLOBAL_RESET_REG; 3758 reg_bit = HCLGE_IMP_RESET_BIT; 3759 break; 3760 case HNAE3_GLOBAL_RESET: 3761 reg = HCLGE_GLOBAL_RESET_REG; 3762 reg_bit = HCLGE_GLOBAL_RESET_BIT; 3763 break; 3764 case HNAE3_FUNC_RESET: 3765 reg = HCLGE_FUN_RST_ING; 3766 reg_bit = HCLGE_FUN_RST_ING_B; 3767 break; 3768 default: 3769 dev_err(&hdev->pdev->dev, 3770 "Wait for unsupported reset type: %d\n", 3771 hdev->reset_type); 3772 return -EINVAL; 3773 } 3774 3775 val = hclge_read_dev(&hdev->hw, reg); 3776 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { 3777 msleep(HCLGE_RESET_WATI_MS); 3778 val = hclge_read_dev(&hdev->hw, reg); 3779 cnt++; 3780 } 3781 3782 if (cnt >= HCLGE_RESET_WAIT_CNT) { 3783 dev_warn(&hdev->pdev->dev, 3784 "Wait for reset timeout: %d\n", hdev->reset_type); 3785 return -EBUSY; 3786 } 3787 3788 return 0; 3789 } 3790 3791 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) 3792 { 3793 struct hclge_vf_rst_cmd *req; 3794 struct hclge_desc desc; 3795 3796 req = (struct hclge_vf_rst_cmd *)desc.data; 3797 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); 3798 req->dest_vfid = func_id; 3799 3800 if (reset) 3801 req->vf_rst = 0x1; 3802 3803 return hclge_cmd_send(&hdev->hw, &desc, 1); 3804 } 3805 3806 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) 3807 { 3808 int i; 3809 3810 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { 3811 struct hclge_vport *vport = &hdev->vport[i]; 3812 int ret; 3813 3814 /* Send cmd to set/clear VF's FUNC_RST_ING */ 3815 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); 3816 if (ret) { 3817 dev_err(&hdev->pdev->dev, 3818 "set vf(%u) rst failed %d!\n", 3819 vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3820 ret); 3821 return ret; 3822 } 3823 3824 if (!reset || 3825 !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state)) 3826 continue; 3827 3828 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) && 3829 hdev->reset_type == HNAE3_FUNC_RESET) { 3830 set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, 3831 &vport->need_notify); 3832 continue; 3833 } 3834 3835 /* Inform VF to process the reset. 3836 * hclge_inform_reset_assert_to_vf may fail if VF 3837 * driver is not loaded. 
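 * Such a failure is therefore only reported with dev_warn() below and
 * does not abort the loop over the remaining VFs.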
3838 */ 3839 ret = hclge_inform_reset_assert_to_vf(vport); 3840 if (ret) 3841 dev_warn(&hdev->pdev->dev, 3842 "inform reset to vf(%u) failed %d!\n", 3843 vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3844 ret); 3845 } 3846 3847 return 0; 3848 } 3849 3850 static void hclge_mailbox_service_task(struct hclge_dev *hdev) 3851 { 3852 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || 3853 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || 3854 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) 3855 return; 3856 3857 if (time_is_before_jiffies(hdev->last_mbx_scheduled + 3858 HCLGE_MBX_SCHED_TIMEOUT)) 3859 dev_warn(&hdev->pdev->dev, 3860 "mbx service task is scheduled after %ums on cpu%u!\n", 3861 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), 3862 smp_processor_id()); 3863 3864 hclge_mbx_handler(hdev); 3865 3866 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 3867 } 3868 3869 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) 3870 { 3871 struct hclge_pf_rst_sync_cmd *req; 3872 struct hclge_desc desc; 3873 int cnt = 0; 3874 int ret; 3875 3876 req = (struct hclge_pf_rst_sync_cmd *)desc.data; 3877 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true); 3878 3879 do { 3880 /* vf need to down netdev by mbx during PF or FLR reset */ 3881 hclge_mailbox_service_task(hdev); 3882 3883 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3884 /* for compatible with old firmware, wait 3885 * 100 ms for VF to stop IO 3886 */ 3887 if (ret == -EOPNOTSUPP) { 3888 msleep(HCLGE_RESET_SYNC_TIME); 3889 return; 3890 } else if (ret) { 3891 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", 3892 ret); 3893 return; 3894 } else if (req->all_vf_ready) { 3895 return; 3896 } 3897 msleep(HCLGE_PF_RESET_SYNC_TIME); 3898 hclge_comm_cmd_reuse_desc(&desc, true); 3899 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT); 3900 3901 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); 3902 } 3903 3904 void hclge_report_hw_error(struct hclge_dev *hdev, 3905 enum hnae3_hw_error_type type) 3906 { 3907 struct hnae3_client *client = hdev->nic_client; 3908 3909 if (!client || !client->ops->process_hw_error || 3910 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) 3911 return; 3912 3913 client->ops->process_hw_error(&hdev->vport[0].nic, type); 3914 } 3915 3916 static void hclge_handle_imp_error(struct hclge_dev *hdev) 3917 { 3918 u32 reg_val; 3919 3920 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 3921 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) { 3922 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); 3923 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B); 3924 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 3925 } 3926 3927 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) { 3928 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); 3929 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B); 3930 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); 3931 } 3932 } 3933 3934 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) 3935 { 3936 struct hclge_desc desc; 3937 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; 3938 int ret; 3939 3940 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 3941 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); 3942 req->fun_reset_vfid = func_id; 3943 3944 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 3945 if (ret) 3946 dev_err(&hdev->pdev->dev, 3947 "send function reset cmd fail, status =%d\n", ret); 3948 3949 return ret; 3950 } 3951 3952 static void 
hclge_do_reset(struct hclge_dev *hdev) 3953 { 3954 struct hnae3_handle *handle = &hdev->vport[0].nic; 3955 struct pci_dev *pdev = hdev->pdev; 3956 u32 val; 3957 3958 if (hclge_get_hw_reset_stat(handle)) { 3959 dev_info(&pdev->dev, "hardware reset not finish\n"); 3960 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", 3961 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), 3962 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); 3963 return; 3964 } 3965 3966 switch (hdev->reset_type) { 3967 case HNAE3_IMP_RESET: 3968 dev_info(&pdev->dev, "IMP reset requested\n"); 3969 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 3970 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); 3971 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); 3972 break; 3973 case HNAE3_GLOBAL_RESET: 3974 dev_info(&pdev->dev, "global reset requested\n"); 3975 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); 3976 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); 3977 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); 3978 break; 3979 case HNAE3_FUNC_RESET: 3980 dev_info(&pdev->dev, "PF reset requested\n"); 3981 /* schedule again to check later */ 3982 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); 3983 hclge_reset_task_schedule(hdev); 3984 break; 3985 default: 3986 dev_warn(&pdev->dev, 3987 "unsupported reset type: %d\n", hdev->reset_type); 3988 break; 3989 } 3990 } 3991 3992 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev, 3993 unsigned long *addr) 3994 { 3995 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 3996 struct hclge_dev *hdev = ae_dev->priv; 3997 3998 /* return the highest priority reset level amongst all */ 3999 if (test_bit(HNAE3_IMP_RESET, addr)) { 4000 rst_level = HNAE3_IMP_RESET; 4001 clear_bit(HNAE3_IMP_RESET, addr); 4002 clear_bit(HNAE3_GLOBAL_RESET, addr); 4003 clear_bit(HNAE3_FUNC_RESET, addr); 4004 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { 4005 rst_level = HNAE3_GLOBAL_RESET; 4006 clear_bit(HNAE3_GLOBAL_RESET, addr); 4007 clear_bit(HNAE3_FUNC_RESET, addr); 4008 } else if (test_bit(HNAE3_FUNC_RESET, addr)) { 4009 rst_level = HNAE3_FUNC_RESET; 4010 clear_bit(HNAE3_FUNC_RESET, addr); 4011 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 4012 rst_level = HNAE3_FLR_RESET; 4013 clear_bit(HNAE3_FLR_RESET, addr); 4014 } 4015 4016 if (hdev->reset_type != HNAE3_NONE_RESET && 4017 rst_level < hdev->reset_type) 4018 return HNAE3_NONE_RESET; 4019 4020 return rst_level; 4021 } 4022 4023 static void hclge_clear_reset_cause(struct hclge_dev *hdev) 4024 { 4025 u32 clearval = 0; 4026 4027 switch (hdev->reset_type) { 4028 case HNAE3_IMP_RESET: 4029 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); 4030 break; 4031 case HNAE3_GLOBAL_RESET: 4032 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); 4033 break; 4034 default: 4035 break; 4036 } 4037 4038 if (!clearval) 4039 return; 4040 4041 /* For revision 0x20, the reset interrupt source 4042 * can only be cleared after hardware reset done 4043 */ 4044 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 4045 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, 4046 clearval); 4047 4048 hclge_enable_vector(&hdev->misc_vector, true); 4049 } 4050 4051 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) 4052 { 4053 u32 reg_val; 4054 4055 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); 4056 if (enable) 4057 reg_val |= HCLGE_COMM_NIC_SW_RST_RDY; 4058 else 4059 reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY; 4060 4061 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); 
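	/* HCLGE_COMM_NIC_SW_RST_RDY is a driver/firmware handshake flag carried
	 * in the CSQ depth register: it is set once reset preparation is done
	 * and cleared again in hclge_reset_prepare_up() after re-initialization.
	 */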
4062 } 4063 4064 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) 4065 { 4066 int ret; 4067 4068 ret = hclge_set_all_vf_rst(hdev, true); 4069 if (ret) 4070 return ret; 4071 4072 hclge_func_reset_sync_vf(hdev); 4073 4074 return 0; 4075 } 4076 4077 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) 4078 { 4079 u32 reg_val; 4080 int ret = 0; 4081 4082 switch (hdev->reset_type) { 4083 case HNAE3_FUNC_RESET: 4084 ret = hclge_func_reset_notify_vf(hdev); 4085 if (ret) 4086 return ret; 4087 4088 ret = hclge_func_reset_cmd(hdev, 0); 4089 if (ret) { 4090 dev_err(&hdev->pdev->dev, 4091 "asserting function reset fail %d!\n", ret); 4092 return ret; 4093 } 4094 4095 /* After performing PF reset, it is not necessary to do the 4096 * mailbox handling or send any command to firmware, because 4097 * any mailbox handling or command to firmware is only valid 4098 * after hclge_comm_cmd_init is called. 4099 */ 4100 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 4101 hdev->rst_stats.pf_rst_cnt++; 4102 break; 4103 case HNAE3_FLR_RESET: 4104 ret = hclge_func_reset_notify_vf(hdev); 4105 if (ret) 4106 return ret; 4107 break; 4108 case HNAE3_IMP_RESET: 4109 hclge_handle_imp_error(hdev); 4110 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); 4111 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, 4112 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); 4113 break; 4114 default: 4115 break; 4116 } 4117 4118 /* inform hardware that preparatory work is done */ 4119 msleep(HCLGE_RESET_SYNC_TIME); 4120 hclge_reset_handshake(hdev, true); 4121 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); 4122 4123 return ret; 4124 } 4125 4126 static void hclge_show_rst_info(struct hclge_dev *hdev) 4127 { 4128 char *buf; 4129 4130 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL); 4131 if (!buf) 4132 return; 4133 4134 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); 4135 4136 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); 4137 4138 kfree(buf); 4139 } 4140 4141 static bool hclge_reset_err_handle(struct hclge_dev *hdev) 4142 { 4143 #define MAX_RESET_FAIL_CNT 5 4144 4145 if (hdev->reset_pending) { 4146 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", 4147 hdev->reset_pending); 4148 return true; 4149 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & 4150 HCLGE_RESET_INT_M) { 4151 dev_info(&hdev->pdev->dev, 4152 "reset failed because new reset interrupt\n"); 4153 hclge_clear_reset_cause(hdev); 4154 return false; 4155 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { 4156 hdev->rst_stats.reset_fail_cnt++; 4157 set_bit(hdev->reset_type, &hdev->reset_pending); 4158 dev_info(&hdev->pdev->dev, 4159 "re-schedule reset task(%u)\n", 4160 hdev->rst_stats.reset_fail_cnt); 4161 return true; 4162 } 4163 4164 hclge_clear_reset_cause(hdev); 4165 4166 /* recover the handshake status when reset fails */ 4167 hclge_reset_handshake(hdev, true); 4168 4169 dev_err(&hdev->pdev->dev, "Reset fail!\n"); 4170 4171 hclge_show_rst_info(hdev); 4172 4173 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 4174 4175 return false; 4176 } 4177 4178 static void hclge_update_reset_level(struct hclge_dev *hdev) 4179 { 4180 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4181 enum hnae3_reset_type reset_level; 4182 4183 /* reset request will not be set during reset, so clear 4184 * pending reset request to avoid unnecessary reset 4185 * caused by the same reason.
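 * The return value of this first hclge_get_reset_level() call is
 * intentionally ignored; it is made only to clear those pending bits.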
4186 */ 4187 hclge_get_reset_level(ae_dev, &hdev->reset_request); 4188 4189 /* if default_reset_request has a higher level reset request, 4190 * it should be handled as soon as possible. since some errors 4191 * need this kind of reset to fix. 4192 */ 4193 reset_level = hclge_get_reset_level(ae_dev, 4194 &hdev->default_reset_request); 4195 if (reset_level != HNAE3_NONE_RESET) 4196 set_bit(reset_level, &hdev->reset_request); 4197 } 4198 4199 static int hclge_set_rst_done(struct hclge_dev *hdev) 4200 { 4201 struct hclge_pf_rst_done_cmd *req; 4202 struct hclge_desc desc; 4203 int ret; 4204 4205 req = (struct hclge_pf_rst_done_cmd *)desc.data; 4206 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); 4207 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; 4208 4209 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 4210 /* To be compatible with the old firmware, which does not support 4211 * command HCLGE_OPC_PF_RST_DONE, just print a warning and 4212 * return success 4213 */ 4214 if (ret == -EOPNOTSUPP) { 4215 dev_warn(&hdev->pdev->dev, 4216 "current firmware does not support command(0x%x)!\n", 4217 HCLGE_OPC_PF_RST_DONE); 4218 return 0; 4219 } else if (ret) { 4220 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", 4221 ret); 4222 } 4223 4224 return ret; 4225 } 4226 4227 static int hclge_reset_prepare_up(struct hclge_dev *hdev) 4228 { 4229 int ret = 0; 4230 4231 switch (hdev->reset_type) { 4232 case HNAE3_FUNC_RESET: 4233 case HNAE3_FLR_RESET: 4234 ret = hclge_set_all_vf_rst(hdev, false); 4235 break; 4236 case HNAE3_GLOBAL_RESET: 4237 case HNAE3_IMP_RESET: 4238 ret = hclge_set_rst_done(hdev); 4239 break; 4240 default: 4241 break; 4242 } 4243 4244 /* clear up the handshake status after re-initialize done */ 4245 hclge_reset_handshake(hdev, false); 4246 4247 return ret; 4248 } 4249 4250 static int hclge_reset_stack(struct hclge_dev *hdev) 4251 { 4252 int ret; 4253 4254 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); 4255 if (ret) 4256 return ret; 4257 4258 ret = hclge_reset_ae_dev(hdev->ae_dev); 4259 if (ret) 4260 return ret; 4261 4262 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); 4263 } 4264 4265 static int hclge_reset_prepare(struct hclge_dev *hdev) 4266 { 4267 int ret; 4268 4269 hdev->rst_stats.reset_cnt++; 4270 /* perform reset of the stack & ae device for a client */ 4271 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 4272 if (ret) 4273 return ret; 4274 4275 rtnl_lock(); 4276 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 4277 rtnl_unlock(); 4278 if (ret) 4279 return ret; 4280 4281 return hclge_reset_prepare_wait(hdev); 4282 } 4283 4284 static int hclge_reset_rebuild(struct hclge_dev *hdev) 4285 { 4286 int ret; 4287 4288 hdev->rst_stats.hw_reset_done_cnt++; 4289 4290 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 4291 if (ret) 4292 return ret; 4293 4294 rtnl_lock(); 4295 ret = hclge_reset_stack(hdev); 4296 rtnl_unlock(); 4297 if (ret) 4298 return ret; 4299 4300 hclge_clear_reset_cause(hdev); 4301 4302 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 4303 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1 4304 * times 4305 */ 4306 if (ret && 4307 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) 4308 return ret; 4309 4310 ret = hclge_reset_prepare_up(hdev); 4311 if (ret) 4312 return ret; 4313 4314 rtnl_lock(); 4315 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); 4316 rtnl_unlock(); 4317 if (ret) 4318 return ret; 4319 4320 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); 4321 if (ret) 4322 return 
ret; 4323 4324 hdev->last_reset_time = jiffies; 4325 hdev->rst_stats.reset_fail_cnt = 0; 4326 hdev->rst_stats.reset_done_cnt++; 4327 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 4328 4329 hclge_update_reset_level(hdev); 4330 4331 return 0; 4332 } 4333 4334 static void hclge_reset(struct hclge_dev *hdev) 4335 { 4336 if (hclge_reset_prepare(hdev)) 4337 goto err_reset; 4338 4339 if (hclge_reset_wait(hdev)) 4340 goto err_reset; 4341 4342 if (hclge_reset_rebuild(hdev)) 4343 goto err_reset; 4344 4345 return; 4346 4347 err_reset: 4348 if (hclge_reset_err_handle(hdev)) 4349 hclge_reset_task_schedule(hdev); 4350 } 4351 4352 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) 4353 { 4354 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 4355 struct hclge_dev *hdev = ae_dev->priv; 4356 4357 /* We might end up getting called because of the 2 cases below: 4358 * 1. A recoverable error was conveyed through APEI and the only way to bring 4359 * back normalcy is to reset. 4360 * 2. A new reset request from the stack due to timeout 4361 * 4362 * check if this is a new reset request and we are not here just because 4363 * last reset attempt did not succeed and watchdog hit us again. We will 4364 * know this if last reset request did not occur very recently (watchdog 4365 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ) 4366 * In case of a new request we reset the "reset level" to PF reset. 4367 * And if it is a repeat reset request of the most recent one then we 4368 * want to make sure we throttle the reset request. Therefore, we will 4369 * not allow it again before 3*HZ times. 4370 */ 4371 4372 if (time_before(jiffies, (hdev->last_reset_time + 4373 HCLGE_RESET_INTERVAL))) { 4374 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); 4375 return; 4376 } 4377 4378 if (hdev->default_reset_request) { 4379 hdev->reset_level = 4380 hclge_get_reset_level(ae_dev, 4381 &hdev->default_reset_request); 4382 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { 4383 hdev->reset_level = HNAE3_FUNC_RESET; 4384 } 4385 4386 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", 4387 hdev->reset_level); 4388 4389 /* request reset & schedule reset task */ 4390 set_bit(hdev->reset_level, &hdev->reset_request); 4391 hclge_reset_task_schedule(hdev); 4392 4393 if (hdev->reset_level < HNAE3_GLOBAL_RESET) 4394 hdev->reset_level++; 4395 } 4396 4397 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 4398 enum hnae3_reset_type rst_type) 4399 { 4400 struct hclge_dev *hdev = ae_dev->priv; 4401 4402 set_bit(rst_type, &hdev->default_reset_request); 4403 } 4404 4405 static void hclge_reset_timer(struct timer_list *t) 4406 { 4407 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); 4408 4409 /* if default_reset_request has no value, it means that this reset 4410 * request has already been handled, so just return here 4411 */ 4412 if (!hdev->default_reset_request) 4413 return; 4414 4415 dev_info(&hdev->pdev->dev, 4416 "triggering reset in reset timer\n"); 4417 hclge_reset_event(hdev->pdev, NULL); 4418 } 4419 4420 static void hclge_reset_subtask(struct hclge_dev *hdev) 4421 { 4422 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4423 4424 /* check if there is any ongoing reset in the hardware. This status can 4425 * be checked from reset_pending. If there is then, we need to wait for 4426 * hardware to complete reset. 4427 * a.
If we are able to figure out in reasonable time that the hardware 4428 * has been fully reset, then we can proceed with driver and client 4429 * reset. 4430 * b. else, we can come back later to check this status, so re-schedule 4431 * now. 4432 */ 4433 hdev->last_reset_time = jiffies; 4434 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); 4435 if (hdev->reset_type != HNAE3_NONE_RESET) 4436 hclge_reset(hdev); 4437 4438 /* check if we got any *new* reset requests to be honored */ 4439 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); 4440 if (hdev->reset_type != HNAE3_NONE_RESET) 4441 hclge_do_reset(hdev); 4442 4443 hdev->reset_type = HNAE3_NONE_RESET; 4444 } 4445 4446 static void hclge_handle_err_reset_request(struct hclge_dev *hdev) 4447 { 4448 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4449 enum hnae3_reset_type reset_type; 4450 4451 if (ae_dev->hw_err_reset_req) { 4452 reset_type = hclge_get_reset_level(ae_dev, 4453 &ae_dev->hw_err_reset_req); 4454 hclge_set_def_reset_request(ae_dev, reset_type); 4455 } 4456 4457 if (hdev->default_reset_request && ae_dev->ops->reset_event) 4458 ae_dev->ops->reset_event(hdev->pdev, NULL); 4459 4460 /* enable interrupt after error handling is complete */ 4461 hclge_enable_vector(&hdev->misc_vector, true); 4462 } 4463 4464 static void hclge_handle_err_recovery(struct hclge_dev *hdev) 4465 { 4466 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4467 4468 ae_dev->hw_err_reset_req = 0; 4469 4470 if (hclge_find_error_source(hdev)) { 4471 hclge_handle_error_info_log(ae_dev); 4472 hclge_handle_mac_tnl(hdev); 4473 hclge_handle_vf_queue_err_ras(hdev); 4474 } 4475 4476 hclge_handle_err_reset_request(hdev); 4477 } 4478 4479 static void hclge_misc_err_recovery(struct hclge_dev *hdev) 4480 { 4481 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 4482 struct device *dev = &hdev->pdev->dev; 4483 u32 msix_sts_reg; 4484 4485 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); 4486 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { 4487 if (hclge_handle_hw_msix_error 4488 (hdev, &hdev->default_reset_request)) 4489 dev_info(dev, "received msix interrupt 0x%x\n", 4490 msix_sts_reg); 4491 } 4492 4493 hclge_handle_hw_ras_error(ae_dev); 4494 4495 hclge_handle_err_reset_request(hdev); 4496 } 4497 4498 static void hclge_errhand_service_task(struct hclge_dev *hdev) 4499 { 4500 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) 4501 return; 4502 4503 if (hnae3_dev_ras_imp_supported(hdev)) 4504 hclge_handle_err_recovery(hdev); 4505 else 4506 hclge_misc_err_recovery(hdev); 4507 } 4508 4509 static void hclge_reset_service_task(struct hclge_dev *hdev) 4510 { 4511 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) 4512 return; 4513 4514 if (time_is_before_jiffies(hdev->last_rst_scheduled + 4515 HCLGE_RESET_SCHED_TIMEOUT)) 4516 dev_warn(&hdev->pdev->dev, 4517 "reset service task is scheduled after %ums on cpu%u!\n", 4518 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), 4519 smp_processor_id()); 4520 4521 down(&hdev->reset_sem); 4522 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 4523 4524 hclge_reset_subtask(hdev); 4525 4526 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 4527 up(&hdev->reset_sem); 4528 } 4529 4530 static void hclge_update_vport_alive(struct hclge_dev *hdev) 4531 { 4532 #define HCLGE_ALIVE_SECONDS_NORMAL 8 4533 4534 unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ; 4535 int i; 4536 4537 /* start from vport 1 because the PF is always alive */
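	/* last_active_jiffies is assumed to be refreshed by the VF's keep-alive
	 * mailbox messages, so a VF that has been silent for more than
	 * HCLGE_ALIVE_SECONDS_NORMAL seconds is marked as not ALIVE below.
	 */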
4538 for (i = 1; i < hdev->num_alloc_vport; i++) { 4539 struct hclge_vport *vport = &hdev->vport[i]; 4540 4541 if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) || 4542 !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 4543 continue; 4544 if (time_after(jiffies, vport->last_active_jiffies + 4545 alive_time)) { 4546 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 4547 dev_warn(&hdev->pdev->dev, 4548 "VF %u heartbeat timeout\n", 4549 i - HCLGE_VF_VPORT_START_NUM); 4550 } 4551 } 4552 } 4553 4554 static void hclge_periodic_service_task(struct hclge_dev *hdev) 4555 { 4556 unsigned long delta = round_jiffies_relative(HZ); 4557 4558 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) 4559 return; 4560 4561 /* Always handle the link updating to make sure link state is 4562 * updated when it is triggered by mbx. 4563 */ 4564 hclge_update_link_status(hdev); 4565 hclge_sync_mac_table(hdev); 4566 hclge_sync_promisc_mode(hdev); 4567 hclge_sync_fd_table(hdev); 4568 4569 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 4570 delta = jiffies - hdev->last_serv_processed; 4571 4572 if (delta < round_jiffies_relative(HZ)) { 4573 delta = round_jiffies_relative(HZ) - delta; 4574 goto out; 4575 } 4576 } 4577 4578 hdev->serv_processed_cnt++; 4579 hclge_update_vport_alive(hdev); 4580 4581 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { 4582 hdev->last_serv_processed = jiffies; 4583 goto out; 4584 } 4585 4586 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) 4587 hclge_update_stats_for_all(hdev); 4588 4589 hclge_update_port_info(hdev); 4590 hclge_sync_vlan_filter(hdev); 4591 4592 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) 4593 hclge_rfs_filter_expire(hdev); 4594 4595 hdev->last_serv_processed = jiffies; 4596 4597 out: 4598 hclge_task_schedule(hdev, delta); 4599 } 4600 4601 static void hclge_ptp_service_task(struct hclge_dev *hdev) 4602 { 4603 unsigned long flags; 4604 4605 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || 4606 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || 4607 !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) 4608 return; 4609 4610 /* to prevent concurrence with the irq handler */ 4611 spin_lock_irqsave(&hdev->ptp->lock, flags); 4612 4613 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq 4614 * handler may handle it just before spin_lock_irqsave(). 4615 */ 4616 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) 4617 hclge_ptp_clean_tx_hwts(hdev); 4618 4619 spin_unlock_irqrestore(&hdev->ptp->lock, flags); 4620 } 4621 4622 static void hclge_service_task(struct work_struct *work) 4623 { 4624 struct hclge_dev *hdev = 4625 container_of(work, struct hclge_dev, service_task.work); 4626 4627 hclge_errhand_service_task(hdev); 4628 hclge_reset_service_task(hdev); 4629 hclge_ptp_service_task(hdev); 4630 hclge_mailbox_service_task(hdev); 4631 hclge_periodic_service_task(hdev); 4632 4633 /* Handle error recovery, reset and mbx again in case periodical task 4634 * delays the handling by calling hclge_task_schedule() in 4635 * hclge_periodic_service_task(). 
4636 */ 4637 hclge_errhand_service_task(hdev); 4638 hclge_reset_service_task(hdev); 4639 hclge_mailbox_service_task(hdev); 4640 } 4641 4642 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) 4643 { 4644 /* VF handle has no client */ 4645 if (!handle->client) 4646 return container_of(handle, struct hclge_vport, nic); 4647 else if (handle->client->type == HNAE3_CLIENT_ROCE) 4648 return container_of(handle, struct hclge_vport, roce); 4649 else 4650 return container_of(handle, struct hclge_vport, nic); 4651 } 4652 4653 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, 4654 struct hnae3_vector_info *vector_info) 4655 { 4656 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64 4657 4658 vector_info->vector = pci_irq_vector(hdev->pdev, idx); 4659 4660 /* need an extend offset to config vector >= 64 */ 4661 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) 4662 vector_info->io_addr = hdev->hw.hw.io_base + 4663 HCLGE_VECTOR_REG_BASE + 4664 (idx - 1) * HCLGE_VECTOR_REG_OFFSET; 4665 else 4666 vector_info->io_addr = hdev->hw.hw.io_base + 4667 HCLGE_VECTOR_EXT_REG_BASE + 4668 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * 4669 HCLGE_VECTOR_REG_OFFSET_H + 4670 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * 4671 HCLGE_VECTOR_REG_OFFSET; 4672 4673 hdev->vector_status[idx] = hdev->vport[0].vport_id; 4674 hdev->vector_irq[idx] = vector_info->vector; 4675 } 4676 4677 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, 4678 struct hnae3_vector_info *vector_info) 4679 { 4680 struct hclge_vport *vport = hclge_get_vport(handle); 4681 struct hnae3_vector_info *vector = vector_info; 4682 struct hclge_dev *hdev = vport->back; 4683 int alloc = 0; 4684 u16 i = 0; 4685 u16 j; 4686 4687 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); 4688 vector_num = min(hdev->num_msi_left, vector_num); 4689 4690 for (j = 0; j < vector_num; j++) { 4691 while (++i < hdev->num_nic_msi) { 4692 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { 4693 hclge_get_vector_info(hdev, i, vector); 4694 vector++; 4695 alloc++; 4696 4697 break; 4698 } 4699 } 4700 } 4701 hdev->num_msi_left -= alloc; 4702 hdev->num_msi_used += alloc; 4703 4704 return alloc; 4705 } 4706 4707 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) 4708 { 4709 int i; 4710 4711 for (i = 0; i < hdev->num_msi; i++) 4712 if (vector == hdev->vector_irq[i]) 4713 return i; 4714 4715 return -EINVAL; 4716 } 4717 4718 static int hclge_put_vector(struct hnae3_handle *handle, int vector) 4719 { 4720 struct hclge_vport *vport = hclge_get_vport(handle); 4721 struct hclge_dev *hdev = vport->back; 4722 int vector_id; 4723 4724 vector_id = hclge_get_vector_index(hdev, vector); 4725 if (vector_id < 0) { 4726 dev_err(&hdev->pdev->dev, 4727 "Get vector index fail. 
vector = %d\n", vector); 4728 return vector_id; 4729 } 4730 4731 hclge_free_vector(hdev, vector_id); 4732 4733 return 0; 4734 } 4735 4736 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, 4737 u8 *key, u8 *hfunc) 4738 { 4739 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 4740 struct hclge_vport *vport = hclge_get_vport(handle); 4741 struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg; 4742 4743 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); 4744 4745 hclge_comm_get_rss_indir_tbl(rss_cfg, indir, 4746 ae_dev->dev_specs.rss_ind_tbl_size); 4747 4748 return 0; 4749 } 4750 4751 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 4752 const u8 *key, const u8 hfunc) 4753 { 4754 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 4755 struct hclge_vport *vport = hclge_get_vport(handle); 4756 struct hclge_dev *hdev = vport->back; 4757 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 4758 int ret, i; 4759 4760 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); 4761 if (ret) { 4762 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); 4763 return ret; 4764 } 4765 4766 /* Update the shadow RSS table with user specified qids */ 4767 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) 4768 rss_cfg->rss_indirection_tbl[i] = indir[i]; 4769 4770 /* Update the hardware */ 4771 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, 4772 rss_cfg->rss_indirection_tbl); 4773 } 4774 4775 static int hclge_set_rss_tuple(struct hnae3_handle *handle, 4776 struct ethtool_rxnfc *nfc) 4777 { 4778 struct hclge_vport *vport = hclge_get_vport(handle); 4779 struct hclge_dev *hdev = vport->back; 4780 int ret; 4781 4782 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, 4783 &hdev->rss_cfg, nfc); 4784 if (ret) { 4785 dev_err(&hdev->pdev->dev, 4786 "failed to set rss tuple, ret = %d.\n", ret); 4787 return ret; 4788 } 4789 4790 return 0; 4791 } 4792 4793 static int hclge_get_rss_tuple(struct hnae3_handle *handle, 4794 struct ethtool_rxnfc *nfc) 4795 { 4796 struct hclge_vport *vport = hclge_get_vport(handle); 4797 u8 tuple_sets; 4798 int ret; 4799 4800 nfc->data = 0; 4801 4802 ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type, 4803 &tuple_sets); 4804 if (ret || !tuple_sets) 4805 return ret; 4806 4807 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); 4808 4809 return 0; 4810 } 4811 4812 static int hclge_get_tc_size(struct hnae3_handle *handle) 4813 { 4814 struct hclge_vport *vport = hclge_get_vport(handle); 4815 struct hclge_dev *hdev = vport->back; 4816 4817 return hdev->pf_rss_size_max; 4818 } 4819 4820 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) 4821 { 4822 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 4823 struct hclge_vport *vport = hdev->vport; 4824 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 4825 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; 4826 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 4827 struct hnae3_tc_info *tc_info; 4828 u16 roundup_size; 4829 u16 rss_size; 4830 int i; 4831 4832 tc_info = &vport->nic.kinfo.tc_info; 4833 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 4834 rss_size = tc_info->tqp_count[i]; 4835 tc_valid[i] = 0; 4836 4837 if (!(hdev->hw_tc_map & BIT(i))) 4838 continue; 4839 4840 /* tc_size set to hardware is the log2 of the roundup power of two 4841 * of rss_size; the actual queue size is limited by the indirection 4842 * table.
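 * For example, rss_size = 24 is programmed as
 * ilog2(roundup_pow_of_two(24)) = ilog2(32) = 5.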
4843 */ 4844 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || 4845 rss_size == 0) { 4846 dev_err(&hdev->pdev->dev, 4847 "Configure rss tc size failed, invalid TC_SIZE = %u\n", 4848 rss_size); 4849 return -EINVAL; 4850 } 4851 4852 roundup_size = roundup_pow_of_two(rss_size); 4853 roundup_size = ilog2(roundup_size); 4854 4855 tc_valid[i] = 1; 4856 tc_size[i] = roundup_size; 4857 tc_offset[i] = tc_info->tqp_offset[i]; 4858 } 4859 4860 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, 4861 tc_size); 4862 } 4863 4864 int hclge_rss_init_hw(struct hclge_dev *hdev) 4865 { 4866 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; 4867 u8 *key = hdev->rss_cfg.rss_hash_key; 4868 u8 hfunc = hdev->rss_cfg.rss_algo; 4869 int ret; 4870 4871 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 4872 rss_indir); 4873 if (ret) 4874 return ret; 4875 4876 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); 4877 if (ret) 4878 return ret; 4879 4880 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg); 4881 if (ret) 4882 return ret; 4883 4884 return hclge_init_rss_tc_mode(hdev); 4885 } 4886 4887 int hclge_bind_ring_with_vector(struct hclge_vport *vport, 4888 int vector_id, bool en, 4889 struct hnae3_ring_chain_node *ring_chain) 4890 { 4891 struct hclge_dev *hdev = vport->back; 4892 struct hnae3_ring_chain_node *node; 4893 struct hclge_desc desc; 4894 struct hclge_ctrl_vector_chain_cmd *req = 4895 (struct hclge_ctrl_vector_chain_cmd *)desc.data; 4896 enum hclge_comm_cmd_status status; 4897 enum hclge_opcode_type op; 4898 u16 tqp_type_and_id; 4899 int i; 4900 4901 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; 4902 hclge_cmd_setup_basic_desc(&desc, op, false); 4903 req->int_vector_id_l = hnae3_get_field(vector_id, 4904 HCLGE_VECTOR_ID_L_M, 4905 HCLGE_VECTOR_ID_L_S); 4906 req->int_vector_id_h = hnae3_get_field(vector_id, 4907 HCLGE_VECTOR_ID_H_M, 4908 HCLGE_VECTOR_ID_H_S); 4909 4910 i = 0; 4911 for (node = ring_chain; node; node = node->next) { 4912 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]); 4913 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, 4914 HCLGE_INT_TYPE_S, 4915 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B)); 4916 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, 4917 HCLGE_TQP_ID_S, node->tqp_index); 4918 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M, 4919 HCLGE_INT_GL_IDX_S, 4920 hnae3_get_field(node->int_gl_idx, 4921 HNAE3_RING_GL_IDX_M, 4922 HNAE3_RING_GL_IDX_S)); 4923 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id); 4924 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { 4925 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; 4926 req->vfid = vport->vport_id; 4927 4928 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4929 if (status) { 4930 dev_err(&hdev->pdev->dev, 4931 "Map TQP fail, status is %d.\n", 4932 status); 4933 return -EIO; 4934 } 4935 i = 0; 4936 4937 hclge_cmd_setup_basic_desc(&desc, 4938 op, 4939 false); 4940 req->int_vector_id_l = 4941 hnae3_get_field(vector_id, 4942 HCLGE_VECTOR_ID_L_M, 4943 HCLGE_VECTOR_ID_L_S); 4944 req->int_vector_id_h = 4945 hnae3_get_field(vector_id, 4946 HCLGE_VECTOR_ID_H_M, 4947 HCLGE_VECTOR_ID_H_S); 4948 } 4949 } 4950 4951 if (i > 0) { 4952 req->int_cause_num = i; 4953 req->vfid = vport->vport_id; 4954 status = hclge_cmd_send(&hdev->hw, &desc, 1); 4955 if (status) { 4956 dev_err(&hdev->pdev->dev, 4957 "Map TQP fail, status is %d.\n", status); 4958 return -EIO; 4959 } 4960 } 4961 4962 return 0; 4963 } 4964 4965 static int hclge_map_ring_to_vector(struct hnae3_handle 
*handle, int vector, 4966 struct hnae3_ring_chain_node *ring_chain) 4967 { 4968 struct hclge_vport *vport = hclge_get_vport(handle); 4969 struct hclge_dev *hdev = vport->back; 4970 int vector_id; 4971 4972 vector_id = hclge_get_vector_index(hdev, vector); 4973 if (vector_id < 0) { 4974 dev_err(&hdev->pdev->dev, 4975 "failed to get vector index. vector=%d\n", vector); 4976 return vector_id; 4977 } 4978 4979 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain); 4980 } 4981 4982 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, 4983 struct hnae3_ring_chain_node *ring_chain) 4984 { 4985 struct hclge_vport *vport = hclge_get_vport(handle); 4986 struct hclge_dev *hdev = vport->back; 4987 int vector_id, ret; 4988 4989 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 4990 return 0; 4991 4992 vector_id = hclge_get_vector_index(hdev, vector); 4993 if (vector_id < 0) { 4994 dev_err(&handle->pdev->dev, 4995 "Get vector index fail. ret =%d\n", vector_id); 4996 return vector_id; 4997 } 4998 4999 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain); 5000 if (ret) 5001 dev_err(&handle->pdev->dev, 5002 "Unmap ring from vector fail. vectorid=%d, ret =%d\n", 5003 vector_id, ret); 5004 5005 return ret; 5006 } 5007 5008 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, 5009 bool en_uc, bool en_mc, bool en_bc) 5010 { 5011 struct hclge_vport *vport = &hdev->vport[vf_id]; 5012 struct hnae3_handle *handle = &vport->nic; 5013 struct hclge_promisc_cfg_cmd *req; 5014 struct hclge_desc desc; 5015 bool uc_tx_en = en_uc; 5016 u8 promisc_cfg = 0; 5017 int ret; 5018 5019 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); 5020 5021 req = (struct hclge_promisc_cfg_cmd *)desc.data; 5022 req->vf_id = vf_id; 5023 5024 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) 5025 uc_tx_en = false; 5026 5027 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0); 5028 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0); 5029 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0); 5030 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0); 5031 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0); 5032 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0); 5033 req->extend_promisc = promisc_cfg; 5034 5035 /* to be compatible with DEVICE_VERSION_V1/2 */ 5036 promisc_cfg = 0; 5037 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0); 5038 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0); 5039 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 
1 : 0); 5040 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1); 5041 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1); 5042 req->promisc = promisc_cfg; 5043 5044 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5045 if (ret) 5046 dev_err(&hdev->pdev->dev, 5047 "failed to set vport %u promisc mode, ret = %d.\n", 5048 vf_id, ret); 5049 5050 return ret; 5051 } 5052 5053 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, 5054 bool en_mc_pmc, bool en_bc_pmc) 5055 { 5056 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id, 5057 en_uc_pmc, en_mc_pmc, en_bc_pmc); 5058 } 5059 5060 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 5061 bool en_mc_pmc) 5062 { 5063 struct hclge_vport *vport = hclge_get_vport(handle); 5064 struct hclge_dev *hdev = vport->back; 5065 bool en_bc_pmc = true; 5066 5067 /* For device whose version below V2, if broadcast promisc enabled, 5068 * vlan filter is always bypassed. So broadcast promisc should be 5069 * disabled until user enable promisc mode 5070 */ 5071 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 5072 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false; 5073 5074 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, 5075 en_bc_pmc); 5076 } 5077 5078 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) 5079 { 5080 struct hclge_vport *vport = hclge_get_vport(handle); 5081 5082 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 5083 } 5084 5085 static void hclge_sync_fd_state(struct hclge_dev *hdev) 5086 { 5087 if (hlist_empty(&hdev->fd_rule_list)) 5088 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 5089 } 5090 5091 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) 5092 { 5093 if (!test_bit(location, hdev->fd_bmap)) { 5094 set_bit(location, hdev->fd_bmap); 5095 hdev->hclge_fd_rule_num++; 5096 } 5097 } 5098 5099 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) 5100 { 5101 if (test_bit(location, hdev->fd_bmap)) { 5102 clear_bit(location, hdev->fd_bmap); 5103 hdev->hclge_fd_rule_num--; 5104 } 5105 } 5106 5107 static void hclge_fd_free_node(struct hclge_dev *hdev, 5108 struct hclge_fd_rule *rule) 5109 { 5110 hlist_del(&rule->rule_node); 5111 kfree(rule); 5112 hclge_sync_fd_state(hdev); 5113 } 5114 5115 static void hclge_update_fd_rule_node(struct hclge_dev *hdev, 5116 struct hclge_fd_rule *old_rule, 5117 struct hclge_fd_rule *new_rule, 5118 enum HCLGE_FD_NODE_STATE state) 5119 { 5120 switch (state) { 5121 case HCLGE_FD_TO_ADD: 5122 case HCLGE_FD_ACTIVE: 5123 /* 1) if the new state is TO_ADD, just replace the old rule 5124 * with the same location, no matter its state, because the 5125 * new rule will be configured to the hardware. 5126 * 2) if the new state is ACTIVE, it means the new rule 5127 * has been configured to the hardware, so just replace 5128 * the old rule node with the same location. 5129 * 3) for it doesn't add a new node to the list, so it's 5130 * unnecessary to update the rule number and fd_bmap. 
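 * In both cases the old node's memory is reused: new_rule is copied
 * over old_rule and then freed, so the list itself is left untouched.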
5131 */ 5132 new_rule->rule_node.next = old_rule->rule_node.next; 5133 new_rule->rule_node.pprev = old_rule->rule_node.pprev; 5134 memcpy(old_rule, new_rule, sizeof(*old_rule)); 5135 kfree(new_rule); 5136 break; 5137 case HCLGE_FD_DELETED: 5138 hclge_fd_dec_rule_cnt(hdev, old_rule->location); 5139 hclge_fd_free_node(hdev, old_rule); 5140 break; 5141 case HCLGE_FD_TO_DEL: 5142 /* if the new request is TO_DEL and the old rule exists: 5143 * 1) if the state of the old rule is TO_DEL, we need to do nothing, 5144 * because we delete the rule by location and the other rule content 5145 * is unnecessary. 5146 * 2) if the state of the old rule is ACTIVE, we need to change its 5147 * state to TO_DEL, so the rule will be deleted when the periodic 5148 * task is scheduled. 5149 * 3) if the state of the old rule is TO_ADD, the rule hasn't 5150 * been added to the hardware yet, so we just delete the rule node from 5151 * fd_rule_list directly. 5152 */ 5153 if (old_rule->state == HCLGE_FD_TO_ADD) { 5154 hclge_fd_dec_rule_cnt(hdev, old_rule->location); 5155 hclge_fd_free_node(hdev, old_rule); 5156 return; 5157 } 5158 old_rule->state = HCLGE_FD_TO_DEL; 5159 break; 5160 } 5161 } 5162 5163 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist, 5164 u16 location, 5165 struct hclge_fd_rule **parent) 5166 { 5167 struct hclge_fd_rule *rule; 5168 struct hlist_node *node; 5169 5170 hlist_for_each_entry_safe(rule, node, hlist, rule_node) { 5171 if (rule->location == location) 5172 return rule; 5173 else if (rule->location > location) 5174 return NULL; 5175 /* record the parent node, used to keep the nodes in fd_rule_list 5176 * in ascending order. 5177 */ 5178 *parent = rule; 5179 } 5180 5181 return NULL; 5182 } 5183 5184 /* insert fd rule node in ascending order according to rule->location */ 5185 static void hclge_fd_insert_rule_node(struct hlist_head *hlist, 5186 struct hclge_fd_rule *rule, 5187 struct hclge_fd_rule *parent) 5188 { 5189 INIT_HLIST_NODE(&rule->rule_node); 5190 5191 if (parent) 5192 hlist_add_behind(&rule->rule_node, &parent->rule_node); 5193 else 5194 hlist_add_head(&rule->rule_node, hlist); 5195 } 5196 5197 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, 5198 struct hclge_fd_user_def_cfg *cfg) 5199 { 5200 struct hclge_fd_user_def_cfg_cmd *req; 5201 struct hclge_desc desc; 5202 u16 data = 0; 5203 int ret; 5204 5205 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false); 5206 5207 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data; 5208 5209 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0); 5210 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, 5211 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset); 5212 req->ol2_cfg = cpu_to_le16(data); 5213 5214 data = 0; 5215 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0); 5216 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, 5217 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset); 5218 req->ol3_cfg = cpu_to_le16(data); 5219 5220 data = 0; 5221 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0); 5222 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M, 5223 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset); 5224 req->ol4_cfg = cpu_to_le16(data); 5225 5226 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5227 if (ret) 5228 dev_err(&hdev->pdev->dev, 5229 "failed to set fd user def data, ret= %d\n", ret); 5230 return ret; 5231 } 5232 5233 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) 5234 { 5235 int ret; 5236 5237 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) 5238 return; 5239 5240 if (!locked)
spin_lock_bh(&hdev->fd_rule_lock); 5242 5243 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); 5244 if (ret) 5245 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5246 5247 if (!locked) 5248 spin_unlock_bh(&hdev->fd_rule_lock); 5249 } 5250 5251 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, 5252 struct hclge_fd_rule *rule) 5253 { 5254 struct hlist_head *hlist = &hdev->fd_rule_list; 5255 struct hclge_fd_rule *fd_rule, *parent = NULL; 5256 struct hclge_fd_user_def_info *info, *old_info; 5257 struct hclge_fd_user_def_cfg *cfg; 5258 5259 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5260 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5261 return 0; 5262 5263 /* for valid layer is start from 1, so need minus 1 to get the cfg */ 5264 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5265 info = &rule->ep.user_def; 5266 5267 if (!cfg->ref_cnt || cfg->offset == info->offset) 5268 return 0; 5269 5270 if (cfg->ref_cnt > 1) 5271 goto error; 5272 5273 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); 5274 if (fd_rule) { 5275 old_info = &fd_rule->ep.user_def; 5276 if (info->layer == old_info->layer) 5277 return 0; 5278 } 5279 5280 error: 5281 dev_err(&hdev->pdev->dev, 5282 "No available offset for layer%d fd rule, each layer only support one user def offset.\n", 5283 info->layer + 1); 5284 return -ENOSPC; 5285 } 5286 5287 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, 5288 struct hclge_fd_rule *rule) 5289 { 5290 struct hclge_fd_user_def_cfg *cfg; 5291 5292 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5293 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5294 return; 5295 5296 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5297 if (!cfg->ref_cnt) { 5298 cfg->offset = rule->ep.user_def.offset; 5299 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5300 } 5301 cfg->ref_cnt++; 5302 } 5303 5304 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, 5305 struct hclge_fd_rule *rule) 5306 { 5307 struct hclge_fd_user_def_cfg *cfg; 5308 5309 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE || 5310 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE) 5311 return; 5312 5313 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; 5314 if (!cfg->ref_cnt) 5315 return; 5316 5317 cfg->ref_cnt--; 5318 if (!cfg->ref_cnt) { 5319 cfg->offset = 0; 5320 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 5321 } 5322 } 5323 5324 static void hclge_update_fd_list(struct hclge_dev *hdev, 5325 enum HCLGE_FD_NODE_STATE state, u16 location, 5326 struct hclge_fd_rule *new_rule) 5327 { 5328 struct hlist_head *hlist = &hdev->fd_rule_list; 5329 struct hclge_fd_rule *fd_rule, *parent = NULL; 5330 5331 fd_rule = hclge_find_fd_rule(hlist, location, &parent); 5332 if (fd_rule) { 5333 hclge_fd_dec_user_def_refcnt(hdev, fd_rule); 5334 if (state == HCLGE_FD_ACTIVE) 5335 hclge_fd_inc_user_def_refcnt(hdev, new_rule); 5336 hclge_sync_fd_user_def_cfg(hdev, true); 5337 5338 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); 5339 return; 5340 } 5341 5342 /* it's unlikely to fail here, because we have checked the rule 5343 * exist before. 
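 * The dev_warn() below therefore only covers the unexpected case of a
 * delete request for a rule that is no longer on the list.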
5344 */ 5345 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) { 5346 dev_warn(&hdev->pdev->dev, 5347 "failed to delete fd rule %u, it's inexistent\n", 5348 location); 5349 return; 5350 } 5351 5352 hclge_fd_inc_user_def_refcnt(hdev, new_rule); 5353 hclge_sync_fd_user_def_cfg(hdev, true); 5354 5355 hclge_fd_insert_rule_node(hlist, new_rule, parent); 5356 hclge_fd_inc_rule_cnt(hdev, new_rule->location); 5357 5358 if (state == HCLGE_FD_TO_ADD) { 5359 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 5360 hclge_task_schedule(hdev, 0); 5361 } 5362 } 5363 5364 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) 5365 { 5366 struct hclge_get_fd_mode_cmd *req; 5367 struct hclge_desc desc; 5368 int ret; 5369 5370 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true); 5371 5372 req = (struct hclge_get_fd_mode_cmd *)desc.data; 5373 5374 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5375 if (ret) { 5376 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); 5377 return ret; 5378 } 5379 5380 *fd_mode = req->mode; 5381 5382 return ret; 5383 } 5384 5385 static int hclge_get_fd_allocation(struct hclge_dev *hdev, 5386 u32 *stage1_entry_num, 5387 u32 *stage2_entry_num, 5388 u16 *stage1_counter_num, 5389 u16 *stage2_counter_num) 5390 { 5391 struct hclge_get_fd_allocation_cmd *req; 5392 struct hclge_desc desc; 5393 int ret; 5394 5395 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true); 5396 5397 req = (struct hclge_get_fd_allocation_cmd *)desc.data; 5398 5399 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5400 if (ret) { 5401 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", 5402 ret); 5403 return ret; 5404 } 5405 5406 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num); 5407 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num); 5408 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num); 5409 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num); 5410 5411 return ret; 5412 } 5413 5414 static int hclge_set_fd_key_config(struct hclge_dev *hdev, 5415 enum HCLGE_FD_STAGE stage_num) 5416 { 5417 struct hclge_set_fd_key_config_cmd *req; 5418 struct hclge_fd_key_cfg *stage; 5419 struct hclge_desc desc; 5420 int ret; 5421 5422 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false); 5423 5424 req = (struct hclge_set_fd_key_config_cmd *)desc.data; 5425 stage = &hdev->fd_cfg.key_cfg[stage_num]; 5426 req->stage = stage_num; 5427 req->key_select = stage->key_sel; 5428 req->inner_sipv6_word_en = stage->inner_sipv6_word_en; 5429 req->inner_dipv6_word_en = stage->inner_dipv6_word_en; 5430 req->outer_sipv6_word_en = stage->outer_sipv6_word_en; 5431 req->outer_dipv6_word_en = stage->outer_dipv6_word_en; 5432 req->tuple_mask = cpu_to_le32(~stage->tuple_active); 5433 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active); 5434 5435 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5436 if (ret) 5437 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); 5438 5439 return ret; 5440 } 5441 5442 static void hclge_fd_disable_user_def(struct hclge_dev *hdev) 5443 { 5444 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; 5445 5446 spin_lock_bh(&hdev->fd_rule_lock); 5447 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); 5448 spin_unlock_bh(&hdev->fd_rule_lock); 5449 5450 hclge_fd_set_user_def_cmd(hdev, cfg); 5451 } 5452 5453 static int hclge_init_fd_config(struct hclge_dev *hdev) 5454 { 5455 #define LOW_2_WORDS 0x03 5456 struct hclge_fd_key_cfg *key_cfg; 5457 int ret; 5458 5459 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 
5460 return 0; 5461 5462 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); 5463 if (ret) 5464 return ret; 5465 5466 switch (hdev->fd_cfg.fd_mode) { 5467 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1: 5468 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; 5469 break; 5470 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1: 5471 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; 5472 break; 5473 default: 5474 dev_err(&hdev->pdev->dev, 5475 "Unsupported flow director mode %u\n", 5476 hdev->fd_cfg.fd_mode); 5477 return -EOPNOTSUPP; 5478 } 5479 5480 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; 5481 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; 5482 key_cfg->inner_sipv6_word_en = LOW_2_WORDS; 5483 key_cfg->inner_dipv6_word_en = LOW_2_WORDS; 5484 key_cfg->outer_sipv6_word_en = 0; 5485 key_cfg->outer_dipv6_word_en = 0; 5486 5487 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) | 5488 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) | 5489 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 5490 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5491 5492 /* If use max 400bit key, we can support tuples for ether type */ 5493 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 5494 key_cfg->tuple_active |= 5495 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); 5496 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 5497 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; 5498 } 5499 5500 /* roce_type is used to filter roce frames 5501 * dst_vport is used to specify the rule 5502 */ 5503 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT); 5504 5505 ret = hclge_get_fd_allocation(hdev, 5506 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 5507 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], 5508 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], 5509 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); 5510 if (ret) 5511 return ret; 5512 5513 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); 5514 } 5515 5516 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, 5517 int loc, u8 *key, bool is_add) 5518 { 5519 struct hclge_fd_tcam_config_1_cmd *req1; 5520 struct hclge_fd_tcam_config_2_cmd *req2; 5521 struct hclge_fd_tcam_config_3_cmd *req3; 5522 struct hclge_desc desc[3]; 5523 int ret; 5524 5525 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false); 5526 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 5527 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false); 5528 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 5529 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false); 5530 5531 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 5532 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 5533 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 5534 5535 req1->stage = stage; 5536 req1->xy_sel = sel_x ? 1 : 0; 5537 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0); 5538 req1->index = cpu_to_le32(loc); 5539 req1->entry_vld = sel_x ? 
is_add : 0; 5540 5541 if (key) { 5542 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data)); 5543 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)], 5544 sizeof(req2->tcam_data)); 5545 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) + 5546 sizeof(req2->tcam_data)], sizeof(req3->tcam_data)); 5547 } 5548 5549 ret = hclge_cmd_send(&hdev->hw, desc, 3); 5550 if (ret) 5551 dev_err(&hdev->pdev->dev, 5552 "config tcam key fail, ret=%d\n", 5553 ret); 5554 5555 return ret; 5556 } 5557 5558 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, 5559 struct hclge_fd_ad_data *action) 5560 { 5561 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 5562 struct hclge_fd_ad_config_cmd *req; 5563 struct hclge_desc desc; 5564 u64 ad_data = 0; 5565 int ret; 5566 5567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false); 5568 5569 req = (struct hclge_fd_ad_config_cmd *)desc.data; 5570 req->index = cpu_to_le32(loc); 5571 req->stage = stage; 5572 5573 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B, 5574 action->write_rule_id_to_bd); 5575 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, 5576 action->rule_id); 5577 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { 5578 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, 5579 action->override_tc); 5580 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, 5581 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); 5582 } 5583 ad_data <<= 32; 5584 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); 5585 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, 5586 action->forward_to_direct_queue); 5587 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, 5588 action->queue_id); 5589 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter); 5590 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, 5591 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); 5592 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); 5593 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, 5594 action->counter_id); 5595 5596 req->ad_data = cpu_to_le64(ad_data); 5597 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 5598 if (ret) 5599 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); 5600 5601 return ret; 5602 } 5603 5604 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, 5605 struct hclge_fd_rule *rule) 5606 { 5607 int offset, moffset, ip_offset; 5608 enum HCLGE_FD_KEY_OPT key_opt; 5609 u16 tmp_x_s, tmp_y_s; 5610 u32 tmp_x_l, tmp_y_l; 5611 u8 *p = (u8 *)rule; 5612 int i; 5613 5614 if (rule->unused_tuple & BIT(tuple_bit)) 5615 return true; 5616 5617 key_opt = tuple_key_info[tuple_bit].key_opt; 5618 offset = tuple_key_info[tuple_bit].offset; 5619 moffset = tuple_key_info[tuple_bit].moffset; 5620 5621 switch (key_opt) { 5622 case KEY_OPT_U8: 5623 calc_x(*key_x, p[offset], p[moffset]); 5624 calc_y(*key_y, p[offset], p[moffset]); 5625 5626 return true; 5627 case KEY_OPT_LE16: 5628 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); 5629 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); 5630 *(__le16 *)key_x = cpu_to_le16(tmp_x_s); 5631 *(__le16 *)key_y = cpu_to_le16(tmp_y_s); 5632 5633 return true; 5634 case KEY_OPT_LE32: 5635 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); 5636 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); 5637 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5638 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5639 5640 return true; 5641 case KEY_OPT_MAC: 5642 for (i = 0; i < ETH_ALEN; i++) 
{ 5643 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i], 5644 p[moffset + i]); 5645 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i], 5646 p[moffset + i]); 5647 } 5648 5649 return true; 5650 case KEY_OPT_IP: 5651 ip_offset = IPV4_INDEX * sizeof(u32); 5652 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), 5653 *(u32 *)(&p[moffset + ip_offset])); 5654 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), 5655 *(u32 *)(&p[moffset + ip_offset])); 5656 *(__le32 *)key_x = cpu_to_le32(tmp_x_l); 5657 *(__le32 *)key_y = cpu_to_le32(tmp_y_l); 5658 5659 return true; 5660 default: 5661 return false; 5662 } 5663 } 5664 5665 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id, 5666 u8 vf_id, u8 network_port_id) 5667 { 5668 u32 port_number = 0; 5669 5670 if (port_type == HOST_PORT) { 5671 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S, 5672 pf_id); 5673 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S, 5674 vf_id); 5675 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT); 5676 } else { 5677 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M, 5678 HCLGE_NETWORK_PORT_ID_S, network_port_id); 5679 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT); 5680 } 5681 5682 return port_number; 5683 } 5684 5685 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg, 5686 __le32 *key_x, __le32 *key_y, 5687 struct hclge_fd_rule *rule) 5688 { 5689 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number; 5690 u8 cur_pos = 0, tuple_size, shift_bits; 5691 unsigned int i; 5692 5693 for (i = 0; i < MAX_META_DATA; i++) { 5694 tuple_size = meta_data_key_info[i].key_length; 5695 tuple_bit = key_cfg->meta_data_active & BIT(i); 5696 5697 switch (tuple_bit) { 5698 case BIT(ROCE_TYPE): 5699 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET); 5700 cur_pos += tuple_size; 5701 break; 5702 case BIT(DST_VPORT): 5703 port_number = hclge_get_port_number(HOST_PORT, 0, 5704 rule->vf_id, 0); 5705 hnae3_set_field(meta_data, 5706 GENMASK(cur_pos + tuple_size, cur_pos), 5707 cur_pos, port_number); 5708 cur_pos += tuple_size; 5709 break; 5710 default: 5711 break; 5712 } 5713 } 5714 5715 calc_x(tmp_x, meta_data, 0xFFFFFFFF); 5716 calc_y(tmp_y, meta_data, 0xFFFFFFFF); 5717 shift_bits = sizeof(meta_data) * 8 - cur_pos; 5718 5719 *key_x = cpu_to_le32(tmp_x << shift_bits); 5720 *key_y = cpu_to_le32(tmp_y << shift_bits); 5721 } 5722 5723 /* A complete key is combined with meta data key and tuple key. 5724 * Meta data key is stored at the MSB region, and tuple key is stored at 5725 * the LSB region, unused bits will be filled 0. 
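 * As implemented in hclge_config_key() below: the key buffer is
 * max_key_length / 8 bytes, the meta data key occupies its last
 * MAX_META_DATA_LENGTH / 8 bytes, and the active tuples are packed
 * upward from byte 0, advancing by tuple_size for each tuple accepted
 * by hclge_fd_convert_tuple().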
5726 */ 5727 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, 5728 struct hclge_fd_rule *rule) 5729 { 5730 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; 5731 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES]; 5732 u8 *cur_key_x, *cur_key_y; 5733 u8 meta_data_region; 5734 u8 tuple_size; 5735 int ret; 5736 u32 i; 5737 5738 memset(key_x, 0, sizeof(key_x)); 5739 memset(key_y, 0, sizeof(key_y)); 5740 cur_key_x = key_x; 5741 cur_key_y = key_y; 5742 5743 for (i = 0; i < MAX_TUPLE; i++) { 5744 bool tuple_valid; 5745 5746 tuple_size = tuple_key_info[i].key_length / 8; 5747 if (!(key_cfg->tuple_active & BIT(i))) 5748 continue; 5749 5750 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x, 5751 cur_key_y, rule); 5752 if (tuple_valid) { 5753 cur_key_x += tuple_size; 5754 cur_key_y += tuple_size; 5755 } 5756 } 5757 5758 meta_data_region = hdev->fd_cfg.max_key_length / 8 - 5759 MAX_META_DATA_LENGTH / 8; 5760 5761 hclge_fd_convert_meta_data(key_cfg, 5762 (__le32 *)(key_x + meta_data_region), 5763 (__le32 *)(key_y + meta_data_region), 5764 rule); 5765 5766 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, 5767 true); 5768 if (ret) { 5769 dev_err(&hdev->pdev->dev, 5770 "fd key_y config fail, loc=%u, ret=%d\n", 5771 rule->queue_id, ret); 5772 return ret; 5773 } 5774 5775 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, 5776 true); 5777 if (ret) 5778 dev_err(&hdev->pdev->dev, 5779 "fd key_x config fail, loc=%u, ret=%d\n", 5780 rule->queue_id, ret); 5781 return ret; 5782 } 5783 5784 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, 5785 struct hclge_fd_rule *rule) 5786 { 5787 struct hclge_vport *vport = hdev->vport; 5788 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 5789 struct hclge_fd_ad_data ad_data; 5790 5791 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); 5792 ad_data.ad_id = rule->location; 5793 5794 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 5795 ad_data.drop_packet = true; 5796 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { 5797 ad_data.override_tc = true; 5798 ad_data.queue_id = 5799 kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; 5800 ad_data.tc_size = 5801 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); 5802 } else { 5803 ad_data.forward_to_direct_queue = true; 5804 ad_data.queue_id = rule->queue_id; 5805 } 5806 5807 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { 5808 ad_data.use_counter = true; 5809 ad_data.counter_id = rule->vf_id % 5810 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; 5811 } else { 5812 ad_data.use_counter = false; 5813 ad_data.counter_id = 0; 5814 } 5815 5816 ad_data.use_next_stage = false; 5817 ad_data.next_input_key = 0; 5818 5819 ad_data.write_rule_id_to_bd = true; 5820 ad_data.rule_id = rule->location; 5821 5822 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); 5823 } 5824 5825 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec, 5826 u32 *unused_tuple) 5827 { 5828 if (!spec || !unused_tuple) 5829 return -EINVAL; 5830 5831 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 5832 5833 if (!spec->ip4src) 5834 *unused_tuple |= BIT(INNER_SRC_IP); 5835 5836 if (!spec->ip4dst) 5837 *unused_tuple |= BIT(INNER_DST_IP); 5838 5839 if (!spec->psrc) 5840 *unused_tuple |= BIT(INNER_SRC_PORT); 5841 5842 if (!spec->pdst) 5843 *unused_tuple |= BIT(INNER_DST_PORT); 5844 5845 if (!spec->tos) 5846 *unused_tuple |= BIT(INNER_IP_TOS); 5847 5848 return 0; 5849 } 5850 5851 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, 
5852 u32 *unused_tuple) 5853 { 5854 if (!spec || !unused_tuple) 5855 return -EINVAL; 5856 5857 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5858 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5859 5860 if (!spec->ip4src) 5861 *unused_tuple |= BIT(INNER_SRC_IP); 5862 5863 if (!spec->ip4dst) 5864 *unused_tuple |= BIT(INNER_DST_IP); 5865 5866 if (!spec->tos) 5867 *unused_tuple |= BIT(INNER_IP_TOS); 5868 5869 if (!spec->proto) 5870 *unused_tuple |= BIT(INNER_IP_PROTO); 5871 5872 if (spec->l4_4_bytes) 5873 return -EOPNOTSUPP; 5874 5875 if (spec->ip_ver != ETH_RX_NFC_IP4) 5876 return -EOPNOTSUPP; 5877 5878 return 0; 5879 } 5880 5881 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, 5882 u32 *unused_tuple) 5883 { 5884 if (!spec || !unused_tuple) 5885 return -EINVAL; 5886 5887 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC); 5888 5889 /* check whether src/dst ip address used */ 5890 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) 5891 *unused_tuple |= BIT(INNER_SRC_IP); 5892 5893 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) 5894 *unused_tuple |= BIT(INNER_DST_IP); 5895 5896 if (!spec->psrc) 5897 *unused_tuple |= BIT(INNER_SRC_PORT); 5898 5899 if (!spec->pdst) 5900 *unused_tuple |= BIT(INNER_DST_PORT); 5901 5902 if (!spec->tclass) 5903 *unused_tuple |= BIT(INNER_IP_TOS); 5904 5905 return 0; 5906 } 5907 5908 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, 5909 u32 *unused_tuple) 5910 { 5911 if (!spec || !unused_tuple) 5912 return -EINVAL; 5913 5914 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 5915 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); 5916 5917 /* check whether src/dst ip address used */ 5918 if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) 5919 *unused_tuple |= BIT(INNER_SRC_IP); 5920 5921 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) 5922 *unused_tuple |= BIT(INNER_DST_IP); 5923 5924 if (!spec->l4_proto) 5925 *unused_tuple |= BIT(INNER_IP_PROTO); 5926 5927 if (!spec->tclass) 5928 *unused_tuple |= BIT(INNER_IP_TOS); 5929 5930 if (spec->l4_4_bytes) 5931 return -EOPNOTSUPP; 5932 5933 return 0; 5934 } 5935 5936 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) 5937 { 5938 if (!spec || !unused_tuple) 5939 return -EINVAL; 5940 5941 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | 5942 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) | 5943 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO); 5944 5945 if (is_zero_ether_addr(spec->h_source)) 5946 *unused_tuple |= BIT(INNER_SRC_MAC); 5947 5948 if (is_zero_ether_addr(spec->h_dest)) 5949 *unused_tuple |= BIT(INNER_DST_MAC); 5950 5951 if (!spec->h_proto) 5952 *unused_tuple |= BIT(INNER_ETH_TYPE); 5953 5954 return 0; 5955 } 5956 5957 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, 5958 struct ethtool_rx_flow_spec *fs, 5959 u32 *unused_tuple) 5960 { 5961 if (fs->flow_type & FLOW_EXT) { 5962 if (fs->h_ext.vlan_etype) { 5963 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); 5964 return -EOPNOTSUPP; 5965 } 5966 5967 if (!fs->h_ext.vlan_tci) 5968 *unused_tuple |= BIT(INNER_VLAN_TAG_FST); 5969 5970 if (fs->m_ext.vlan_tci && 5971 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) { 5972 dev_err(&hdev->pdev->dev, 5973 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n", 5974 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1); 5975 return -EINVAL; 5976 } 5977 } else { 5978 *unused_tuple |= BIT(INNER_VLAN_TAG_FST); 5979 } 5980 5981 if (fs->flow_type & FLOW_MAC_EXT) { 5982 if (hdev->fd_cfg.fd_mode != 5983 
HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 5984 dev_err(&hdev->pdev->dev, 5985 "FLOW_MAC_EXT is not supported in current fd mode!\n"); 5986 return -EOPNOTSUPP; 5987 } 5988 5989 if (is_zero_ether_addr(fs->h_ext.h_dest)) 5990 *unused_tuple |= BIT(INNER_DST_MAC); 5991 else 5992 *unused_tuple &= ~BIT(INNER_DST_MAC); 5993 } 5994 5995 return 0; 5996 } 5997 5998 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, 5999 struct hclge_fd_user_def_info *info) 6000 { 6001 switch (flow_type) { 6002 case ETHER_FLOW: 6003 info->layer = HCLGE_FD_USER_DEF_L2; 6004 *unused_tuple &= ~BIT(INNER_L2_RSV); 6005 break; 6006 case IP_USER_FLOW: 6007 case IPV6_USER_FLOW: 6008 info->layer = HCLGE_FD_USER_DEF_L3; 6009 *unused_tuple &= ~BIT(INNER_L3_RSV); 6010 break; 6011 case TCP_V4_FLOW: 6012 case UDP_V4_FLOW: 6013 case TCP_V6_FLOW: 6014 case UDP_V6_FLOW: 6015 info->layer = HCLGE_FD_USER_DEF_L4; 6016 *unused_tuple &= ~BIT(INNER_L4_RSV); 6017 break; 6018 default: 6019 return -EOPNOTSUPP; 6020 } 6021 6022 return 0; 6023 } 6024 6025 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) 6026 { 6027 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; 6028 } 6029 6030 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, 6031 struct ethtool_rx_flow_spec *fs, 6032 u32 *unused_tuple, 6033 struct hclge_fd_user_def_info *info) 6034 { 6035 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; 6036 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6037 u16 data, offset, data_mask, offset_mask; 6038 int ret; 6039 6040 info->layer = HCLGE_FD_USER_DEF_NONE; 6041 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; 6042 6043 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs)) 6044 return 0; 6045 6046 /* user-def data from ethtool is a 64-bit value; bits 0~15 are used 6047 * for data, and bits 32~47 are used for the offset.
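 * In struct ethtool_rx_flow_spec this arrives as h_ext.data[1] (data)
 * and h_ext.data[0] (offset), with the corresponding masks in
 * m_ext.data[], which is how the fields are unpacked just below.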
6048 */ 6049 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; 6050 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA; 6051 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; 6052 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET; 6053 6054 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) { 6055 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); 6056 return -EOPNOTSUPP; 6057 } 6058 6059 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) { 6060 dev_err(&hdev->pdev->dev, 6061 "user-def offset[%u] should be no more than %u\n", 6062 offset, HCLGE_FD_MAX_USER_DEF_OFFSET); 6063 return -EINVAL; 6064 } 6065 6066 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) { 6067 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); 6068 return -EINVAL; 6069 } 6070 6071 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); 6072 if (ret) { 6073 dev_err(&hdev->pdev->dev, 6074 "unsupported flow type for user-def bytes, ret = %d\n", 6075 ret); 6076 return ret; 6077 } 6078 6079 info->data = data; 6080 info->data_mask = data_mask; 6081 info->offset = offset; 6082 6083 return 0; 6084 } 6085 6086 static int hclge_fd_check_spec(struct hclge_dev *hdev, 6087 struct ethtool_rx_flow_spec *fs, 6088 u32 *unused_tuple, 6089 struct hclge_fd_user_def_info *info) 6090 { 6091 u32 flow_type; 6092 int ret; 6093 6094 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 6095 dev_err(&hdev->pdev->dev, 6096 "failed to config fd rules, invalid rule location: %u, max is %u\n.", 6097 fs->location, 6098 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); 6099 return -EINVAL; 6100 } 6101 6102 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); 6103 if (ret) 6104 return ret; 6105 6106 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6107 switch (flow_type) { 6108 case SCTP_V4_FLOW: 6109 case TCP_V4_FLOW: 6110 case UDP_V4_FLOW: 6111 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec, 6112 unused_tuple); 6113 break; 6114 case IP_USER_FLOW: 6115 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, 6116 unused_tuple); 6117 break; 6118 case SCTP_V6_FLOW: 6119 case TCP_V6_FLOW: 6120 case UDP_V6_FLOW: 6121 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec, 6122 unused_tuple); 6123 break; 6124 case IPV6_USER_FLOW: 6125 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, 6126 unused_tuple); 6127 break; 6128 case ETHER_FLOW: 6129 if (hdev->fd_cfg.fd_mode != 6130 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { 6131 dev_err(&hdev->pdev->dev, 6132 "ETHER_FLOW is not supported in current fd mode!\n"); 6133 return -EOPNOTSUPP; 6134 } 6135 6136 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec, 6137 unused_tuple); 6138 break; 6139 default: 6140 dev_err(&hdev->pdev->dev, 6141 "unsupported protocol type, protocol type = %#x\n", 6142 flow_type); 6143 return -EOPNOTSUPP; 6144 } 6145 6146 if (ret) { 6147 dev_err(&hdev->pdev->dev, 6148 "failed to check flow union tuple, ret = %d\n", 6149 ret); 6150 return ret; 6151 } 6152 6153 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); 6154 } 6155 6156 static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs, 6157 struct hclge_fd_rule *rule, u8 ip_proto) 6158 { 6159 rule->tuples.src_ip[IPV4_INDEX] = 6160 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); 6161 rule->tuples_mask.src_ip[IPV4_INDEX] = 6162 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); 6163 6164 rule->tuples.dst_ip[IPV4_INDEX] = 6165 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); 6166 
rule->tuples_mask.dst_ip[IPV4_INDEX] = 6167 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); 6168 6169 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); 6170 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); 6171 6172 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); 6173 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); 6174 6175 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; 6176 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; 6177 6178 rule->tuples.ether_proto = ETH_P_IP; 6179 rule->tuples_mask.ether_proto = 0xFFFF; 6180 6181 rule->tuples.ip_proto = ip_proto; 6182 rule->tuples_mask.ip_proto = 0xFF; 6183 } 6184 6185 static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs, 6186 struct hclge_fd_rule *rule) 6187 { 6188 rule->tuples.src_ip[IPV4_INDEX] = 6189 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); 6190 rule->tuples_mask.src_ip[IPV4_INDEX] = 6191 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); 6192 6193 rule->tuples.dst_ip[IPV4_INDEX] = 6194 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); 6195 rule->tuples_mask.dst_ip[IPV4_INDEX] = 6196 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); 6197 6198 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; 6199 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; 6200 6201 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; 6202 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; 6203 6204 rule->tuples.ether_proto = ETH_P_IP; 6205 rule->tuples_mask.ether_proto = 0xFFFF; 6206 } 6207 6208 static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs, 6209 struct hclge_fd_rule *rule, u8 ip_proto) 6210 { 6211 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, 6212 IPV6_SIZE); 6213 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, 6214 IPV6_SIZE); 6215 6216 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, 6217 IPV6_SIZE); 6218 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, 6219 IPV6_SIZE); 6220 6221 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); 6222 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); 6223 6224 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); 6225 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); 6226 6227 rule->tuples.ether_proto = ETH_P_IPV6; 6228 rule->tuples_mask.ether_proto = 0xFFFF; 6229 6230 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; 6231 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; 6232 6233 rule->tuples.ip_proto = ip_proto; 6234 rule->tuples_mask.ip_proto = 0xFF; 6235 } 6236 6237 static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs, 6238 struct hclge_fd_rule *rule) 6239 { 6240 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, 6241 IPV6_SIZE); 6242 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, 6243 IPV6_SIZE); 6244 6245 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, 6246 IPV6_SIZE); 6247 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, 6248 IPV6_SIZE); 6249 6250 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; 6251 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; 6252 6253 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; 6254 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; 6255 6256 rule->tuples.ether_proto = ETH_P_IPV6; 6257 rule->tuples_mask.ether_proto = 0xFFFF; 6258 } 6259 6260 static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs, 6261 struct 
hclge_fd_rule *rule) 6262 { 6263 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); 6264 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); 6265 6266 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); 6267 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); 6268 6269 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); 6270 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); 6271 } 6272 6273 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, 6274 struct hclge_fd_rule *rule) 6275 { 6276 switch (info->layer) { 6277 case HCLGE_FD_USER_DEF_L2: 6278 rule->tuples.l2_user_def = info->data; 6279 rule->tuples_mask.l2_user_def = info->data_mask; 6280 break; 6281 case HCLGE_FD_USER_DEF_L3: 6282 rule->tuples.l3_user_def = info->data; 6283 rule->tuples_mask.l3_user_def = info->data_mask; 6284 break; 6285 case HCLGE_FD_USER_DEF_L4: 6286 rule->tuples.l4_user_def = (u32)info->data << 16; 6287 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; 6288 break; 6289 default: 6290 break; 6291 } 6292 6293 rule->ep.user_def = *info; 6294 } 6295 6296 static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs, 6297 struct hclge_fd_rule *rule, 6298 struct hclge_fd_user_def_info *info) 6299 { 6300 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); 6301 6302 switch (flow_type) { 6303 case SCTP_V4_FLOW: 6304 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP); 6305 break; 6306 case TCP_V4_FLOW: 6307 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP); 6308 break; 6309 case UDP_V4_FLOW: 6310 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP); 6311 break; 6312 case IP_USER_FLOW: 6313 hclge_fd_get_ip4_tuple(fs, rule); 6314 break; 6315 case SCTP_V6_FLOW: 6316 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP); 6317 break; 6318 case TCP_V6_FLOW: 6319 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP); 6320 break; 6321 case UDP_V6_FLOW: 6322 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP); 6323 break; 6324 case IPV6_USER_FLOW: 6325 hclge_fd_get_ip6_tuple(fs, rule); 6326 break; 6327 case ETHER_FLOW: 6328 hclge_fd_get_ether_tuple(fs, rule); 6329 break; 6330 default: 6331 return -EOPNOTSUPP; 6332 } 6333 6334 if (fs->flow_type & FLOW_EXT) { 6335 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci); 6336 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci); 6337 hclge_fd_get_user_def_tuple(info, rule); 6338 } 6339 6340 if (fs->flow_type & FLOW_MAC_EXT) { 6341 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest); 6342 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest); 6343 } 6344 6345 return 0; 6346 } 6347 6348 static int hclge_fd_config_rule(struct hclge_dev *hdev, 6349 struct hclge_fd_rule *rule) 6350 { 6351 int ret; 6352 6353 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); 6354 if (ret) 6355 return ret; 6356 6357 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); 6358 } 6359 6360 static int hclge_add_fd_entry_common(struct hclge_dev *hdev, 6361 struct hclge_fd_rule *rule) 6362 { 6363 int ret; 6364 6365 spin_lock_bh(&hdev->fd_rule_lock); 6366 6367 if (hdev->fd_active_type != rule->rule_type && 6368 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || 6369 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { 6370 dev_err(&hdev->pdev->dev, 6371 "mode conflict(new type %d, active type %d), please delete existent rules first\n", 6372 rule->rule_type, hdev->fd_active_type); 6373 spin_unlock_bh(&hdev->fd_rule_lock); 6374 return -EINVAL; 6375 } 6376 6377 ret 
= hclge_fd_check_user_def_refcnt(hdev, rule); 6378 if (ret) 6379 goto out; 6380 6381 ret = hclge_clear_arfs_rules(hdev); 6382 if (ret) 6383 goto out; 6384 6385 ret = hclge_fd_config_rule(hdev, rule); 6386 if (ret) 6387 goto out; 6388 6389 rule->state = HCLGE_FD_ACTIVE; 6390 hdev->fd_active_type = rule->rule_type; 6391 hclge_update_fd_list(hdev, rule->state, rule->location, rule); 6392 6393 out: 6394 spin_unlock_bh(&hdev->fd_rule_lock); 6395 return ret; 6396 } 6397 6398 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle) 6399 { 6400 struct hclge_vport *vport = hclge_get_vport(handle); 6401 struct hclge_dev *hdev = vport->back; 6402 6403 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; 6404 } 6405 6406 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, 6407 u16 *vport_id, u8 *action, u16 *queue_id) 6408 { 6409 struct hclge_vport *vport = hdev->vport; 6410 6411 if (ring_cookie == RX_CLS_FLOW_DISC) { 6412 *action = HCLGE_FD_ACTION_DROP_PACKET; 6413 } else { 6414 u32 ring = ethtool_get_flow_spec_ring(ring_cookie); 6415 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); 6416 u16 tqps; 6417 6418 /* To keep consistent with user's configuration, minus 1 when 6419 * printing 'vf', because vf id from ethtool is added 1 for vf. 6420 */ 6421 if (vf > hdev->num_req_vfs) { 6422 dev_err(&hdev->pdev->dev, 6423 "Error: vf id (%u) should be less than %u\n", 6424 vf - 1U, hdev->num_req_vfs); 6425 return -EINVAL; 6426 } 6427 6428 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; 6429 tqps = hdev->vport[vf].nic.kinfo.num_tqps; 6430 6431 if (ring >= tqps) { 6432 dev_err(&hdev->pdev->dev, 6433 "Error: queue id (%u) > max tqp num (%u)\n", 6434 ring, tqps - 1U); 6435 return -EINVAL; 6436 } 6437 6438 *action = HCLGE_FD_ACTION_SELECT_QUEUE; 6439 *queue_id = ring; 6440 } 6441 6442 return 0; 6443 } 6444 6445 static int hclge_add_fd_entry(struct hnae3_handle *handle, 6446 struct ethtool_rxnfc *cmd) 6447 { 6448 struct hclge_vport *vport = hclge_get_vport(handle); 6449 struct hclge_dev *hdev = vport->back; 6450 struct hclge_fd_user_def_info info; 6451 u16 dst_vport_id = 0, q_index = 0; 6452 struct ethtool_rx_flow_spec *fs; 6453 struct hclge_fd_rule *rule; 6454 u32 unused = 0; 6455 u8 action; 6456 int ret; 6457 6458 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { 6459 dev_err(&hdev->pdev->dev, 6460 "flow table director is not supported\n"); 6461 return -EOPNOTSUPP; 6462 } 6463 6464 if (!hdev->fd_en) { 6465 dev_err(&hdev->pdev->dev, 6466 "please enable flow director first\n"); 6467 return -EOPNOTSUPP; 6468 } 6469 6470 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6471 6472 ret = hclge_fd_check_spec(hdev, fs, &unused, &info); 6473 if (ret) 6474 return ret; 6475 6476 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, 6477 &action, &q_index); 6478 if (ret) 6479 return ret; 6480 6481 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 6482 if (!rule) 6483 return -ENOMEM; 6484 6485 ret = hclge_fd_get_tuple(fs, rule, &info); 6486 if (ret) { 6487 kfree(rule); 6488 return ret; 6489 } 6490 6491 rule->flow_type = fs->flow_type; 6492 rule->location = fs->location; 6493 rule->unused_tuple = unused; 6494 rule->vf_id = dst_vport_id; 6495 rule->queue_id = q_index; 6496 rule->action = action; 6497 rule->rule_type = HCLGE_FD_EP_ACTIVE; 6498 6499 ret = hclge_add_fd_entry_common(hdev, rule); 6500 if (ret) 6501 kfree(rule); 6502 6503 return ret; 6504 } 6505 6506 static int hclge_del_fd_entry(struct hnae3_handle *handle, 6507 struct ethtool_rxnfc *cmd) 6508 { 6509 
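 /* Deletes a single user-configured flow director rule, e.g. what the
  * ethtool ntuple interface issues for "ethtool -U <ifname> delete <loc>"
  * (illustrative command). The location is validated against the stage-1
  * rule capacity, the TCAM entry is invalidated, and the software rule
  * list is updated under fd_rule_lock.
  */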
struct hclge_vport *vport = hclge_get_vport(handle); 6510 struct hclge_dev *hdev = vport->back; 6511 struct ethtool_rx_flow_spec *fs; 6512 int ret; 6513 6514 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6515 return -EOPNOTSUPP; 6516 6517 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6518 6519 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 6520 return -EINVAL; 6521 6522 spin_lock_bh(&hdev->fd_rule_lock); 6523 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || 6524 !test_bit(fs->location, hdev->fd_bmap)) { 6525 dev_err(&hdev->pdev->dev, 6526 "Delete fail, rule %u is inexistent\n", fs->location); 6527 spin_unlock_bh(&hdev->fd_rule_lock); 6528 return -ENOENT; 6529 } 6530 6531 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, 6532 NULL, false); 6533 if (ret) 6534 goto out; 6535 6536 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); 6537 6538 out: 6539 spin_unlock_bh(&hdev->fd_rule_lock); 6540 return ret; 6541 } 6542 6543 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, 6544 bool clear_list) 6545 { 6546 struct hclge_fd_rule *rule; 6547 struct hlist_node *node; 6548 u16 location; 6549 6550 spin_lock_bh(&hdev->fd_rule_lock); 6551 6552 for_each_set_bit(location, hdev->fd_bmap, 6553 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 6554 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, 6555 NULL, false); 6556 6557 if (clear_list) { 6558 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, 6559 rule_node) { 6560 hlist_del(&rule->rule_node); 6561 kfree(rule); 6562 } 6563 hdev->fd_active_type = HCLGE_FD_RULE_NONE; 6564 hdev->hclge_fd_rule_num = 0; 6565 bitmap_zero(hdev->fd_bmap, 6566 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 6567 } 6568 6569 spin_unlock_bh(&hdev->fd_rule_lock); 6570 } 6571 6572 static void hclge_del_all_fd_entries(struct hclge_dev *hdev) 6573 { 6574 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6575 return; 6576 6577 hclge_clear_fd_rules_in_list(hdev, true); 6578 hclge_fd_disable_user_def(hdev); 6579 } 6580 6581 static int hclge_restore_fd_entries(struct hnae3_handle *handle) 6582 { 6583 struct hclge_vport *vport = hclge_get_vport(handle); 6584 struct hclge_dev *hdev = vport->back; 6585 struct hclge_fd_rule *rule; 6586 struct hlist_node *node; 6587 6588 /* Return ok here, because reset error handling will check this 6589 * return value. If error is returned here, the reset process will 6590 * fail. 
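 * The rules themselves are re-installed asynchronously: they are only
 * marked HCLGE_FD_TO_ADD here, and hclge_sync_fd_table() replays them
 * once the service task sees HCLGE_STATE_FD_TBL_CHANGED.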
6591 */ 6592 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6593 return 0; 6594 6595 /* if fd is disabled, should not restore it when reset */ 6596 if (!hdev->fd_en) 6597 return 0; 6598 6599 spin_lock_bh(&hdev->fd_rule_lock); 6600 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 6601 if (rule->state == HCLGE_FD_ACTIVE) 6602 rule->state = HCLGE_FD_TO_ADD; 6603 } 6604 spin_unlock_bh(&hdev->fd_rule_lock); 6605 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 6606 6607 return 0; 6608 } 6609 6610 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, 6611 struct ethtool_rxnfc *cmd) 6612 { 6613 struct hclge_vport *vport = hclge_get_vport(handle); 6614 struct hclge_dev *hdev = vport->back; 6615 6616 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) 6617 return -EOPNOTSUPP; 6618 6619 cmd->rule_cnt = hdev->hclge_fd_rule_num; 6620 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 6621 6622 return 0; 6623 } 6624 6625 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule, 6626 struct ethtool_tcpip4_spec *spec, 6627 struct ethtool_tcpip4_spec *spec_mask) 6628 { 6629 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 6630 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 6631 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 6632 6633 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 6634 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? 6635 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 6636 6637 spec->psrc = cpu_to_be16(rule->tuples.src_port); 6638 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 6639 0 : cpu_to_be16(rule->tuples_mask.src_port); 6640 6641 spec->pdst = cpu_to_be16(rule->tuples.dst_port); 6642 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? 6643 0 : cpu_to_be16(rule->tuples_mask.dst_port); 6644 6645 spec->tos = rule->tuples.ip_tos; 6646 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6647 0 : rule->tuples_mask.ip_tos; 6648 } 6649 6650 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, 6651 struct ethtool_usrip4_spec *spec, 6652 struct ethtool_usrip4_spec *spec_mask) 6653 { 6654 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); 6655 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 6656 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); 6657 6658 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); 6659 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? 6660 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); 6661 6662 spec->tos = rule->tuples.ip_tos; 6663 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6664 0 : rule->tuples_mask.ip_tos; 6665 6666 spec->proto = rule->tuples.ip_proto; 6667 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 
6668 0 : rule->tuples_mask.ip_proto; 6669 6670 spec->ip_ver = ETH_RX_NFC_IP4; 6671 } 6672 6673 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, 6674 struct ethtool_tcpip6_spec *spec, 6675 struct ethtool_tcpip6_spec *spec_mask) 6676 { 6677 cpu_to_be32_array(spec->ip6src, 6678 rule->tuples.src_ip, IPV6_SIZE); 6679 cpu_to_be32_array(spec->ip6dst, 6680 rule->tuples.dst_ip, IPV6_SIZE); 6681 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 6682 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); 6683 else 6684 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, 6685 IPV6_SIZE); 6686 6687 if (rule->unused_tuple & BIT(INNER_DST_IP)) 6688 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); 6689 else 6690 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, 6691 IPV6_SIZE); 6692 6693 spec->tclass = rule->tuples.ip_tos; 6694 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6695 0 : rule->tuples_mask.ip_tos; 6696 6697 spec->psrc = cpu_to_be16(rule->tuples.src_port); 6698 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ? 6699 0 : cpu_to_be16(rule->tuples_mask.src_port); 6700 6701 spec->pdst = cpu_to_be16(rule->tuples.dst_port); 6702 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ? 6703 0 : cpu_to_be16(rule->tuples_mask.dst_port); 6704 } 6705 6706 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, 6707 struct ethtool_usrip6_spec *spec, 6708 struct ethtool_usrip6_spec *spec_mask) 6709 { 6710 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE); 6711 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE); 6712 if (rule->unused_tuple & BIT(INNER_SRC_IP)) 6713 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); 6714 else 6715 cpu_to_be32_array(spec_mask->ip6src, 6716 rule->tuples_mask.src_ip, IPV6_SIZE); 6717 6718 if (rule->unused_tuple & BIT(INNER_DST_IP)) 6719 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); 6720 else 6721 cpu_to_be32_array(spec_mask->ip6dst, 6722 rule->tuples_mask.dst_ip, IPV6_SIZE); 6723 6724 spec->tclass = rule->tuples.ip_tos; 6725 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 6726 0 : rule->tuples_mask.ip_tos; 6727 6728 spec->l4_proto = rule->tuples.ip_proto; 6729 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 6730 0 : rule->tuples_mask.ip_proto; 6731 } 6732 6733 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, 6734 struct ethhdr *spec, 6735 struct ethhdr *spec_mask) 6736 { 6737 ether_addr_copy(spec->h_source, rule->tuples.src_mac); 6738 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac); 6739 6740 if (rule->unused_tuple & BIT(INNER_SRC_MAC)) 6741 eth_zero_addr(spec_mask->h_source); 6742 else 6743 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac); 6744 6745 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6746 eth_zero_addr(spec_mask->h_dest); 6747 else 6748 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac); 6749 6750 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto); 6751 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? 
6752 0 : cpu_to_be16(rule->tuples_mask.ether_proto); 6753 } 6754 6755 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, 6756 struct hclge_fd_rule *rule) 6757 { 6758 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) == 6759 HCLGE_FD_TUPLE_USER_DEF_TUPLES) { 6760 fs->h_ext.data[0] = 0; 6761 fs->h_ext.data[1] = 0; 6762 fs->m_ext.data[0] = 0; 6763 fs->m_ext.data[1] = 0; 6764 } else { 6765 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); 6766 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); 6767 fs->m_ext.data[0] = 6768 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK); 6769 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); 6770 } 6771 } 6772 6773 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, 6774 struct hclge_fd_rule *rule) 6775 { 6776 if (fs->flow_type & FLOW_EXT) { 6777 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1); 6778 fs->m_ext.vlan_tci = 6779 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ? 6780 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1); 6781 6782 hclge_fd_get_user_def_info(fs, rule); 6783 } 6784 6785 if (fs->flow_type & FLOW_MAC_EXT) { 6786 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac); 6787 if (rule->unused_tuple & BIT(INNER_DST_MAC)) 6788 eth_zero_addr(fs->m_u.ether_spec.h_dest); 6789 else 6790 ether_addr_copy(fs->m_u.ether_spec.h_dest, 6791 rule->tuples_mask.dst_mac); 6792 } 6793 } 6794 6795 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, 6796 u16 location) 6797 { 6798 struct hclge_fd_rule *rule = NULL; 6799 struct hlist_node *node2; 6800 6801 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { 6802 if (rule->location == location) 6803 return rule; 6804 else if (rule->location > location) 6805 return NULL; 6806 } 6807 6808 return NULL; 6809 } 6810 6811 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, 6812 struct hclge_fd_rule *rule) 6813 { 6814 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { 6815 fs->ring_cookie = RX_CLS_FLOW_DISC; 6816 } else { 6817 u64 vf_id; 6818 6819 fs->ring_cookie = rule->queue_id; 6820 vf_id = rule->vf_id; 6821 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 6822 fs->ring_cookie |= vf_id; 6823 } 6824 } 6825 6826 static int hclge_get_fd_rule_info(struct hnae3_handle *handle, 6827 struct ethtool_rxnfc *cmd) 6828 { 6829 struct hclge_vport *vport = hclge_get_vport(handle); 6830 struct hclge_fd_rule *rule = NULL; 6831 struct hclge_dev *hdev = vport->back; 6832 struct ethtool_rx_flow_spec *fs; 6833 6834 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6835 return -EOPNOTSUPP; 6836 6837 fs = (struct ethtool_rx_flow_spec *)&cmd->fs; 6838 6839 spin_lock_bh(&hdev->fd_rule_lock); 6840 6841 rule = hclge_get_fd_rule(hdev, fs->location); 6842 if (!rule) { 6843 spin_unlock_bh(&hdev->fd_rule_lock); 6844 return -ENOENT; 6845 } 6846 6847 fs->flow_type = rule->flow_type; 6848 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { 6849 case SCTP_V4_FLOW: 6850 case TCP_V4_FLOW: 6851 case UDP_V4_FLOW: 6852 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec, 6853 &fs->m_u.tcp_ip4_spec); 6854 break; 6855 case IP_USER_FLOW: 6856 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, 6857 &fs->m_u.usr_ip4_spec); 6858 break; 6859 case SCTP_V6_FLOW: 6860 case TCP_V6_FLOW: 6861 case UDP_V6_FLOW: 6862 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec, 6863 &fs->m_u.tcp_ip6_spec); 6864 break; 6865 case IPV6_USER_FLOW: 6866 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, 6867 &fs->m_u.usr_ip6_spec); 6868 break; 6869 /* The flow type 
of fd rule has been checked before adding in to rule 6870 * list. As other flow types have been handled, it must be ETHER_FLOW 6871 * for the default case 6872 */ 6873 default: 6874 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec, 6875 &fs->m_u.ether_spec); 6876 break; 6877 } 6878 6879 hclge_fd_get_ext_info(fs, rule); 6880 6881 hclge_fd_get_ring_cookie(fs, rule); 6882 6883 spin_unlock_bh(&hdev->fd_rule_lock); 6884 6885 return 0; 6886 } 6887 6888 static int hclge_get_all_rules(struct hnae3_handle *handle, 6889 struct ethtool_rxnfc *cmd, u32 *rule_locs) 6890 { 6891 struct hclge_vport *vport = hclge_get_vport(handle); 6892 struct hclge_dev *hdev = vport->back; 6893 struct hclge_fd_rule *rule; 6894 struct hlist_node *node2; 6895 int cnt = 0; 6896 6897 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6898 return -EOPNOTSUPP; 6899 6900 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; 6901 6902 spin_lock_bh(&hdev->fd_rule_lock); 6903 hlist_for_each_entry_safe(rule, node2, 6904 &hdev->fd_rule_list, rule_node) { 6905 if (cnt == cmd->rule_cnt) { 6906 spin_unlock_bh(&hdev->fd_rule_lock); 6907 return -EMSGSIZE; 6908 } 6909 6910 if (rule->state == HCLGE_FD_TO_DEL) 6911 continue; 6912 6913 rule_locs[cnt] = rule->location; 6914 cnt++; 6915 } 6916 6917 spin_unlock_bh(&hdev->fd_rule_lock); 6918 6919 cmd->rule_cnt = cnt; 6920 6921 return 0; 6922 } 6923 6924 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys, 6925 struct hclge_fd_rule_tuples *tuples) 6926 { 6927 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32 6928 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32 6929 6930 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto); 6931 tuples->ip_proto = fkeys->basic.ip_proto; 6932 tuples->dst_port = be16_to_cpu(fkeys->ports.dst); 6933 6934 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 6935 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src); 6936 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst); 6937 } else { 6938 int i; 6939 6940 for (i = 0; i < IPV6_SIZE; i++) { 6941 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]); 6942 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]); 6943 } 6944 } 6945 } 6946 6947 /* traverse all rules, check whether an existed rule has the same tuples */ 6948 static struct hclge_fd_rule * 6949 hclge_fd_search_flow_keys(struct hclge_dev *hdev, 6950 const struct hclge_fd_rule_tuples *tuples) 6951 { 6952 struct hclge_fd_rule *rule = NULL; 6953 struct hlist_node *node; 6954 6955 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 6956 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples))) 6957 return rule; 6958 } 6959 6960 return NULL; 6961 } 6962 6963 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples, 6964 struct hclge_fd_rule *rule) 6965 { 6966 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | 6967 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) | 6968 BIT(INNER_SRC_PORT); 6969 rule->action = 0; 6970 rule->vf_id = 0; 6971 rule->rule_type = HCLGE_FD_ARFS_ACTIVE; 6972 rule->state = HCLGE_FD_TO_ADD; 6973 if (tuples->ether_proto == ETH_P_IP) { 6974 if (tuples->ip_proto == IPPROTO_TCP) 6975 rule->flow_type = TCP_V4_FLOW; 6976 else 6977 rule->flow_type = UDP_V4_FLOW; 6978 } else { 6979 if (tuples->ip_proto == IPPROTO_TCP) 6980 rule->flow_type = TCP_V6_FLOW; 6981 else 6982 rule->flow_type = UDP_V6_FLOW; 6983 } 6984 memcpy(&rule->tuples, tuples, sizeof(rule->tuples)); 6985 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask)); 6986 } 6987 6988 static int 
hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, 6989 u16 flow_id, struct flow_keys *fkeys) 6990 { 6991 struct hclge_vport *vport = hclge_get_vport(handle); 6992 struct hclge_fd_rule_tuples new_tuples = {}; 6993 struct hclge_dev *hdev = vport->back; 6994 struct hclge_fd_rule *rule; 6995 u16 bit_id; 6996 6997 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 6998 return -EOPNOTSUPP; 6999 7000 /* when there is already fd rule existed add by user, 7001 * arfs should not work 7002 */ 7003 spin_lock_bh(&hdev->fd_rule_lock); 7004 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && 7005 hdev->fd_active_type != HCLGE_FD_RULE_NONE) { 7006 spin_unlock_bh(&hdev->fd_rule_lock); 7007 return -EOPNOTSUPP; 7008 } 7009 7010 hclge_fd_get_flow_tuples(fkeys, &new_tuples); 7011 7012 /* check is there flow director filter existed for this flow, 7013 * if not, create a new filter for it; 7014 * if filter exist with different queue id, modify the filter; 7015 * if filter exist with same queue id, do nothing 7016 */ 7017 rule = hclge_fd_search_flow_keys(hdev, &new_tuples); 7018 if (!rule) { 7019 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); 7020 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 7021 spin_unlock_bh(&hdev->fd_rule_lock); 7022 return -ENOSPC; 7023 } 7024 7025 rule = kzalloc(sizeof(*rule), GFP_ATOMIC); 7026 if (!rule) { 7027 spin_unlock_bh(&hdev->fd_rule_lock); 7028 return -ENOMEM; 7029 } 7030 7031 rule->location = bit_id; 7032 rule->arfs.flow_id = flow_id; 7033 rule->queue_id = queue_id; 7034 hclge_fd_build_arfs_rule(&new_tuples, rule); 7035 hclge_update_fd_list(hdev, rule->state, rule->location, rule); 7036 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; 7037 } else if (rule->queue_id != queue_id) { 7038 rule->queue_id = queue_id; 7039 rule->state = HCLGE_FD_TO_ADD; 7040 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7041 hclge_task_schedule(hdev, 0); 7042 } 7043 spin_unlock_bh(&hdev->fd_rule_lock); 7044 return rule->location; 7045 } 7046 7047 static void hclge_rfs_filter_expire(struct hclge_dev *hdev) 7048 { 7049 #ifdef CONFIG_RFS_ACCEL 7050 struct hnae3_handle *handle = &hdev->vport[0].nic; 7051 struct hclge_fd_rule *rule; 7052 struct hlist_node *node; 7053 7054 spin_lock_bh(&hdev->fd_rule_lock); 7055 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { 7056 spin_unlock_bh(&hdev->fd_rule_lock); 7057 return; 7058 } 7059 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7060 if (rule->state != HCLGE_FD_ACTIVE) 7061 continue; 7062 if (rps_may_expire_flow(handle->netdev, rule->queue_id, 7063 rule->arfs.flow_id, rule->location)) { 7064 rule->state = HCLGE_FD_TO_DEL; 7065 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7066 } 7067 } 7068 spin_unlock_bh(&hdev->fd_rule_lock); 7069 #endif 7070 } 7071 7072 /* make sure being called after lock up with fd_rule_lock */ 7073 static int hclge_clear_arfs_rules(struct hclge_dev *hdev) 7074 { 7075 #ifdef CONFIG_RFS_ACCEL 7076 struct hclge_fd_rule *rule; 7077 struct hlist_node *node; 7078 int ret; 7079 7080 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) 7081 return 0; 7082 7083 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7084 switch (rule->state) { 7085 case HCLGE_FD_TO_DEL: 7086 case HCLGE_FD_ACTIVE: 7087 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 7088 rule->location, NULL, false); 7089 if (ret) 7090 return ret; 7091 fallthrough; 7092 case HCLGE_FD_TO_ADD: 7093 hclge_fd_dec_rule_cnt(hdev, rule->location); 7094 hlist_del(&rule->rule_node); 7095 
kfree(rule); 7096 break; 7097 default: 7098 break; 7099 } 7100 } 7101 hclge_sync_fd_state(hdev); 7102 7103 #endif 7104 return 0; 7105 } 7106 7107 static void hclge_get_cls_key_basic(const struct flow_rule *flow, 7108 struct hclge_fd_rule *rule) 7109 { 7110 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) { 7111 struct flow_match_basic match; 7112 u16 ethtype_key, ethtype_mask; 7113 7114 flow_rule_match_basic(flow, &match); 7115 ethtype_key = ntohs(match.key->n_proto); 7116 ethtype_mask = ntohs(match.mask->n_proto); 7117 7118 if (ethtype_key == ETH_P_ALL) { 7119 ethtype_key = 0; 7120 ethtype_mask = 0; 7121 } 7122 rule->tuples.ether_proto = ethtype_key; 7123 rule->tuples_mask.ether_proto = ethtype_mask; 7124 rule->tuples.ip_proto = match.key->ip_proto; 7125 rule->tuples_mask.ip_proto = match.mask->ip_proto; 7126 } else { 7127 rule->unused_tuple |= BIT(INNER_IP_PROTO); 7128 rule->unused_tuple |= BIT(INNER_ETH_TYPE); 7129 } 7130 } 7131 7132 static void hclge_get_cls_key_mac(const struct flow_rule *flow, 7133 struct hclge_fd_rule *rule) 7134 { 7135 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 7136 struct flow_match_eth_addrs match; 7137 7138 flow_rule_match_eth_addrs(flow, &match); 7139 ether_addr_copy(rule->tuples.dst_mac, match.key->dst); 7140 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); 7141 ether_addr_copy(rule->tuples.src_mac, match.key->src); 7142 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); 7143 } else { 7144 rule->unused_tuple |= BIT(INNER_DST_MAC); 7145 rule->unused_tuple |= BIT(INNER_SRC_MAC); 7146 } 7147 } 7148 7149 static void hclge_get_cls_key_vlan(const struct flow_rule *flow, 7150 struct hclge_fd_rule *rule) 7151 { 7152 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { 7153 struct flow_match_vlan match; 7154 7155 flow_rule_match_vlan(flow, &match); 7156 rule->tuples.vlan_tag1 = match.key->vlan_id | 7157 (match.key->vlan_priority << VLAN_PRIO_SHIFT); 7158 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | 7159 (match.mask->vlan_priority << VLAN_PRIO_SHIFT); 7160 } else { 7161 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); 7162 } 7163 } 7164 7165 static void hclge_get_cls_key_ip(const struct flow_rule *flow, 7166 struct hclge_fd_rule *rule) 7167 { 7168 u16 addr_type = 0; 7169 7170 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) { 7171 struct flow_match_control match; 7172 7173 flow_rule_match_control(flow, &match); 7174 addr_type = match.key->addr_type; 7175 } 7176 7177 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { 7178 struct flow_match_ipv4_addrs match; 7179 7180 flow_rule_match_ipv4_addrs(flow, &match); 7181 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); 7182 rule->tuples_mask.src_ip[IPV4_INDEX] = 7183 be32_to_cpu(match.mask->src); 7184 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); 7185 rule->tuples_mask.dst_ip[IPV4_INDEX] = 7186 be32_to_cpu(match.mask->dst); 7187 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { 7188 struct flow_match_ipv6_addrs match; 7189 7190 flow_rule_match_ipv6_addrs(flow, &match); 7191 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, 7192 IPV6_SIZE); 7193 be32_to_cpu_array(rule->tuples_mask.src_ip, 7194 match.mask->src.s6_addr32, IPV6_SIZE); 7195 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, 7196 IPV6_SIZE); 7197 be32_to_cpu_array(rule->tuples_mask.dst_ip, 7198 match.mask->dst.s6_addr32, IPV6_SIZE); 7199 } else { 7200 rule->unused_tuple |= BIT(INNER_SRC_IP); 7201 rule->unused_tuple |= 
BIT(INNER_DST_IP); 7202 } 7203 } 7204 7205 static void hclge_get_cls_key_port(const struct flow_rule *flow, 7206 struct hclge_fd_rule *rule) 7207 { 7208 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { 7209 struct flow_match_ports match; 7210 7211 flow_rule_match_ports(flow, &match); 7212 7213 rule->tuples.src_port = be16_to_cpu(match.key->src); 7214 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); 7215 rule->tuples.dst_port = be16_to_cpu(match.key->dst); 7216 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); 7217 } else { 7218 rule->unused_tuple |= BIT(INNER_SRC_PORT); 7219 rule->unused_tuple |= BIT(INNER_DST_PORT); 7220 } 7221 } 7222 7223 static int hclge_parse_cls_flower(struct hclge_dev *hdev, 7224 struct flow_cls_offload *cls_flower, 7225 struct hclge_fd_rule *rule) 7226 { 7227 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); 7228 struct flow_dissector *dissector = flow->match.dissector; 7229 7230 if (dissector->used_keys & 7231 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | 7232 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | 7233 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 7234 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | 7235 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 7236 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 7237 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { 7238 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n", 7239 dissector->used_keys); 7240 return -EOPNOTSUPP; 7241 } 7242 7243 hclge_get_cls_key_basic(flow, rule); 7244 hclge_get_cls_key_mac(flow, rule); 7245 hclge_get_cls_key_vlan(flow, rule); 7246 hclge_get_cls_key_ip(flow, rule); 7247 hclge_get_cls_key_port(flow, rule); 7248 7249 return 0; 7250 } 7251 7252 static int hclge_check_cls_flower(struct hclge_dev *hdev, 7253 struct flow_cls_offload *cls_flower, int tc) 7254 { 7255 u32 prio = cls_flower->common.prio; 7256 7257 if (tc < 0 || tc > hdev->tc_max) { 7258 dev_err(&hdev->pdev->dev, "invalid traffic class\n"); 7259 return -EINVAL; 7260 } 7261 7262 if (prio == 0 || 7263 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { 7264 dev_err(&hdev->pdev->dev, 7265 "prio %u should be in range[1, %u]\n", 7266 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); 7267 return -EINVAL; 7268 } 7269 7270 if (test_bit(prio - 1, hdev->fd_bmap)) { 7271 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); 7272 return -EINVAL; 7273 } 7274 return 0; 7275 } 7276 7277 static int hclge_add_cls_flower(struct hnae3_handle *handle, 7278 struct flow_cls_offload *cls_flower, 7279 int tc) 7280 { 7281 struct hclge_vport *vport = hclge_get_vport(handle); 7282 struct hclge_dev *hdev = vport->back; 7283 struct hclge_fd_rule *rule; 7284 int ret; 7285 7286 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { 7287 dev_err(&hdev->pdev->dev, 7288 "cls flower is not supported\n"); 7289 return -EOPNOTSUPP; 7290 } 7291 7292 ret = hclge_check_cls_flower(hdev, cls_flower, tc); 7293 if (ret) { 7294 dev_err(&hdev->pdev->dev, 7295 "failed to check cls flower params, ret = %d\n", ret); 7296 return ret; 7297 } 7298 7299 rule = kzalloc(sizeof(*rule), GFP_KERNEL); 7300 if (!rule) 7301 return -ENOMEM; 7302 7303 ret = hclge_parse_cls_flower(hdev, cls_flower, rule); 7304 if (ret) { 7305 kfree(rule); 7306 return ret; 7307 } 7308 7309 rule->action = HCLGE_FD_ACTION_SELECT_TC; 7310 rule->cls_flower.tc = tc; 7311 rule->location = cls_flower->common.prio - 1; 7312 rule->vf_id = 0; 7313 rule->cls_flower.cookie = cls_flower->cookie; 7314 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; 7315 7316 ret = hclge_add_fd_entry_common(hdev, rule); 7317 if (ret) 7318 kfree(rule); 7319 
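 /* On failure the rule was never linked into fd_rule_list, so free it
  * here; on success ownership has passed to the list via
  * hclge_add_fd_entry_common() -> hclge_update_fd_list().
  */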
7320 return ret; 7321 } 7322 7323 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, 7324 unsigned long cookie) 7325 { 7326 struct hclge_fd_rule *rule; 7327 struct hlist_node *node; 7328 7329 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 7330 if (rule->cls_flower.cookie == cookie) 7331 return rule; 7332 } 7333 7334 return NULL; 7335 } 7336 7337 static int hclge_del_cls_flower(struct hnae3_handle *handle, 7338 struct flow_cls_offload *cls_flower) 7339 { 7340 struct hclge_vport *vport = hclge_get_vport(handle); 7341 struct hclge_dev *hdev = vport->back; 7342 struct hclge_fd_rule *rule; 7343 int ret; 7344 7345 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7346 return -EOPNOTSUPP; 7347 7348 spin_lock_bh(&hdev->fd_rule_lock); 7349 7350 rule = hclge_find_cls_flower(hdev, cls_flower->cookie); 7351 if (!rule) { 7352 spin_unlock_bh(&hdev->fd_rule_lock); 7353 return -EINVAL; 7354 } 7355 7356 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, 7357 NULL, false); 7358 if (ret) { 7359 /* if tcam config fail, set rule state to TO_DEL, 7360 * so the rule will be deleted when periodic 7361 * task being scheduled. 7362 */ 7363 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); 7364 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7365 spin_unlock_bh(&hdev->fd_rule_lock); 7366 return ret; 7367 } 7368 7369 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); 7370 spin_unlock_bh(&hdev->fd_rule_lock); 7371 7372 return 0; 7373 } 7374 7375 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) 7376 { 7377 struct hclge_fd_rule *rule; 7378 struct hlist_node *node; 7379 int ret = 0; 7380 7381 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) 7382 return; 7383 7384 spin_lock_bh(&hdev->fd_rule_lock); 7385 7386 hlist_for_each_entry_safe(rule, node, hlist, rule_node) { 7387 switch (rule->state) { 7388 case HCLGE_FD_TO_ADD: 7389 ret = hclge_fd_config_rule(hdev, rule); 7390 if (ret) 7391 goto out; 7392 rule->state = HCLGE_FD_ACTIVE; 7393 break; 7394 case HCLGE_FD_TO_DEL: 7395 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, 7396 rule->location, NULL, false); 7397 if (ret) 7398 goto out; 7399 hclge_fd_dec_rule_cnt(hdev, rule->location); 7400 hclge_fd_free_node(hdev, rule); 7401 break; 7402 default: 7403 break; 7404 } 7405 } 7406 7407 out: 7408 if (ret) 7409 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); 7410 7411 spin_unlock_bh(&hdev->fd_rule_lock); 7412 } 7413 7414 static void hclge_sync_fd_table(struct hclge_dev *hdev) 7415 { 7416 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) 7417 return; 7418 7419 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { 7420 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; 7421 7422 hclge_clear_fd_rules_in_list(hdev, clear_list); 7423 } 7424 7425 hclge_sync_fd_user_def_cfg(hdev, false); 7426 7427 hclge_sync_fd_list(hdev, &hdev->fd_rule_list); 7428 } 7429 7430 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) 7431 { 7432 struct hclge_vport *vport = hclge_get_vport(handle); 7433 struct hclge_dev *hdev = vport->back; 7434 7435 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || 7436 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); 7437 } 7438 7439 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle) 7440 { 7441 struct hclge_vport *vport = hclge_get_vport(handle); 7442 struct hclge_dev *hdev = vport->back; 7443 7444 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, 
&hdev->hw.hw.comm_state); 7445 } 7446 7447 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) 7448 { 7449 struct hclge_vport *vport = hclge_get_vport(handle); 7450 struct hclge_dev *hdev = vport->back; 7451 7452 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 7453 } 7454 7455 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) 7456 { 7457 struct hclge_vport *vport = hclge_get_vport(handle); 7458 struct hclge_dev *hdev = vport->back; 7459 7460 return hdev->rst_stats.hw_reset_done_cnt; 7461 } 7462 7463 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) 7464 { 7465 struct hclge_vport *vport = hclge_get_vport(handle); 7466 struct hclge_dev *hdev = vport->back; 7467 7468 hdev->fd_en = enable; 7469 7470 if (!enable) 7471 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); 7472 else 7473 hclge_restore_fd_entries(handle); 7474 7475 hclge_task_schedule(hdev, 0); 7476 } 7477 7478 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) 7479 { 7480 #define HCLGE_LINK_STATUS_WAIT_CNT 3 7481 7482 struct hclge_desc desc; 7483 struct hclge_config_mac_mode_cmd *req = 7484 (struct hclge_config_mac_mode_cmd *)desc.data; 7485 u32 loop_en = 0; 7486 int ret; 7487 7488 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); 7489 7490 if (enable) { 7491 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U); 7492 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U); 7493 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U); 7494 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U); 7495 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U); 7496 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U); 7497 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U); 7498 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U); 7499 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U); 7500 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U); 7501 } 7502 7503 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 7504 7505 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7506 if (ret) { 7507 dev_err(&hdev->pdev->dev, 7508 "mac enable fail, ret =%d.\n", ret); 7509 return; 7510 } 7511 7512 if (!enable) 7513 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, 7514 HCLGE_LINK_STATUS_WAIT_CNT); 7515 } 7516 7517 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, 7518 u8 switch_param, u8 param_mask) 7519 { 7520 struct hclge_mac_vlan_switch_cmd *req; 7521 struct hclge_desc desc; 7522 u32 func_id; 7523 int ret; 7524 7525 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0); 7526 req = (struct hclge_mac_vlan_switch_cmd *)desc.data; 7527 7528 /* read current config parameter */ 7529 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM, 7530 true); 7531 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL; 7532 req->func_id = cpu_to_le32(func_id); 7533 7534 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7535 if (ret) { 7536 dev_err(&hdev->pdev->dev, 7537 "read mac vlan switch parameter fail, ret = %d\n", ret); 7538 return ret; 7539 } 7540 7541 /* modify and write new config parameter */ 7542 hclge_comm_cmd_reuse_desc(&desc, false); 7543 req->switch_param = (req->switch_param & param_mask) | switch_param; 7544 req->param_mask = param_mask; 7545 7546 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7547 if (ret) 7548 dev_err(&hdev->pdev->dev, 7549 "set mac vlan switch parameter fail, ret = %d\n", ret); 7550 return ret; 7551 } 7552 7553 static void hclge_phy_link_status_wait(struct hclge_dev *hdev, 7554 int link_ret) 7555 { 7556 #define HCLGE_PHY_LINK_STATUS_NUM 200 7557 
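/* descriptive note: poll the PHY link state until it matches link_ret, sleeping HCLGE_LINK_STATUS_MS between reads and giving up after HCLGE_PHY_LINK_STATUS_NUM attempts */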
7558 struct phy_device *phydev = hdev->hw.mac.phydev; 7559 int i = 0; 7560 int ret; 7561 7562 do { 7563 ret = phy_read_status(phydev); 7564 if (ret) { 7565 dev_err(&hdev->pdev->dev, 7566 "phy update link status fail, ret = %d\n", ret); 7567 return; 7568 } 7569 7570 if (phydev->link == link_ret) 7571 break; 7572 7573 msleep(HCLGE_LINK_STATUS_MS); 7574 } while (++i < HCLGE_PHY_LINK_STATUS_NUM); 7575 } 7576 7577 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, 7578 int wait_cnt) 7579 { 7580 int link_status; 7581 int i = 0; 7582 int ret; 7583 7584 do { 7585 ret = hclge_get_mac_link_status(hdev, &link_status); 7586 if (ret) 7587 return ret; 7588 if (link_status == link_ret) 7589 return 0; 7590 7591 msleep(HCLGE_LINK_STATUS_MS); 7592 } while (++i < wait_cnt); 7593 return -EBUSY; 7594 } 7595 7596 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, 7597 bool is_phy) 7598 { 7599 #define HCLGE_MAC_LINK_STATUS_NUM 100 7600 7601 int link_ret; 7602 7603 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; 7604 7605 if (is_phy) 7606 hclge_phy_link_status_wait(hdev, link_ret); 7607 7608 return hclge_mac_link_status_wait(hdev, link_ret, 7609 HCLGE_MAC_LINK_STATUS_NUM); 7610 } 7611 7612 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) 7613 { 7614 struct hclge_config_mac_mode_cmd *req; 7615 struct hclge_desc desc; 7616 u32 loop_en; 7617 int ret; 7618 7619 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; 7620 /* 1 Read out the MAC mode config at first */ 7621 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 7622 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7623 if (ret) { 7624 dev_err(&hdev->pdev->dev, 7625 "mac loopback get fail, ret =%d.\n", ret); 7626 return ret; 7627 } 7628 7629 /* 2 Then setup the loopback flag */ 7630 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); 7631 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); 7632 7633 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); 7634 7635 /* 3 Config mac work mode with loopback flag 7636 * and its original configure parameters 7637 */ 7638 hclge_comm_cmd_reuse_desc(&desc, false); 7639 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7640 if (ret) 7641 dev_err(&hdev->pdev->dev, 7642 "mac loopback set fail, ret =%d.\n", ret); 7643 return ret; 7644 } 7645 7646 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, 7647 enum hnae3_loop loop_mode) 7648 { 7649 struct hclge_common_lb_cmd *req; 7650 struct hclge_desc desc; 7651 u8 loop_mode_b; 7652 int ret; 7653 7654 req = (struct hclge_common_lb_cmd *)desc.data; 7655 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false); 7656 7657 switch (loop_mode) { 7658 case HNAE3_LOOP_SERIAL_SERDES: 7659 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 7660 break; 7661 case HNAE3_LOOP_PARALLEL_SERDES: 7662 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; 7663 break; 7664 case HNAE3_LOOP_PHY: 7665 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B; 7666 break; 7667 default: 7668 dev_err(&hdev->pdev->dev, 7669 "unsupported loopback mode %d\n", loop_mode); 7670 return -ENOTSUPP; 7671 } 7672 7673 req->mask = loop_mode_b; 7674 if (en) 7675 req->enable = loop_mode_b; 7676 7677 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7678 if (ret) 7679 dev_err(&hdev->pdev->dev, 7680 "failed to send loopback cmd, loop_mode = %d, ret = %d\n", 7681 loop_mode, ret); 7682 7683 return ret; 7684 } 7685 7686 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) 7687 { 7688 #define HCLGE_COMMON_LB_RETRY_MS 10 7689 #define HCLGE_COMMON_LB_RETRY_NUM 100 7690 7691 struct hclge_common_lb_cmd *req; 7692 struct hclge_desc desc; 7693 u32 i = 0; 7694 int ret; 7695 7696 req = (struct hclge_common_lb_cmd *)desc.data; 7697 7698 do { 7699 msleep(HCLGE_COMMON_LB_RETRY_MS); 7700 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, 7701 true); 7702 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 7703 if (ret) { 7704 dev_err(&hdev->pdev->dev, 7705 "failed to get loopback done status, ret = %d\n", 7706 ret); 7707 return ret; 7708 } 7709 } while (++i < HCLGE_COMMON_LB_RETRY_NUM && 7710 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B)); 7711 7712 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) { 7713 dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); 7714 return -EBUSY; 7715 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) { 7716 dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); 7717 return -EIO; 7718 } 7719 7720 return 0; 7721 } 7722 7723 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, 7724 enum hnae3_loop loop_mode) 7725 { 7726 int ret; 7727 7728 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); 7729 if (ret) 7730 return ret; 7731 7732 return hclge_cfg_common_loopback_wait(hdev); 7733 } 7734 7735 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, 7736 enum hnae3_loop loop_mode) 7737 { 7738 int ret; 7739 7740 ret = hclge_cfg_common_loopback(hdev, en, loop_mode); 7741 if (ret) 7742 return ret; 7743 7744 hclge_cfg_mac_mode(hdev, en); 7745 7746 ret = hclge_mac_phy_link_status_wait(hdev, en, false); 7747 if (ret) 7748 dev_err(&hdev->pdev->dev, 7749 "serdes loopback config mac mode timeout\n"); 7750 7751 return ret; 7752 } 7753 7754 static int hclge_enable_phy_loopback(struct hclge_dev *hdev, 7755 struct phy_device *phydev) 7756 { 7757 int ret; 7758 7759 if (!phydev->suspended) { 7760 ret = phy_suspend(phydev); 7761 if (ret) 7762 return ret; 7763 } 
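/* descriptive note: resume the PHY so it is active again before the internal loopback is enabled below */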
7764 7765 ret = phy_resume(phydev); 7766 if (ret) 7767 return ret; 7768 7769 return phy_loopback(phydev, true); 7770 } 7771 7772 static int hclge_disable_phy_loopback(struct hclge_dev *hdev, 7773 struct phy_device *phydev) 7774 { 7775 int ret; 7776 7777 ret = phy_loopback(phydev, false); 7778 if (ret) 7779 return ret; 7780 7781 return phy_suspend(phydev); 7782 } 7783 7784 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) 7785 { 7786 struct phy_device *phydev = hdev->hw.mac.phydev; 7787 int ret; 7788 7789 if (!phydev) { 7790 if (hnae3_dev_phy_imp_supported(hdev)) 7791 return hclge_set_common_loopback(hdev, en, 7792 HNAE3_LOOP_PHY); 7793 return -ENOTSUPP; 7794 } 7795 7796 if (en) 7797 ret = hclge_enable_phy_loopback(hdev, phydev); 7798 else 7799 ret = hclge_disable_phy_loopback(hdev, phydev); 7800 if (ret) { 7801 dev_err(&hdev->pdev->dev, 7802 "set phy loopback fail, ret = %d\n", ret); 7803 return ret; 7804 } 7805 7806 hclge_cfg_mac_mode(hdev, en); 7807 7808 ret = hclge_mac_phy_link_status_wait(hdev, en, true); 7809 if (ret) 7810 dev_err(&hdev->pdev->dev, 7811 "phy loopback config mac mode timeout\n"); 7812 7813 return ret; 7814 } 7815 7816 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, 7817 u16 stream_id, bool enable) 7818 { 7819 struct hclge_desc desc; 7820 struct hclge_cfg_com_tqp_queue_cmd *req = 7821 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; 7822 7823 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 7824 req->tqp_id = cpu_to_le16(tqp_id); 7825 req->stream_id = cpu_to_le16(stream_id); 7826 if (enable) 7827 req->enable |= 1U << HCLGE_TQP_ENABLE_B; 7828 7829 return hclge_cmd_send(&hdev->hw, &desc, 1); 7830 } 7831 7832 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable) 7833 { 7834 struct hclge_vport *vport = hclge_get_vport(handle); 7835 struct hclge_dev *hdev = vport->back; 7836 int ret; 7837 u16 i; 7838 7839 for (i = 0; i < handle->kinfo.num_tqps; i++) { 7840 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); 7841 if (ret) 7842 return ret; 7843 } 7844 return 0; 7845 } 7846 7847 static int hclge_set_loopback(struct hnae3_handle *handle, 7848 enum hnae3_loop loop_mode, bool en) 7849 { 7850 struct hclge_vport *vport = hclge_get_vport(handle); 7851 struct hclge_dev *hdev = vport->back; 7852 int ret = 0; 7853 7854 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By 7855 * default, SSU loopback is enabled, so if the SMAC and the DMAC are 7856 * the same, the packets are looped back in the SSU. If SSU loopback 7857 * is disabled, packets can reach MAC even if SMAC is the same as DMAC. 7858 */ 7859 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 7860 u8 switch_param = en ? 
0 : BIT(HCLGE_SWITCH_ALW_LPBK_B); 7861 7862 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, 7863 HCLGE_SWITCH_ALW_LPBK_MASK); 7864 if (ret) 7865 return ret; 7866 } 7867 7868 switch (loop_mode) { 7869 case HNAE3_LOOP_APP: 7870 ret = hclge_set_app_loopback(hdev, en); 7871 break; 7872 case HNAE3_LOOP_SERIAL_SERDES: 7873 case HNAE3_LOOP_PARALLEL_SERDES: 7874 ret = hclge_set_common_loopback(hdev, en, loop_mode); 7875 break; 7876 case HNAE3_LOOP_PHY: 7877 ret = hclge_set_phy_loopback(hdev, en); 7878 break; 7879 case HNAE3_LOOP_EXTERNAL: 7880 break; 7881 default: 7882 ret = -ENOTSUPP; 7883 dev_err(&hdev->pdev->dev, 7884 "loop_mode %d is not supported\n", loop_mode); 7885 break; 7886 } 7887 7888 if (ret) 7889 return ret; 7890 7891 ret = hclge_tqp_enable(handle, en); 7892 if (ret) 7893 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", 7894 en ? "enable" : "disable", ret); 7895 7896 return ret; 7897 } 7898 7899 static int hclge_set_default_loopback(struct hclge_dev *hdev) 7900 { 7901 int ret; 7902 7903 ret = hclge_set_app_loopback(hdev, false); 7904 if (ret) 7905 return ret; 7906 7907 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); 7908 if (ret) 7909 return ret; 7910 7911 return hclge_cfg_common_loopback(hdev, false, 7912 HNAE3_LOOP_PARALLEL_SERDES); 7913 } 7914 7915 static void hclge_flush_link_update(struct hclge_dev *hdev) 7916 { 7917 #define HCLGE_FLUSH_LINK_TIMEOUT 100000 7918 7919 unsigned long last = hdev->serv_processed_cnt; 7920 int i = 0; 7921 7922 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && 7923 i++ < HCLGE_FLUSH_LINK_TIMEOUT && 7924 last == hdev->serv_processed_cnt) 7925 usleep_range(1, 1); 7926 } 7927 7928 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) 7929 { 7930 struct hclge_vport *vport = hclge_get_vport(handle); 7931 struct hclge_dev *hdev = vport->back; 7932 7933 if (enable) { 7934 hclge_task_schedule(hdev, 0); 7935 } else { 7936 /* Set the DOWN flag here to disable link updating */ 7937 set_bit(HCLGE_STATE_DOWN, &hdev->state); 7938 7939 /* flush memory to make sure DOWN is seen by service task */ 7940 smp_mb__before_atomic(); 7941 hclge_flush_link_update(hdev); 7942 } 7943 } 7944 7945 static int hclge_ae_start(struct hnae3_handle *handle) 7946 { 7947 struct hclge_vport *vport = hclge_get_vport(handle); 7948 struct hclge_dev *hdev = vport->back; 7949 7950 /* mac enable */ 7951 hclge_cfg_mac_mode(hdev, true); 7952 clear_bit(HCLGE_STATE_DOWN, &hdev->state); 7953 hdev->hw.mac.link = 0; 7954 7955 /* reset tqp stats */ 7956 hclge_comm_reset_tqp_stats(handle); 7957 7958 hclge_mac_start_phy(hdev); 7959 7960 return 0; 7961 } 7962 7963 static void hclge_ae_stop(struct hnae3_handle *handle) 7964 { 7965 struct hclge_vport *vport = hclge_get_vport(handle); 7966 struct hclge_dev *hdev = vport->back; 7967 7968 set_bit(HCLGE_STATE_DOWN, &hdev->state); 7969 spin_lock_bh(&hdev->fd_rule_lock); 7970 hclge_clear_arfs_rules(hdev); 7971 spin_unlock_bh(&hdev->fd_rule_lock); 7972 7973 /* If it is not PF reset or FLR, the firmware will disable the MAC, 7974 * so it only need to stop phy here. 
7975 */ 7976 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { 7977 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, 7978 HCLGE_PFC_DISABLE); 7979 if (hdev->reset_type != HNAE3_FUNC_RESET && 7980 hdev->reset_type != HNAE3_FLR_RESET) { 7981 hclge_mac_stop_phy(hdev); 7982 hclge_update_link_status(hdev); 7983 return; 7984 } 7985 } 7986 7987 hclge_reset_tqp(handle); 7988 7989 hclge_config_mac_tnl_int(hdev, false); 7990 7991 /* Mac disable */ 7992 hclge_cfg_mac_mode(hdev, false); 7993 7994 hclge_mac_stop_phy(hdev); 7995 7996 /* reset tqp stats */ 7997 hclge_comm_reset_tqp_stats(handle); 7998 hclge_update_link_status(hdev); 7999 } 8000 8001 int hclge_vport_start(struct hclge_vport *vport) 8002 { 8003 struct hclge_dev *hdev = vport->back; 8004 8005 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 8006 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 8007 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 8008 vport->last_active_jiffies = jiffies; 8009 vport->need_notify = 0; 8010 8011 if (test_bit(vport->vport_id, hdev->vport_config_block)) { 8012 if (vport->vport_id) { 8013 hclge_restore_mac_table_common(vport); 8014 hclge_restore_vport_vlan_table(vport); 8015 } else { 8016 hclge_restore_hw_table(hdev); 8017 } 8018 } 8019 8020 clear_bit(vport->vport_id, hdev->vport_config_block); 8021 8022 return 0; 8023 } 8024 8025 void hclge_vport_stop(struct hclge_vport *vport) 8026 { 8027 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 8028 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 8029 vport->need_notify = 0; 8030 } 8031 8032 static int hclge_client_start(struct hnae3_handle *handle) 8033 { 8034 struct hclge_vport *vport = hclge_get_vport(handle); 8035 8036 return hclge_vport_start(vport); 8037 } 8038 8039 static void hclge_client_stop(struct hnae3_handle *handle) 8040 { 8041 struct hclge_vport *vport = hclge_get_vport(handle); 8042 8043 hclge_vport_stop(vport); 8044 } 8045 8046 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, 8047 u16 cmdq_resp, u8 resp_code, 8048 enum hclge_mac_vlan_tbl_opcode op) 8049 { 8050 struct hclge_dev *hdev = vport->back; 8051 8052 if (cmdq_resp) { 8053 dev_err(&hdev->pdev->dev, 8054 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n", 8055 cmdq_resp); 8056 return -EIO; 8057 } 8058 8059 if (op == HCLGE_MAC_VLAN_ADD) { 8060 if (!resp_code || resp_code == 1) 8061 return 0; 8062 else if (resp_code == HCLGE_ADD_UC_OVERFLOW || 8063 resp_code == HCLGE_ADD_MC_OVERFLOW) 8064 return -ENOSPC; 8065 8066 dev_err(&hdev->pdev->dev, 8067 "add mac addr failed for undefined, code=%u.\n", 8068 resp_code); 8069 return -EIO; 8070 } else if (op == HCLGE_MAC_VLAN_REMOVE) { 8071 if (!resp_code) { 8072 return 0; 8073 } else if (resp_code == 1) { 8074 dev_dbg(&hdev->pdev->dev, 8075 "remove mac addr failed for miss.\n"); 8076 return -ENOENT; 8077 } 8078 8079 dev_err(&hdev->pdev->dev, 8080 "remove mac addr failed for undefined, code=%u.\n", 8081 resp_code); 8082 return -EIO; 8083 } else if (op == HCLGE_MAC_VLAN_LKUP) { 8084 if (!resp_code) { 8085 return 0; 8086 } else if (resp_code == 1) { 8087 dev_dbg(&hdev->pdev->dev, 8088 "lookup mac addr failed for miss.\n"); 8089 return -ENOENT; 8090 } 8091 8092 dev_err(&hdev->pdev->dev, 8093 "lookup mac addr failed for undefined, code=%u.\n", 8094 resp_code); 8095 return -EIO; 8096 } 8097 8098 dev_err(&hdev->pdev->dev, 8099 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op); 8100 8101 return -EINVAL; 8102 } 8103 8104 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) 
8105 { 8106 #define HCLGE_VF_NUM_IN_FIRST_DESC 192 8107 8108 unsigned int word_num; 8109 unsigned int bit_num; 8110 8111 if (vfid > 255 || vfid < 0) 8112 return -EIO; 8113 8114 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) { 8115 word_num = vfid / 32; 8116 bit_num = vfid % 32; 8117 if (clr) 8118 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 8119 else 8120 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); 8121 } else { 8122 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32; 8123 bit_num = vfid % 32; 8124 if (clr) 8125 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); 8126 else 8127 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); 8128 } 8129 8130 return 0; 8131 } 8132 8133 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) 8134 { 8135 #define HCLGE_DESC_NUMBER 3 8136 #define HCLGE_FUNC_NUMBER_PER_DESC 6 8137 int i, j; 8138 8139 for (i = 1; i < HCLGE_DESC_NUMBER; i++) 8140 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) 8141 if (desc[i].data[j]) 8142 return false; 8143 8144 return true; 8145 } 8146 8147 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, 8148 const u8 *addr, bool is_mc) 8149 { 8150 const unsigned char *mac_addr = addr; 8151 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | 8152 (mac_addr[0]) | (mac_addr[1] << 8); 8153 u32 low_val = mac_addr[4] | (mac_addr[5] << 8); 8154 8155 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 8156 if (is_mc) { 8157 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); 8158 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); 8159 } 8160 8161 new_req->mac_addr_hi32 = cpu_to_le32(high_val); 8162 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); 8163 } 8164 8165 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, 8166 struct hclge_mac_vlan_tbl_entry_cmd *req) 8167 { 8168 struct hclge_dev *hdev = vport->back; 8169 struct hclge_desc desc; 8170 u8 resp_code; 8171 u16 retval; 8172 int ret; 8173 8174 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); 8175 8176 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8177 8178 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8179 if (ret) { 8180 dev_err(&hdev->pdev->dev, 8181 "del mac addr failed for cmd_send, ret =%d.\n", 8182 ret); 8183 return ret; 8184 } 8185 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 8186 retval = le16_to_cpu(desc.retval); 8187 8188 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 8189 HCLGE_MAC_VLAN_REMOVE); 8190 } 8191 8192 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, 8193 struct hclge_mac_vlan_tbl_entry_cmd *req, 8194 struct hclge_desc *desc, 8195 bool is_mc) 8196 { 8197 struct hclge_dev *hdev = vport->back; 8198 u8 resp_code; 8199 u16 retval; 8200 int ret; 8201 8202 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); 8203 if (is_mc) { 8204 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8205 memcpy(desc[0].data, 8206 req, 8207 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8208 hclge_cmd_setup_basic_desc(&desc[1], 8209 HCLGE_OPC_MAC_VLAN_ADD, 8210 true); 8211 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8212 hclge_cmd_setup_basic_desc(&desc[2], 8213 HCLGE_OPC_MAC_VLAN_ADD, 8214 true); 8215 ret = hclge_cmd_send(&hdev->hw, desc, 3); 8216 } else { 8217 memcpy(desc[0].data, 8218 req, 8219 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8220 ret = hclge_cmd_send(&hdev->hw, desc, 1); 8221 } 8222 if (ret) { 8223 dev_err(&hdev->pdev->dev, 8224 "lookup mac 
addr failed for cmd_send, ret =%d.\n", 8225 ret); 8226 return ret; 8227 } 8228 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; 8229 retval = le16_to_cpu(desc[0].retval); 8230 8231 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, 8232 HCLGE_MAC_VLAN_LKUP); 8233 } 8234 8235 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, 8236 struct hclge_mac_vlan_tbl_entry_cmd *req, 8237 struct hclge_desc *mc_desc) 8238 { 8239 struct hclge_dev *hdev = vport->back; 8240 int cfg_status; 8241 u8 resp_code; 8242 u16 retval; 8243 int ret; 8244 8245 if (!mc_desc) { 8246 struct hclge_desc desc; 8247 8248 hclge_cmd_setup_basic_desc(&desc, 8249 HCLGE_OPC_MAC_VLAN_ADD, 8250 false); 8251 memcpy(desc.data, req, 8252 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8253 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8254 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 8255 retval = le16_to_cpu(desc.retval); 8256 8257 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 8258 resp_code, 8259 HCLGE_MAC_VLAN_ADD); 8260 } else { 8261 hclge_comm_cmd_reuse_desc(&mc_desc[0], false); 8262 mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8263 hclge_comm_cmd_reuse_desc(&mc_desc[1], false); 8264 mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 8265 hclge_comm_cmd_reuse_desc(&mc_desc[2], false); 8266 mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT); 8267 memcpy(mc_desc[0].data, req, 8268 sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); 8269 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); 8270 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; 8271 retval = le16_to_cpu(mc_desc[0].retval); 8272 8273 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, 8274 resp_code, 8275 HCLGE_MAC_VLAN_ADD); 8276 } 8277 8278 if (ret) { 8279 dev_err(&hdev->pdev->dev, 8280 "add mac addr failed for cmd_send, ret =%d.\n", 8281 ret); 8282 return ret; 8283 } 8284 8285 return cfg_status; 8286 } 8287 8288 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, 8289 u16 *allocated_size) 8290 { 8291 struct hclge_umv_spc_alc_cmd *req; 8292 struct hclge_desc desc; 8293 int ret; 8294 8295 req = (struct hclge_umv_spc_alc_cmd *)desc.data; 8296 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false); 8297 8298 req->space_size = cpu_to_le32(space_size); 8299 8300 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 8301 if (ret) { 8302 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", 8303 ret); 8304 return ret; 8305 } 8306 8307 *allocated_size = le32_to_cpu(desc.data[1]); 8308 8309 return 0; 8310 } 8311 8312 static int hclge_init_umv_space(struct hclge_dev *hdev) 8313 { 8314 u16 allocated_size = 0; 8315 int ret; 8316 8317 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); 8318 if (ret) 8319 return ret; 8320 8321 if (allocated_size < hdev->wanted_umv_size) 8322 dev_warn(&hdev->pdev->dev, 8323 "failed to alloc umv space, want %u, get %u\n", 8324 hdev->wanted_umv_size, allocated_size); 8325 8326 hdev->max_umv_size = allocated_size; 8327 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); 8328 hdev->share_umv_size = hdev->priv_umv_size + 8329 hdev->max_umv_size % (hdev->num_alloc_vport + 1); 8330 8331 if (hdev->ae_dev->dev_specs.mc_mac_size) 8332 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); 8333 8334 return 0; 8335 } 8336 8337 static void hclge_reset_umv_space(struct hclge_dev *hdev) 8338 { 8339 struct hclge_vport *vport; 8340 int i; 8341 8342 for (i = 0; i < hdev->num_alloc_vport; i++) { 8343 vport = 
&hdev->vport[i]; 8344 vport->used_umv_num = 0; 8345 } 8346 8347 mutex_lock(&hdev->vport_lock); 8348 hdev->share_umv_size = hdev->priv_umv_size + 8349 hdev->max_umv_size % (hdev->num_alloc_vport + 1); 8350 mutex_unlock(&hdev->vport_lock); 8351 8352 hdev->used_mc_mac_num = 0; 8353 } 8354 8355 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) 8356 { 8357 struct hclge_dev *hdev = vport->back; 8358 bool is_full; 8359 8360 if (need_lock) 8361 mutex_lock(&hdev->vport_lock); 8362 8363 is_full = (vport->used_umv_num >= hdev->priv_umv_size && 8364 hdev->share_umv_size == 0); 8365 8366 if (need_lock) 8367 mutex_unlock(&hdev->vport_lock); 8368 8369 return is_full; 8370 } 8371 8372 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) 8373 { 8374 struct hclge_dev *hdev = vport->back; 8375 8376 if (is_free) { 8377 if (vport->used_umv_num > hdev->priv_umv_size) 8378 hdev->share_umv_size++; 8379 8380 if (vport->used_umv_num > 0) 8381 vport->used_umv_num--; 8382 } else { 8383 if (vport->used_umv_num >= hdev->priv_umv_size && 8384 hdev->share_umv_size > 0) 8385 hdev->share_umv_size--; 8386 vport->used_umv_num++; 8387 } 8388 } 8389 8390 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list, 8391 const u8 *mac_addr) 8392 { 8393 struct hclge_mac_node *mac_node, *tmp; 8394 8395 list_for_each_entry_safe(mac_node, tmp, list, node) 8396 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 8397 return mac_node; 8398 8399 return NULL; 8400 } 8401 8402 static void hclge_update_mac_node(struct hclge_mac_node *mac_node, 8403 enum HCLGE_MAC_NODE_STATE state) 8404 { 8405 switch (state) { 8406 /* from set_rx_mode or tmp_add_list */ 8407 case HCLGE_MAC_TO_ADD: 8408 if (mac_node->state == HCLGE_MAC_TO_DEL) 8409 mac_node->state = HCLGE_MAC_ACTIVE; 8410 break; 8411 /* only from set_rx_mode */ 8412 case HCLGE_MAC_TO_DEL: 8413 if (mac_node->state == HCLGE_MAC_TO_ADD) { 8414 list_del(&mac_node->node); 8415 kfree(mac_node); 8416 } else { 8417 mac_node->state = HCLGE_MAC_TO_DEL; 8418 } 8419 break; 8420 /* only from tmp_add_list, the mac_node->state won't be 8421 * ACTIVE. 8422 */ 8423 case HCLGE_MAC_ACTIVE: 8424 if (mac_node->state == HCLGE_MAC_TO_ADD) 8425 mac_node->state = HCLGE_MAC_ACTIVE; 8426 8427 break; 8428 } 8429 } 8430 8431 int hclge_update_mac_list(struct hclge_vport *vport, 8432 enum HCLGE_MAC_NODE_STATE state, 8433 enum HCLGE_MAC_ADDR_TYPE mac_type, 8434 const unsigned char *addr) 8435 { 8436 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8437 struct hclge_dev *hdev = vport->back; 8438 struct hclge_mac_node *mac_node; 8439 struct list_head *list; 8440 8441 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 8442 &vport->uc_mac_list : &vport->mc_mac_list; 8443 8444 spin_lock_bh(&vport->mac_list_lock); 8445 8446 /* if the mac addr is already in the mac list, no need to add a new 8447 * one into it, just check the mac addr state, convert it to a new 8448 * state, or just remove it, or do nothing. 
8449 */ 8450 mac_node = hclge_find_mac_node(list, addr); 8451 if (mac_node) { 8452 hclge_update_mac_node(mac_node, state); 8453 spin_unlock_bh(&vport->mac_list_lock); 8454 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 8455 return 0; 8456 } 8457 8458 /* if this address is never added, unnecessary to delete */ 8459 if (state == HCLGE_MAC_TO_DEL) { 8460 spin_unlock_bh(&vport->mac_list_lock); 8461 hnae3_format_mac_addr(format_mac_addr, addr); 8462 dev_err(&hdev->pdev->dev, 8463 "failed to delete address %s from mac list\n", 8464 format_mac_addr); 8465 return -ENOENT; 8466 } 8467 8468 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 8469 if (!mac_node) { 8470 spin_unlock_bh(&vport->mac_list_lock); 8471 return -ENOMEM; 8472 } 8473 8474 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 8475 8476 mac_node->state = state; 8477 ether_addr_copy(mac_node->mac_addr, addr); 8478 list_add_tail(&mac_node->node, list); 8479 8480 spin_unlock_bh(&vport->mac_list_lock); 8481 8482 return 0; 8483 } 8484 8485 static int hclge_add_uc_addr(struct hnae3_handle *handle, 8486 const unsigned char *addr) 8487 { 8488 struct hclge_vport *vport = hclge_get_vport(handle); 8489 8490 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC, 8491 addr); 8492 } 8493 8494 int hclge_add_uc_addr_common(struct hclge_vport *vport, 8495 const unsigned char *addr) 8496 { 8497 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8498 struct hclge_dev *hdev = vport->back; 8499 struct hclge_mac_vlan_tbl_entry_cmd req; 8500 struct hclge_desc desc; 8501 u16 egress_port = 0; 8502 int ret; 8503 8504 /* mac addr check */ 8505 if (is_zero_ether_addr(addr) || 8506 is_broadcast_ether_addr(addr) || 8507 is_multicast_ether_addr(addr)) { 8508 hnae3_format_mac_addr(format_mac_addr, addr); 8509 dev_err(&hdev->pdev->dev, 8510 "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n", 8511 format_mac_addr, is_zero_ether_addr(addr), 8512 is_broadcast_ether_addr(addr), 8513 is_multicast_ether_addr(addr)); 8514 return -EINVAL; 8515 } 8516 8517 memset(&req, 0, sizeof(req)); 8518 8519 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, 8520 HCLGE_MAC_EPORT_VFID_S, vport->vport_id); 8521 8522 req.egress_port = cpu_to_le16(egress_port); 8523 8524 hclge_prepare_mac_addr(&req, addr, false); 8525 8526 /* Lookup the mac address in the mac_vlan table, and add 8527 * it if the entry is inexistent. Repeated unicast entry 8528 * is not allowed in the mac vlan table. 
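* A lookup that returns -ENOENT means the address is not in the table yet and may be added if UMV space allows; a return of 0 means the entry already exists and is reported as -EEXIST below.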
8529 */ 8530 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false); 8531 if (ret == -ENOENT) { 8532 mutex_lock(&hdev->vport_lock); 8533 if (!hclge_is_umv_space_full(vport, false)) { 8534 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL); 8535 if (!ret) 8536 hclge_update_umv_space(vport, false); 8537 mutex_unlock(&hdev->vport_lock); 8538 return ret; 8539 } 8540 mutex_unlock(&hdev->vport_lock); 8541 8542 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE)) 8543 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", 8544 hdev->priv_umv_size); 8545 8546 return -ENOSPC; 8547 } 8548 8549 /* check if we just hit the duplicate */ 8550 if (!ret) 8551 return -EEXIST; 8552 8553 return ret; 8554 } 8555 8556 static int hclge_rm_uc_addr(struct hnae3_handle *handle, 8557 const unsigned char *addr) 8558 { 8559 struct hclge_vport *vport = hclge_get_vport(handle); 8560 8561 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC, 8562 addr); 8563 } 8564 8565 int hclge_rm_uc_addr_common(struct hclge_vport *vport, 8566 const unsigned char *addr) 8567 { 8568 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8569 struct hclge_dev *hdev = vport->back; 8570 struct hclge_mac_vlan_tbl_entry_cmd req; 8571 int ret; 8572 8573 /* mac addr check */ 8574 if (is_zero_ether_addr(addr) || 8575 is_broadcast_ether_addr(addr) || 8576 is_multicast_ether_addr(addr)) { 8577 hnae3_format_mac_addr(format_mac_addr, addr); 8578 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", 8579 format_mac_addr); 8580 return -EINVAL; 8581 } 8582 8583 memset(&req, 0, sizeof(req)); 8584 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); 8585 hclge_prepare_mac_addr(&req, addr, false); 8586 ret = hclge_remove_mac_vlan_tbl(vport, &req); 8587 if (!ret || ret == -ENOENT) { 8588 mutex_lock(&hdev->vport_lock); 8589 hclge_update_umv_space(vport, true); 8590 mutex_unlock(&hdev->vport_lock); 8591 return 0; 8592 } 8593 8594 return ret; 8595 } 8596 8597 static int hclge_add_mc_addr(struct hnae3_handle *handle, 8598 const unsigned char *addr) 8599 { 8600 struct hclge_vport *vport = hclge_get_vport(handle); 8601 8602 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC, 8603 addr); 8604 } 8605 8606 int hclge_add_mc_addr_common(struct hclge_vport *vport, 8607 const unsigned char *addr) 8608 { 8609 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8610 struct hclge_dev *hdev = vport->back; 8611 struct hclge_mac_vlan_tbl_entry_cmd req; 8612 struct hclge_desc desc[3]; 8613 bool is_new_addr = false; 8614 int status; 8615 8616 /* mac addr check */ 8617 if (!is_multicast_ether_addr(addr)) { 8618 hnae3_format_mac_addr(format_mac_addr, addr); 8619 dev_err(&hdev->pdev->dev, 8620 "Add mc mac err! 
invalid mac:%s.\n", 8621 format_mac_addr); 8622 return -EINVAL; 8623 } 8624 memset(&req, 0, sizeof(req)); 8625 hclge_prepare_mac_addr(&req, addr, true); 8626 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 8627 if (status) { 8628 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && 8629 hdev->used_mc_mac_num >= 8630 hdev->ae_dev->dev_specs.mc_mac_size) 8631 goto err_no_space; 8632 8633 is_new_addr = true; 8634 8635 /* This mac addr do not exist, add new entry for it */ 8636 memset(desc[0].data, 0, sizeof(desc[0].data)); 8637 memset(desc[1].data, 0, sizeof(desc[0].data)); 8638 memset(desc[2].data, 0, sizeof(desc[0].data)); 8639 } 8640 status = hclge_update_desc_vfid(desc, vport->vport_id, false); 8641 if (status) 8642 return status; 8643 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 8644 if (status == -ENOSPC) 8645 goto err_no_space; 8646 else if (!status && is_new_addr) 8647 hdev->used_mc_mac_num++; 8648 8649 return status; 8650 8651 err_no_space: 8652 /* if already overflow, not to print each time */ 8653 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) { 8654 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; 8655 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); 8656 } 8657 8658 return -ENOSPC; 8659 } 8660 8661 static int hclge_rm_mc_addr(struct hnae3_handle *handle, 8662 const unsigned char *addr) 8663 { 8664 struct hclge_vport *vport = hclge_get_vport(handle); 8665 8666 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC, 8667 addr); 8668 } 8669 8670 int hclge_rm_mc_addr_common(struct hclge_vport *vport, 8671 const unsigned char *addr) 8672 { 8673 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 8674 struct hclge_dev *hdev = vport->back; 8675 struct hclge_mac_vlan_tbl_entry_cmd req; 8676 enum hclge_comm_cmd_status status; 8677 struct hclge_desc desc[3]; 8678 8679 /* mac addr check */ 8680 if (!is_multicast_ether_addr(addr)) { 8681 hnae3_format_mac_addr(format_mac_addr, addr); 8682 dev_dbg(&hdev->pdev->dev, 8683 "Remove mc mac err! 
invalid mac:%s.\n", 8684 format_mac_addr); 8685 return -EINVAL; 8686 } 8687 8688 memset(&req, 0, sizeof(req)); 8689 hclge_prepare_mac_addr(&req, addr, true); 8690 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); 8691 if (!status) { 8692 /* This mac addr exist, remove this handle's VFID for it */ 8693 status = hclge_update_desc_vfid(desc, vport->vport_id, true); 8694 if (status) 8695 return status; 8696 8697 if (hclge_is_all_function_id_zero(desc)) { 8698 /* All the vfid is zero, so need to delete this entry */ 8699 status = hclge_remove_mac_vlan_tbl(vport, &req); 8700 if (!status) 8701 hdev->used_mc_mac_num--; 8702 } else { 8703 /* Not all the vfid is zero, update the vfid */ 8704 status = hclge_add_mac_vlan_tbl(vport, &req, desc); 8705 } 8706 } else if (status == -ENOENT) { 8707 status = 0; 8708 } 8709 8710 return status; 8711 } 8712 8713 static void hclge_sync_vport_mac_list(struct hclge_vport *vport, 8714 struct list_head *list, 8715 enum HCLGE_MAC_ADDR_TYPE mac_type) 8716 { 8717 int (*sync)(struct hclge_vport *vport, const unsigned char *addr); 8718 struct hclge_mac_node *mac_node, *tmp; 8719 int ret; 8720 8721 if (mac_type == HCLGE_MAC_ADDR_UC) 8722 sync = hclge_add_uc_addr_common; 8723 else 8724 sync = hclge_add_mc_addr_common; 8725 8726 list_for_each_entry_safe(mac_node, tmp, list, node) { 8727 ret = sync(vport, mac_node->mac_addr); 8728 if (!ret) { 8729 mac_node->state = HCLGE_MAC_ACTIVE; 8730 } else { 8731 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, 8732 &vport->state); 8733 8734 /* If one unicast mac address is existing in hardware, 8735 * we need to try whether other unicast mac addresses 8736 * are new addresses that can be added. 8737 * Multicast mac address can be reusable, even though 8738 * there is no space to add new multicast mac address, 8739 * we should check whether other mac addresses are 8740 * existing in hardware for reuse. 8741 */ 8742 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) || 8743 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC)) 8744 break; 8745 } 8746 } 8747 } 8748 8749 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport, 8750 struct list_head *list, 8751 enum HCLGE_MAC_ADDR_TYPE mac_type) 8752 { 8753 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); 8754 struct hclge_mac_node *mac_node, *tmp; 8755 int ret; 8756 8757 if (mac_type == HCLGE_MAC_ADDR_UC) 8758 unsync = hclge_rm_uc_addr_common; 8759 else 8760 unsync = hclge_rm_mc_addr_common; 8761 8762 list_for_each_entry_safe(mac_node, tmp, list, node) { 8763 ret = unsync(vport, mac_node->mac_addr); 8764 if (!ret || ret == -ENOENT) { 8765 list_del(&mac_node->node); 8766 kfree(mac_node); 8767 } else { 8768 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, 8769 &vport->state); 8770 break; 8771 } 8772 } 8773 } 8774 8775 static bool hclge_sync_from_add_list(struct list_head *add_list, 8776 struct list_head *mac_list) 8777 { 8778 struct hclge_mac_node *mac_node, *tmp, *new_node; 8779 bool all_added = true; 8780 8781 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 8782 if (mac_node->state == HCLGE_MAC_TO_ADD) 8783 all_added = false; 8784 8785 /* if the mac address from tmp_add_list is not in the 8786 * uc/mc_mac_list, it means have received a TO_DEL request 8787 * during the time window of adding the mac address into mac 8788 * table. if mac_node state is ACTIVE, then change it to TO_DEL, 8789 * then it will be removed at next time. else it must be TO_ADD, 8790 * this address hasn't been added into mac table, 8791 * so just remove the mac node. 
8792 */ 8793 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); 8794 if (new_node) { 8795 hclge_update_mac_node(new_node, mac_node->state); 8796 list_del(&mac_node->node); 8797 kfree(mac_node); 8798 } else if (mac_node->state == HCLGE_MAC_ACTIVE) { 8799 mac_node->state = HCLGE_MAC_TO_DEL; 8800 list_move_tail(&mac_node->node, mac_list); 8801 } else { 8802 list_del(&mac_node->node); 8803 kfree(mac_node); 8804 } 8805 } 8806 8807 return all_added; 8808 } 8809 8810 static void hclge_sync_from_del_list(struct list_head *del_list, 8811 struct list_head *mac_list) 8812 { 8813 struct hclge_mac_node *mac_node, *tmp, *new_node; 8814 8815 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 8816 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr); 8817 if (new_node) { 8818 /* If the mac addr exists in the mac list, it means 8819 * a new TO_ADD request was received during the time window 8820 * of configuring the mac address. The mac node 8821 * state is TO_ADD and the address is already in the 8822 * hardware (because the delete failed), so we just need 8823 * to change the mac node state to ACTIVE. 8824 */ 8825 new_node->state = HCLGE_MAC_ACTIVE; 8826 list_del(&mac_node->node); 8827 kfree(mac_node); 8828 } else { 8829 list_move_tail(&mac_node->node, mac_list); 8830 } 8831 } 8832 } 8833 8834 static void hclge_update_overflow_flags(struct hclge_vport *vport, 8835 enum HCLGE_MAC_ADDR_TYPE mac_type, 8836 bool is_all_added) 8837 { 8838 if (mac_type == HCLGE_MAC_ADDR_UC) { 8839 if (is_all_added) 8840 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE; 8841 else if (hclge_is_umv_space_full(vport, true)) 8842 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE; 8843 } else { 8844 if (is_all_added) 8845 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE; 8846 else 8847 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE; 8848 } 8849 } 8850 8851 static void hclge_sync_vport_mac_table(struct hclge_vport *vport, 8852 enum HCLGE_MAC_ADDR_TYPE mac_type) 8853 { 8854 struct hclge_mac_node *mac_node, *tmp, *new_node; 8855 struct list_head tmp_add_list, tmp_del_list; 8856 struct list_head *list; 8857 bool all_added; 8858 8859 INIT_LIST_HEAD(&tmp_add_list); 8860 INIT_LIST_HEAD(&tmp_del_list); 8861 8862 /* move the mac addr to the tmp_add_list and tmp_del_list, then 8863 * we can add/delete these mac addr outside the spin lock 8864 */ 8865 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 8866 &vport->uc_mac_list : &vport->mc_mac_list; 8867 8868 spin_lock_bh(&vport->mac_list_lock); 8869 8870 list_for_each_entry_safe(mac_node, tmp, list, node) { 8871 switch (mac_node->state) { 8872 case HCLGE_MAC_TO_DEL: 8873 list_move_tail(&mac_node->node, &tmp_del_list); 8874 break; 8875 case HCLGE_MAC_TO_ADD: 8876 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 8877 if (!new_node) 8878 goto stop_traverse; 8879 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 8880 new_node->state = mac_node->state; 8881 list_add_tail(&new_node->node, &tmp_add_list); 8882 break; 8883 default: 8884 break; 8885 } 8886 } 8887 8888 stop_traverse: 8889 spin_unlock_bh(&vport->mac_list_lock); 8890 8891 /* delete first, in order to get max mac table space for adding */ 8892 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type); 8893 hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type); 8894 8895 /* if some mac addresses failed to be added/deleted, move them back 8896 * to the mac_list and retry next time. 
8897 */ 8898 spin_lock_bh(&vport->mac_list_lock); 8899 8900 hclge_sync_from_del_list(&tmp_del_list, list); 8901 all_added = hclge_sync_from_add_list(&tmp_add_list, list); 8902 8903 spin_unlock_bh(&vport->mac_list_lock); 8904 8905 hclge_update_overflow_flags(vport, mac_type, all_added); 8906 } 8907 8908 static bool hclge_need_sync_mac_table(struct hclge_vport *vport) 8909 { 8910 struct hclge_dev *hdev = vport->back; 8911 8912 if (test_bit(vport->vport_id, hdev->vport_config_block)) 8913 return false; 8914 8915 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state)) 8916 return true; 8917 8918 return false; 8919 } 8920 8921 static void hclge_sync_mac_table(struct hclge_dev *hdev) 8922 { 8923 int i; 8924 8925 for (i = 0; i < hdev->num_alloc_vport; i++) { 8926 struct hclge_vport *vport = &hdev->vport[i]; 8927 8928 if (!hclge_need_sync_mac_table(vport)) 8929 continue; 8930 8931 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC); 8932 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC); 8933 } 8934 } 8935 8936 static void hclge_build_del_list(struct list_head *list, 8937 bool is_del_list, 8938 struct list_head *tmp_del_list) 8939 { 8940 struct hclge_mac_node *mac_cfg, *tmp; 8941 8942 list_for_each_entry_safe(mac_cfg, tmp, list, node) { 8943 switch (mac_cfg->state) { 8944 case HCLGE_MAC_TO_DEL: 8945 case HCLGE_MAC_ACTIVE: 8946 list_move_tail(&mac_cfg->node, tmp_del_list); 8947 break; 8948 case HCLGE_MAC_TO_ADD: 8949 if (is_del_list) { 8950 list_del(&mac_cfg->node); 8951 kfree(mac_cfg); 8952 } 8953 break; 8954 } 8955 } 8956 } 8957 8958 static void hclge_unsync_del_list(struct hclge_vport *vport, 8959 int (*unsync)(struct hclge_vport *vport, 8960 const unsigned char *addr), 8961 bool is_del_list, 8962 struct list_head *tmp_del_list) 8963 { 8964 struct hclge_mac_node *mac_cfg, *tmp; 8965 int ret; 8966 8967 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) { 8968 ret = unsync(vport, mac_cfg->mac_addr); 8969 if (!ret || ret == -ENOENT) { 8970 /* clear all mac addr from hardware, but remain these 8971 * mac addr in the mac list, and restore them after 8972 * vf reset finished. 
8973 */ 8974 if (!is_del_list && 8975 mac_cfg->state == HCLGE_MAC_ACTIVE) { 8976 mac_cfg->state = HCLGE_MAC_TO_ADD; 8977 } else { 8978 list_del(&mac_cfg->node); 8979 kfree(mac_cfg); 8980 } 8981 } else if (is_del_list) { 8982 mac_cfg->state = HCLGE_MAC_TO_DEL; 8983 } 8984 } 8985 } 8986 8987 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, 8988 enum HCLGE_MAC_ADDR_TYPE mac_type) 8989 { 8990 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr); 8991 struct hclge_dev *hdev = vport->back; 8992 struct list_head tmp_del_list, *list; 8993 8994 if (mac_type == HCLGE_MAC_ADDR_UC) { 8995 list = &vport->uc_mac_list; 8996 unsync = hclge_rm_uc_addr_common; 8997 } else { 8998 list = &vport->mc_mac_list; 8999 unsync = hclge_rm_mc_addr_common; 9000 } 9001 9002 INIT_LIST_HEAD(&tmp_del_list); 9003 9004 if (!is_del_list) 9005 set_bit(vport->vport_id, hdev->vport_config_block); 9006 9007 spin_lock_bh(&vport->mac_list_lock); 9008 9009 hclge_build_del_list(list, is_del_list, &tmp_del_list); 9010 9011 spin_unlock_bh(&vport->mac_list_lock); 9012 9013 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list); 9014 9015 spin_lock_bh(&vport->mac_list_lock); 9016 9017 hclge_sync_from_del_list(&tmp_del_list, list); 9018 9019 spin_unlock_bh(&vport->mac_list_lock); 9020 } 9021 9022 /* remove all mac addresses when uninitializing */ 9023 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport, 9024 enum HCLGE_MAC_ADDR_TYPE mac_type) 9025 { 9026 struct hclge_mac_node *mac_node, *tmp; 9027 struct hclge_dev *hdev = vport->back; 9028 struct list_head tmp_del_list, *list; 9029 9030 INIT_LIST_HEAD(&tmp_del_list); 9031 9032 list = (mac_type == HCLGE_MAC_ADDR_UC) ? 9033 &vport->uc_mac_list : &vport->mc_mac_list; 9034 9035 spin_lock_bh(&vport->mac_list_lock); 9036 9037 list_for_each_entry_safe(mac_node, tmp, list, node) { 9038 switch (mac_node->state) { 9039 case HCLGE_MAC_TO_DEL: 9040 case HCLGE_MAC_ACTIVE: 9041 list_move_tail(&mac_node->node, &tmp_del_list); 9042 break; 9043 case HCLGE_MAC_TO_ADD: 9044 list_del(&mac_node->node); 9045 kfree(mac_node); 9046 break; 9047 } 9048 } 9049 9050 spin_unlock_bh(&vport->mac_list_lock); 9051 9052 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type); 9053 9054 if (!list_empty(&tmp_del_list)) 9055 dev_warn(&hdev->pdev->dev, 9056 "uninit %s mac list for vport %u not completely.\n", 9057 mac_type == HCLGE_MAC_ADDR_UC ? 
"uc" : "mc", 9058 vport->vport_id); 9059 9060 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) { 9061 list_del(&mac_node->node); 9062 kfree(mac_node); 9063 } 9064 } 9065 9066 static void hclge_uninit_mac_table(struct hclge_dev *hdev) 9067 { 9068 struct hclge_vport *vport; 9069 int i; 9070 9071 for (i = 0; i < hdev->num_alloc_vport; i++) { 9072 vport = &hdev->vport[i]; 9073 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC); 9074 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC); 9075 } 9076 } 9077 9078 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, 9079 u16 cmdq_resp, u8 resp_code) 9080 { 9081 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0 9082 #define HCLGE_ETHERTYPE_ALREADY_ADD 1 9083 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2 9084 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3 9085 9086 int return_status; 9087 9088 if (cmdq_resp) { 9089 dev_err(&hdev->pdev->dev, 9090 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", 9091 cmdq_resp); 9092 return -EIO; 9093 } 9094 9095 switch (resp_code) { 9096 case HCLGE_ETHERTYPE_SUCCESS_ADD: 9097 case HCLGE_ETHERTYPE_ALREADY_ADD: 9098 return_status = 0; 9099 break; 9100 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW: 9101 dev_err(&hdev->pdev->dev, 9102 "add mac ethertype failed for manager table overflow.\n"); 9103 return_status = -EIO; 9104 break; 9105 case HCLGE_ETHERTYPE_KEY_CONFLICT: 9106 dev_err(&hdev->pdev->dev, 9107 "add mac ethertype failed for key conflict.\n"); 9108 return_status = -EIO; 9109 break; 9110 default: 9111 dev_err(&hdev->pdev->dev, 9112 "add mac ethertype failed for undefined, code=%u.\n", 9113 resp_code); 9114 return_status = -EIO; 9115 } 9116 9117 return return_status; 9118 } 9119 9120 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, 9121 u8 *mac_addr) 9122 { 9123 struct hclge_vport *vport = hclge_get_vport(handle); 9124 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 9125 struct hclge_dev *hdev = vport->back; 9126 9127 vport = hclge_get_vf_vport(hdev, vf); 9128 if (!vport) 9129 return -EINVAL; 9130 9131 hnae3_format_mac_addr(format_mac_addr, mac_addr); 9132 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { 9133 dev_info(&hdev->pdev->dev, 9134 "Specified MAC(=%s) is same as before, no change committed!\n", 9135 format_mac_addr); 9136 return 0; 9137 } 9138 9139 ether_addr_copy(vport->vf_info.mac, mac_addr); 9140 9141 /* there is a timewindow for PF to know VF unalive, it may 9142 * cause send mailbox fail, but it doesn't matter, VF will 9143 * query it when reinit. 
9144 */ 9145 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { 9146 dev_info(&hdev->pdev->dev, 9147 "MAC of VF %d has been set to %s, and it will be reinitialized!\n", 9148 vf, format_mac_addr); 9149 (void)hclge_inform_reset_assert_to_vf(vport); 9150 return 0; 9151 } 9152 9153 dev_info(&hdev->pdev->dev, 9154 "MAC of VF %d has been set to %s, will be active after VF reset\n", 9155 vf, format_mac_addr); 9156 return 0; 9157 } 9158 9159 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, 9160 const struct hclge_mac_mgr_tbl_entry_cmd *req) 9161 { 9162 struct hclge_desc desc; 9163 u8 resp_code; 9164 u16 retval; 9165 int ret; 9166 9167 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false); 9168 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd)); 9169 9170 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9171 if (ret) { 9172 dev_err(&hdev->pdev->dev, 9173 "add mac ethertype failed for cmd_send, ret =%d.\n", 9174 ret); 9175 return ret; 9176 } 9177 9178 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; 9179 retval = le16_to_cpu(desc.retval); 9180 9181 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); 9182 } 9183 9184 static int init_mgr_tbl(struct hclge_dev *hdev) 9185 { 9186 int ret; 9187 int i; 9188 9189 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) { 9190 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); 9191 if (ret) { 9192 dev_err(&hdev->pdev->dev, 9193 "add mac ethertype failed, ret =%d.\n", 9194 ret); 9195 return ret; 9196 } 9197 } 9198 9199 return 0; 9200 } 9201 9202 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) 9203 { 9204 struct hclge_vport *vport = hclge_get_vport(handle); 9205 struct hclge_dev *hdev = vport->back; 9206 9207 ether_addr_copy(p, hdev->hw.mac.mac_addr); 9208 } 9209 9210 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport, 9211 const u8 *old_addr, const u8 *new_addr) 9212 { 9213 struct list_head *list = &vport->uc_mac_list; 9214 struct hclge_mac_node *old_node, *new_node; 9215 9216 new_node = hclge_find_mac_node(list, new_addr); 9217 if (!new_node) { 9218 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 9219 if (!new_node) 9220 return -ENOMEM; 9221 9222 new_node->state = HCLGE_MAC_TO_ADD; 9223 ether_addr_copy(new_node->mac_addr, new_addr); 9224 list_add(&new_node->node, list); 9225 } else { 9226 if (new_node->state == HCLGE_MAC_TO_DEL) 9227 new_node->state = HCLGE_MAC_ACTIVE; 9228 9229 /* make sure the new addr is in the list head, avoid dev 9230 * addr may be not re-added into mac table for the umv space 9231 * limitation after global/imp reset which will clear mac 9232 * table by hardware. 
9233 */ 9234 list_move(&new_node->node, list); 9235 } 9236 9237 if (old_addr && !ether_addr_equal(old_addr, new_addr)) { 9238 old_node = hclge_find_mac_node(list, old_addr); 9239 if (old_node) { 9240 if (old_node->state == HCLGE_MAC_TO_ADD) { 9241 list_del(&old_node->node); 9242 kfree(old_node); 9243 } else { 9244 old_node->state = HCLGE_MAC_TO_DEL; 9245 } 9246 } 9247 } 9248 9249 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 9250 9251 return 0; 9252 } 9253 9254 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, 9255 bool is_first) 9256 { 9257 const unsigned char *new_addr = (const unsigned char *)p; 9258 struct hclge_vport *vport = hclge_get_vport(handle); 9259 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 9260 struct hclge_dev *hdev = vport->back; 9261 unsigned char *old_addr = NULL; 9262 int ret; 9263 9264 /* mac addr check */ 9265 if (is_zero_ether_addr(new_addr) || 9266 is_broadcast_ether_addr(new_addr) || 9267 is_multicast_ether_addr(new_addr)) { 9268 hnae3_format_mac_addr(format_mac_addr, new_addr); 9269 dev_err(&hdev->pdev->dev, 9270 "change uc mac err! invalid mac: %s.\n", 9271 format_mac_addr); 9272 return -EINVAL; 9273 } 9274 9275 ret = hclge_pause_addr_cfg(hdev, new_addr); 9276 if (ret) { 9277 dev_err(&hdev->pdev->dev, 9278 "failed to configure mac pause address, ret = %d\n", 9279 ret); 9280 return ret; 9281 } 9282 9283 if (!is_first) 9284 old_addr = hdev->hw.mac.mac_addr; 9285 9286 spin_lock_bh(&vport->mac_list_lock); 9287 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); 9288 if (ret) { 9289 hnae3_format_mac_addr(format_mac_addr, new_addr); 9290 dev_err(&hdev->pdev->dev, 9291 "failed to change the mac addr:%s, ret = %d\n", 9292 format_mac_addr, ret); 9293 spin_unlock_bh(&vport->mac_list_lock); 9294 9295 if (!is_first) 9296 hclge_pause_addr_cfg(hdev, old_addr); 9297 9298 return ret; 9299 } 9300 /* we must update dev addr with spin lock protect, preventing dev addr 9301 * being removed by set_rx_mode path. 
9302 */ 9303 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); 9304 spin_unlock_bh(&vport->mac_list_lock); 9305 9306 hclge_task_schedule(hdev, 0); 9307 9308 return 0; 9309 } 9310 9311 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) 9312 { 9313 struct mii_ioctl_data *data = if_mii(ifr); 9314 9315 if (!hnae3_dev_phy_imp_supported(hdev)) 9316 return -EOPNOTSUPP; 9317 9318 switch (cmd) { 9319 case SIOCGMIIPHY: 9320 data->phy_id = hdev->hw.mac.phy_addr; 9321 /* this command reads phy id and register at the same time */ 9322 fallthrough; 9323 case SIOCGMIIREG: 9324 data->val_out = hclge_read_phy_reg(hdev, data->reg_num); 9325 return 0; 9326 9327 case SIOCSMIIREG: 9328 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); 9329 default: 9330 return -EOPNOTSUPP; 9331 } 9332 } 9333 9334 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, 9335 int cmd) 9336 { 9337 struct hclge_vport *vport = hclge_get_vport(handle); 9338 struct hclge_dev *hdev = vport->back; 9339 9340 switch (cmd) { 9341 case SIOCGHWTSTAMP: 9342 return hclge_ptp_get_cfg(hdev, ifr); 9343 case SIOCSHWTSTAMP: 9344 return hclge_ptp_set_cfg(hdev, ifr); 9345 default: 9346 if (!hdev->hw.mac.phydev) 9347 return hclge_mii_ioctl(hdev, ifr, cmd); 9348 } 9349 9350 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); 9351 } 9352 9353 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, 9354 bool bypass_en) 9355 { 9356 struct hclge_port_vlan_filter_bypass_cmd *req; 9357 struct hclge_desc desc; 9358 int ret; 9359 9360 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false); 9361 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data; 9362 req->vf_id = vf_id; 9363 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B, 9364 bypass_en ? 1 : 0); 9365 9366 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9367 if (ret) 9368 dev_err(&hdev->pdev->dev, 9369 "failed to set vport%u port vlan filter bypass state, ret = %d.\n", 9370 vf_id, ret); 9371 9372 return ret; 9373 } 9374 9375 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, 9376 u8 fe_type, bool filter_en, u8 vf_id) 9377 { 9378 struct hclge_vlan_filter_ctrl_cmd *req; 9379 struct hclge_desc desc; 9380 int ret; 9381 9382 /* read current vlan filter parameter */ 9383 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true); 9384 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; 9385 req->vlan_type = vlan_type; 9386 req->vf_id = vf_id; 9387 9388 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9389 if (ret) { 9390 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", 9391 vf_id, ret); 9392 return ret; 9393 } 9394 9395 /* modify and write new config parameter */ 9396 hclge_comm_cmd_reuse_desc(&desc, false); 9397 req->vlan_fe = filter_en ? 
9398 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type); 9399 9400 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9401 if (ret) 9402 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", 9403 vf_id, ret); 9404 9405 return ret; 9406 } 9407 9408 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) 9409 { 9410 struct hclge_dev *hdev = vport->back; 9411 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 9412 int ret; 9413 9414 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 9415 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9416 HCLGE_FILTER_FE_EGRESS_V1_B, 9417 enable, vport->vport_id); 9418 9419 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9420 HCLGE_FILTER_FE_EGRESS, enable, 9421 vport->vport_id); 9422 if (ret) 9423 return ret; 9424 9425 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { 9426 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, 9427 !enable); 9428 } else if (!vport->vport_id) { 9429 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 9430 enable = false; 9431 9432 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 9433 HCLGE_FILTER_FE_INGRESS, 9434 enable, 0); 9435 } 9436 9437 return ret; 9438 } 9439 9440 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) 9441 { 9442 struct hnae3_handle *handle = &vport->nic; 9443 struct hclge_vport_vlan_cfg *vlan, *tmp; 9444 struct hclge_dev *hdev = vport->back; 9445 9446 if (vport->vport_id) { 9447 if (vport->port_base_vlan_cfg.state != 9448 HNAE3_PORT_BASE_VLAN_DISABLE) 9449 return true; 9450 9451 if (vport->vf_info.trusted && vport->vf_info.request_uc_en) 9452 return false; 9453 } else if (handle->netdev_flags & HNAE3_USER_UPE) { 9454 return false; 9455 } 9456 9457 if (!vport->req_vlan_fltr_en) 9458 return false; 9459 9460 /* compatible with former device, always enable vlan filter */ 9461 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) 9462 return true; 9463 9464 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) 9465 if (vlan->vlan_id != 0) 9466 return true; 9467 9468 return false; 9469 } 9470 9471 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) 9472 { 9473 struct hclge_dev *hdev = vport->back; 9474 bool need_en; 9475 int ret; 9476 9477 mutex_lock(&hdev->vport_lock); 9478 9479 vport->req_vlan_fltr_en = request_en; 9480 9481 need_en = hclge_need_enable_vport_vlan_filter(vport); 9482 if (need_en == vport->cur_vlan_fltr_en) { 9483 mutex_unlock(&hdev->vport_lock); 9484 return 0; 9485 } 9486 9487 ret = hclge_set_vport_vlan_filter(vport, need_en); 9488 if (ret) { 9489 mutex_unlock(&hdev->vport_lock); 9490 return ret; 9491 } 9492 9493 vport->cur_vlan_fltr_en = need_en; 9494 9495 mutex_unlock(&hdev->vport_lock); 9496 9497 return 0; 9498 } 9499 9500 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 9501 { 9502 struct hclge_vport *vport = hclge_get_vport(handle); 9503 9504 return hclge_enable_vport_vlan_filter(vport, enable); 9505 } 9506 9507 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, 9508 bool is_kill, u16 vlan, 9509 struct hclge_desc *desc) 9510 { 9511 struct hclge_vlan_filter_vf_cfg_cmd *req0; 9512 struct hclge_vlan_filter_vf_cfg_cmd *req1; 9513 u8 vf_byte_val; 9514 u8 vf_byte_off; 9515 int ret; 9516 9517 hclge_cmd_setup_basic_desc(&desc[0], 9518 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 9519 hclge_cmd_setup_basic_desc(&desc[1], 9520 HCLGE_OPC_VLAN_FILTER_VF_CFG, false); 
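/* Descriptive note (derived from the code below): the two descriptors set
 * up above are sent as one chained command. desc[0] carries the NEXT flag
 * together with the vlan id and the add/kill flag, while the per-VF bitmap
 * is split across both descriptors, so a vfid whose byte offset does not
 * fit in desc[0]'s bitmap spills into desc[1].
 */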
9521 9522 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 9523 9524 vf_byte_off = vfid / 8; 9525 vf_byte_val = 1 << (vfid % 8); 9526 9527 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 9528 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; 9529 9530 req0->vlan_id = cpu_to_le16(vlan); 9531 req0->vlan_cfg = is_kill; 9532 9533 if (vf_byte_off < HCLGE_MAX_VF_BYTES) 9534 req0->vf_bitmap[vf_byte_off] = vf_byte_val; 9535 else 9536 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; 9537 9538 ret = hclge_cmd_send(&hdev->hw, desc, 2); 9539 if (ret) { 9540 dev_err(&hdev->pdev->dev, 9541 "Send vf vlan command fail, ret =%d.\n", 9542 ret); 9543 return ret; 9544 } 9545 9546 return 0; 9547 } 9548 9549 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid, 9550 bool is_kill, struct hclge_desc *desc) 9551 { 9552 struct hclge_vlan_filter_vf_cfg_cmd *req; 9553 9554 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; 9555 9556 if (!is_kill) { 9557 #define HCLGE_VF_VLAN_NO_ENTRY 2 9558 if (!req->resp_code || req->resp_code == 1) 9559 return 0; 9560 9561 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { 9562 set_bit(vfid, hdev->vf_vlan_full); 9563 dev_warn(&hdev->pdev->dev, 9564 "vf vlan table is full, vf vlan filter is disabled\n"); 9565 return 0; 9566 } 9567 9568 dev_err(&hdev->pdev->dev, 9569 "Add vf vlan filter fail, ret =%u.\n", 9570 req->resp_code); 9571 } else { 9572 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 9573 if (!req->resp_code) 9574 return 0; 9575 9576 /* vf vlan filter is disabled when vf vlan table is full, 9577 * then new vlan id will not be added into vf vlan table. 9578 * Just return 0 without warning, avoid massive verbose 9579 * print logs when unload. 9580 */ 9581 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) 9582 return 0; 9583 9584 dev_err(&hdev->pdev->dev, 9585 "Kill vf vlan filter fail, ret =%u.\n", 9586 req->resp_code); 9587 } 9588 9589 return -EIO; 9590 } 9591 9592 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, 9593 bool is_kill, u16 vlan) 9594 { 9595 struct hclge_vport *vport = &hdev->vport[vfid]; 9596 struct hclge_desc desc[2]; 9597 int ret; 9598 9599 /* if vf vlan table is full, firmware will close vf vlan filter, it 9600 * is unable and unnecessary to add new vlan id to vf vlan filter. 9601 * If spoof check is enable, and vf vlan is full, it shouldn't add 9602 * new vlan, because tx packets with these vlan id will be dropped. 
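 * Removal requests (is_kill) are still processed, so existing entries can
 * be deleted even while the table is marked full.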
9603 */ 9604 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { 9605 if (vport->vf_info.spoofchk && vlan) { 9606 dev_err(&hdev->pdev->dev, 9607 "Can't add vlan due to spoof check is on and vf vlan table is full\n"); 9608 return -EPERM; 9609 } 9610 return 0; 9611 } 9612 9613 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); 9614 if (ret) 9615 return ret; 9616 9617 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); 9618 } 9619 9620 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, 9621 u16 vlan_id, bool is_kill) 9622 { 9623 struct hclge_vlan_filter_pf_cfg_cmd *req; 9624 struct hclge_desc desc; 9625 u8 vlan_offset_byte_val; 9626 u8 vlan_offset_byte; 9627 u8 vlan_offset_160; 9628 int ret; 9629 9630 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); 9631 9632 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; 9633 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / 9634 HCLGE_VLAN_BYTE_SIZE; 9635 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); 9636 9637 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; 9638 req->vlan_offset = vlan_offset_160; 9639 req->vlan_cfg = is_kill; 9640 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; 9641 9642 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 9643 if (ret) 9644 dev_err(&hdev->pdev->dev, 9645 "port vlan command, send fail, ret =%d.\n", ret); 9646 return ret; 9647 } 9648 9649 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, 9650 u16 vlan_id, bool is_kill) 9651 { 9652 /* vlan 0 may be added twice when 8021q module is enabled */ 9653 if (!is_kill && !vlan_id && 9654 test_bit(vport_id, hdev->vlan_table[vlan_id])) 9655 return false; 9656 9657 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { 9658 dev_warn(&hdev->pdev->dev, 9659 "Add port vlan failed, vport %u is already in vlan %u\n", 9660 vport_id, vlan_id); 9661 return false; 9662 } 9663 9664 if (is_kill && 9665 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { 9666 dev_warn(&hdev->pdev->dev, 9667 "Delete port vlan failed, vport %u is not in vlan %u\n", 9668 vport_id, vlan_id); 9669 return false; 9670 } 9671 9672 return true; 9673 } 9674 9675 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, 9676 u16 vport_id, u16 vlan_id, 9677 bool is_kill) 9678 { 9679 u16 vport_idx, vport_num = 0; 9680 int ret; 9681 9682 if (is_kill && !vlan_id) 9683 return 0; 9684 9685 if (vlan_id >= VLAN_N_VID) 9686 return -EINVAL; 9687 9688 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); 9689 if (ret) { 9690 dev_err(&hdev->pdev->dev, 9691 "Set %u vport vlan filter config fail, ret =%d.\n", 9692 vport_id, ret); 9693 return ret; 9694 } 9695 9696 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) 9697 return 0; 9698 9699 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) 9700 vport_num++; 9701 9702 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) 9703 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, 9704 is_kill); 9705 9706 return ret; 9707 } 9708 9709 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) 9710 { 9711 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; 9712 struct hclge_vport_vtag_tx_cfg_cmd *req; 9713 struct hclge_dev *hdev = vport->back; 9714 struct hclge_desc desc; 9715 u16 bmap_index; 9716 int status; 9717 9718 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); 9719 9720 req = (struct 
hclge_vport_vtag_tx_cfg_cmd *)desc.data; 9721 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); 9722 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); 9723 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, 9724 vcfg->accept_tag1 ? 1 : 0); 9725 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, 9726 vcfg->accept_untag1 ? 1 : 0); 9727 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, 9728 vcfg->accept_tag2 ? 1 : 0); 9729 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, 9730 vcfg->accept_untag2 ? 1 : 0); 9731 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, 9732 vcfg->insert_tag1_en ? 1 : 0); 9733 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, 9734 vcfg->insert_tag2_en ? 1 : 0); 9735 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, 9736 vcfg->tag_shift_mode_en ? 1 : 0); 9737 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); 9738 9739 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 9740 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 9741 HCLGE_VF_NUM_PER_BYTE; 9742 req->vf_bitmap[bmap_index] = 9743 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 9744 9745 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9746 if (status) 9747 dev_err(&hdev->pdev->dev, 9748 "Send port txvlan cfg command fail, ret =%d\n", 9749 status); 9750 9751 return status; 9752 } 9753 9754 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) 9755 { 9756 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; 9757 struct hclge_vport_vtag_rx_cfg_cmd *req; 9758 struct hclge_dev *hdev = vport->back; 9759 struct hclge_desc desc; 9760 u16 bmap_index; 9761 int status; 9762 9763 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); 9764 9765 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; 9766 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, 9767 vcfg->strip_tag1_en ? 1 : 0); 9768 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, 9769 vcfg->strip_tag2_en ? 1 : 0); 9770 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, 9771 vcfg->vlan1_vlan_prionly ? 1 : 0); 9772 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, 9773 vcfg->vlan2_vlan_prionly ? 1 : 0); 9774 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, 9775 vcfg->strip_tag1_discard_en ? 1 : 0); 9776 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, 9777 vcfg->strip_tag2_discard_en ? 
1 : 0); 9778 9779 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; 9780 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / 9781 HCLGE_VF_NUM_PER_BYTE; 9782 req->vf_bitmap[bmap_index] = 9783 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); 9784 9785 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9786 if (status) 9787 dev_err(&hdev->pdev->dev, 9788 "Send port rxvlan cfg command fail, ret =%d\n", 9789 status); 9790 9791 return status; 9792 } 9793 9794 static int hclge_vlan_offload_cfg(struct hclge_vport *vport, 9795 u16 port_base_vlan_state, 9796 u16 vlan_tag, u8 qos) 9797 { 9798 int ret; 9799 9800 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 9801 vport->txvlan_cfg.accept_tag1 = true; 9802 vport->txvlan_cfg.insert_tag1_en = false; 9803 vport->txvlan_cfg.default_tag1 = 0; 9804 } else { 9805 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); 9806 9807 vport->txvlan_cfg.accept_tag1 = 9808 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; 9809 vport->txvlan_cfg.insert_tag1_en = true; 9810 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) | 9811 vlan_tag; 9812 } 9813 9814 vport->txvlan_cfg.accept_untag1 = true; 9815 9816 /* accept_tag2 and accept_untag2 are not supported on 9817 * pdev revision(0x20), new revision support them, 9818 * this two fields can not be configured by user. 9819 */ 9820 vport->txvlan_cfg.accept_tag2 = true; 9821 vport->txvlan_cfg.accept_untag2 = true; 9822 vport->txvlan_cfg.insert_tag2_en = false; 9823 vport->txvlan_cfg.default_tag2 = 0; 9824 vport->txvlan_cfg.tag_shift_mode_en = true; 9825 9826 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 9827 vport->rxvlan_cfg.strip_tag1_en = false; 9828 vport->rxvlan_cfg.strip_tag2_en = 9829 vport->rxvlan_cfg.rx_vlan_offload_en; 9830 vport->rxvlan_cfg.strip_tag2_discard_en = false; 9831 } else { 9832 vport->rxvlan_cfg.strip_tag1_en = 9833 vport->rxvlan_cfg.rx_vlan_offload_en; 9834 vport->rxvlan_cfg.strip_tag2_en = true; 9835 vport->rxvlan_cfg.strip_tag2_discard_en = true; 9836 } 9837 9838 vport->rxvlan_cfg.strip_tag1_discard_en = false; 9839 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 9840 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 9841 9842 ret = hclge_set_vlan_tx_offload_cfg(vport); 9843 if (ret) 9844 return ret; 9845 9846 return hclge_set_vlan_rx_offload_cfg(vport); 9847 } 9848 9849 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) 9850 { 9851 struct hclge_rx_vlan_type_cfg_cmd *rx_req; 9852 struct hclge_tx_vlan_type_cfg_cmd *tx_req; 9853 struct hclge_desc desc; 9854 int status; 9855 9856 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); 9857 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; 9858 rx_req->ot_fst_vlan_type = 9859 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); 9860 rx_req->ot_sec_vlan_type = 9861 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); 9862 rx_req->in_fst_vlan_type = 9863 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); 9864 rx_req->in_sec_vlan_type = 9865 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); 9866 9867 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9868 if (status) { 9869 dev_err(&hdev->pdev->dev, 9870 "Send rxvlan protocol type command fail, ret =%d\n", 9871 status); 9872 return status; 9873 } 9874 9875 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); 9876 9877 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data; 9878 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); 9879 tx_req->in_vlan_type = 
cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); 9880 9881 status = hclge_cmd_send(&hdev->hw, &desc, 1); 9882 if (status) 9883 dev_err(&hdev->pdev->dev, 9884 "Send txvlan protocol type command fail, ret =%d\n", 9885 status); 9886 9887 return status; 9888 } 9889 9890 static int hclge_init_vlan_filter(struct hclge_dev *hdev) 9891 { 9892 struct hclge_vport *vport; 9893 int ret; 9894 int i; 9895 9896 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 9897 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9898 HCLGE_FILTER_FE_EGRESS_V1_B, 9899 true, 0); 9900 9901 /* for revision 0x21, vf vlan filter is per function */ 9902 for (i = 0; i < hdev->num_alloc_vport; i++) { 9903 vport = &hdev->vport[i]; 9904 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 9905 HCLGE_FILTER_FE_EGRESS, true, 9906 vport->vport_id); 9907 if (ret) 9908 return ret; 9909 vport->cur_vlan_fltr_en = true; 9910 } 9911 9912 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, 9913 HCLGE_FILTER_FE_INGRESS, true, 0); 9914 } 9915 9916 static int hclge_init_vlan_type(struct hclge_dev *hdev) 9917 { 9918 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; 9919 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; 9920 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; 9921 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; 9922 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; 9923 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; 9924 9925 return hclge_set_vlan_protocol_type(hdev); 9926 } 9927 9928 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) 9929 { 9930 struct hclge_port_base_vlan_config *cfg; 9931 struct hclge_vport *vport; 9932 int ret; 9933 int i; 9934 9935 for (i = 0; i < hdev->num_alloc_vport; i++) { 9936 vport = &hdev->vport[i]; 9937 cfg = &vport->port_base_vlan_cfg; 9938 9939 ret = hclge_vlan_offload_cfg(vport, cfg->state, 9940 cfg->vlan_info.vlan_tag, 9941 cfg->vlan_info.qos); 9942 if (ret) 9943 return ret; 9944 } 9945 return 0; 9946 } 9947 9948 static int hclge_init_vlan_config(struct hclge_dev *hdev) 9949 { 9950 struct hnae3_handle *handle = &hdev->vport[0].nic; 9951 int ret; 9952 9953 ret = hclge_init_vlan_filter(hdev); 9954 if (ret) 9955 return ret; 9956 9957 ret = hclge_init_vlan_type(hdev); 9958 if (ret) 9959 return ret; 9960 9961 ret = hclge_init_vport_vlan_offload(hdev); 9962 if (ret) 9963 return ret; 9964 9965 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); 9966 } 9967 9968 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 9969 bool writen_to_tbl) 9970 { 9971 struct hclge_vport_vlan_cfg *vlan, *tmp; 9972 struct hclge_dev *hdev = vport->back; 9973 9974 mutex_lock(&hdev->vport_lock); 9975 9976 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 9977 if (vlan->vlan_id == vlan_id) { 9978 mutex_unlock(&hdev->vport_lock); 9979 return; 9980 } 9981 } 9982 9983 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 9984 if (!vlan) { 9985 mutex_unlock(&hdev->vport_lock); 9986 return; 9987 } 9988 9989 vlan->hd_tbl_status = writen_to_tbl; 9990 vlan->vlan_id = vlan_id; 9991 9992 list_add_tail(&vlan->node, &vport->vlan_list); 9993 mutex_unlock(&hdev->vport_lock); 9994 } 9995 9996 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) 9997 { 9998 struct hclge_vport_vlan_cfg *vlan, *tmp; 9999 struct hclge_dev *hdev = vport->back; 10000 int ret; 10001 10002 mutex_lock(&hdev->vport_lock); 10003 10004 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10005 if (!vlan->hd_tbl_status) { 
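/* Descriptive note: only entries not yet present in the hardware VLAN
 * table are programmed here; a failure aborts the restore and the error
 * is returned to the caller.
 */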
10006 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10007 vport->vport_id, 10008 vlan->vlan_id, false); 10009 if (ret) { 10010 dev_err(&hdev->pdev->dev, 10011 "restore vport vlan list failed, ret=%d\n", 10012 ret); 10013 10014 mutex_unlock(&hdev->vport_lock); 10015 return ret; 10016 } 10017 } 10018 vlan->hd_tbl_status = true; 10019 } 10020 10021 mutex_unlock(&hdev->vport_lock); 10022 10023 return 0; 10024 } 10025 10026 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, 10027 bool is_write_tbl) 10028 { 10029 struct hclge_vport_vlan_cfg *vlan, *tmp; 10030 struct hclge_dev *hdev = vport->back; 10031 10032 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10033 if (vlan->vlan_id == vlan_id) { 10034 if (is_write_tbl && vlan->hd_tbl_status) 10035 hclge_set_vlan_filter_hw(hdev, 10036 htons(ETH_P_8021Q), 10037 vport->vport_id, 10038 vlan_id, 10039 true); 10040 10041 list_del(&vlan->node); 10042 kfree(vlan); 10043 break; 10044 } 10045 } 10046 } 10047 10048 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) 10049 { 10050 struct hclge_vport_vlan_cfg *vlan, *tmp; 10051 struct hclge_dev *hdev = vport->back; 10052 10053 mutex_lock(&hdev->vport_lock); 10054 10055 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10056 if (vlan->hd_tbl_status) 10057 hclge_set_vlan_filter_hw(hdev, 10058 htons(ETH_P_8021Q), 10059 vport->vport_id, 10060 vlan->vlan_id, 10061 true); 10062 10063 vlan->hd_tbl_status = false; 10064 if (is_del_list) { 10065 list_del(&vlan->node); 10066 kfree(vlan); 10067 } 10068 } 10069 clear_bit(vport->vport_id, hdev->vf_vlan_full); 10070 mutex_unlock(&hdev->vport_lock); 10071 } 10072 10073 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) 10074 { 10075 struct hclge_vport_vlan_cfg *vlan, *tmp; 10076 struct hclge_vport *vport; 10077 int i; 10078 10079 mutex_lock(&hdev->vport_lock); 10080 10081 for (i = 0; i < hdev->num_alloc_vport; i++) { 10082 vport = &hdev->vport[i]; 10083 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10084 list_del(&vlan->node); 10085 kfree(vlan); 10086 } 10087 } 10088 10089 mutex_unlock(&hdev->vport_lock); 10090 } 10091 10092 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) 10093 { 10094 struct hclge_vlan_info *vlan_info; 10095 struct hclge_vport *vport; 10096 u16 vlan_proto; 10097 u16 vlan_id; 10098 u16 state; 10099 int vf_id; 10100 int ret; 10101 10102 /* PF should restore all vfs port base vlan */ 10103 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { 10104 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; 10105 vlan_info = vport->port_base_vlan_cfg.tbl_sta ? 
10106 &vport->port_base_vlan_cfg.vlan_info : 10107 &vport->port_base_vlan_cfg.old_vlan_info; 10108 10109 vlan_id = vlan_info->vlan_tag; 10110 vlan_proto = vlan_info->vlan_proto; 10111 state = vport->port_base_vlan_cfg.state; 10112 10113 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { 10114 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); 10115 ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), 10116 vport->vport_id, 10117 vlan_id, false); 10118 vport->port_base_vlan_cfg.tbl_sta = ret == 0; 10119 } 10120 } 10121 } 10122 10123 void hclge_restore_vport_vlan_table(struct hclge_vport *vport) 10124 { 10125 struct hclge_vport_vlan_cfg *vlan, *tmp; 10126 struct hclge_dev *hdev = vport->back; 10127 int ret; 10128 10129 mutex_lock(&hdev->vport_lock); 10130 10131 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10132 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { 10133 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10134 vport->vport_id, 10135 vlan->vlan_id, false); 10136 if (ret) 10137 break; 10138 vlan->hd_tbl_status = true; 10139 } 10140 } 10141 10142 mutex_unlock(&hdev->vport_lock); 10143 } 10144 10145 /* For global reset and imp reset, hardware will clear the mac table, 10146 * so we change the mac address state from ACTIVE to TO_ADD, then they 10147 * can be restored in the service task after reset complete. Furtherly, 10148 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to 10149 * be restored after reset, so just remove these mac nodes from mac_list. 10150 */ 10151 static void hclge_mac_node_convert_for_reset(struct list_head *list) 10152 { 10153 struct hclge_mac_node *mac_node, *tmp; 10154 10155 list_for_each_entry_safe(mac_node, tmp, list, node) { 10156 if (mac_node->state == HCLGE_MAC_ACTIVE) { 10157 mac_node->state = HCLGE_MAC_TO_ADD; 10158 } else if (mac_node->state == HCLGE_MAC_TO_DEL) { 10159 list_del(&mac_node->node); 10160 kfree(mac_node); 10161 } 10162 } 10163 } 10164 10165 void hclge_restore_mac_table_common(struct hclge_vport *vport) 10166 { 10167 spin_lock_bh(&vport->mac_list_lock); 10168 10169 hclge_mac_node_convert_for_reset(&vport->uc_mac_list); 10170 hclge_mac_node_convert_for_reset(&vport->mc_mac_list); 10171 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state); 10172 10173 spin_unlock_bh(&vport->mac_list_lock); 10174 } 10175 10176 static void hclge_restore_hw_table(struct hclge_dev *hdev) 10177 { 10178 struct hclge_vport *vport = &hdev->vport[0]; 10179 struct hnae3_handle *handle = &vport->nic; 10180 10181 hclge_restore_mac_table_common(vport); 10182 hclge_restore_vport_port_base_vlan_config(hdev); 10183 hclge_restore_vport_vlan_table(vport); 10184 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); 10185 hclge_restore_fd_entries(handle); 10186 } 10187 10188 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 10189 { 10190 struct hclge_vport *vport = hclge_get_vport(handle); 10191 10192 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10193 vport->rxvlan_cfg.strip_tag1_en = false; 10194 vport->rxvlan_cfg.strip_tag2_en = enable; 10195 vport->rxvlan_cfg.strip_tag2_discard_en = false; 10196 } else { 10197 vport->rxvlan_cfg.strip_tag1_en = enable; 10198 vport->rxvlan_cfg.strip_tag2_en = true; 10199 vport->rxvlan_cfg.strip_tag2_discard_en = true; 10200 } 10201 10202 vport->rxvlan_cfg.strip_tag1_discard_en = false; 10203 vport->rxvlan_cfg.vlan1_vlan_prionly = false; 10204 vport->rxvlan_cfg.vlan2_vlan_prionly = false; 10205 vport->rxvlan_cfg.rx_vlan_offload_en 
= enable; 10206 10207 return hclge_set_vlan_rx_offload_cfg(vport); 10208 } 10209 10210 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport) 10211 { 10212 struct hclge_dev *hdev = vport->back; 10213 10214 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) 10215 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); 10216 } 10217 10218 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, 10219 u16 port_base_vlan_state, 10220 struct hclge_vlan_info *new_info, 10221 struct hclge_vlan_info *old_info) 10222 { 10223 struct hclge_dev *hdev = vport->back; 10224 int ret; 10225 10226 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { 10227 hclge_rm_vport_all_vlan_table(vport, false); 10228 /* force clear VLAN 0 */ 10229 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); 10230 if (ret) 10231 return ret; 10232 return hclge_set_vlan_filter_hw(hdev, 10233 htons(new_info->vlan_proto), 10234 vport->vport_id, 10235 new_info->vlan_tag, 10236 false); 10237 } 10238 10239 vport->port_base_vlan_cfg.tbl_sta = false; 10240 10241 /* force add VLAN 0 */ 10242 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); 10243 if (ret) 10244 return ret; 10245 10246 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), 10247 vport->vport_id, old_info->vlan_tag, 10248 true); 10249 if (ret) 10250 return ret; 10251 10252 return hclge_add_vport_all_vlan_table(vport); 10253 } 10254 10255 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, 10256 const struct hclge_vlan_info *old_cfg) 10257 { 10258 if (new_cfg->vlan_tag != old_cfg->vlan_tag) 10259 return true; 10260 10261 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0)) 10262 return true; 10263 10264 return false; 10265 } 10266 10267 static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, 10268 struct hclge_vlan_info *new_info, 10269 struct hclge_vlan_info *old_info) 10270 { 10271 struct hclge_dev *hdev = vport->back; 10272 int ret; 10273 10274 /* add new VLAN tag */ 10275 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), 10276 vport->vport_id, new_info->vlan_tag, 10277 false); 10278 if (ret) 10279 return ret; 10280 10281 vport->port_base_vlan_cfg.tbl_sta = false; 10282 /* remove old VLAN tag */ 10283 if (old_info->vlan_tag == 0) 10284 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, 10285 true, 0); 10286 else 10287 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10288 vport->vport_id, 10289 old_info->vlan_tag, true); 10290 if (ret) 10291 dev_err(&hdev->pdev->dev, 10292 "failed to clear vport%u port base vlan %u, ret = %d.\n", 10293 vport->vport_id, old_info->vlan_tag, ret); 10294 10295 return ret; 10296 } 10297 10298 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, 10299 struct hclge_vlan_info *vlan_info) 10300 { 10301 struct hnae3_handle *nic = &vport->nic; 10302 struct hclge_vlan_info *old_vlan_info; 10303 int ret; 10304 10305 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; 10306 10307 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag, 10308 vlan_info->qos); 10309 if (ret) 10310 return ret; 10311 10312 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) 10313 goto out; 10314 10315 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) 10316 ret = hclge_modify_port_base_vlan_tag(vport, vlan_info, 10317 old_vlan_info); 10318 else 10319 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, 10320 old_vlan_info); 10321 if (ret) 10322 
return ret; 10323 10324 out: 10325 vport->port_base_vlan_cfg.state = state; 10326 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 10327 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; 10328 else 10329 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 10330 10331 vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; 10332 vport->port_base_vlan_cfg.vlan_info = *vlan_info; 10333 vport->port_base_vlan_cfg.tbl_sta = true; 10334 hclge_set_vport_vlan_fltr_change(vport); 10335 10336 return 0; 10337 } 10338 10339 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, 10340 enum hnae3_port_base_vlan_state state, 10341 u16 vlan, u8 qos) 10342 { 10343 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10344 if (!vlan && !qos) 10345 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 10346 10347 return HNAE3_PORT_BASE_VLAN_ENABLE; 10348 } 10349 10350 if (!vlan && !qos) 10351 return HNAE3_PORT_BASE_VLAN_DISABLE; 10352 10353 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan && 10354 vport->port_base_vlan_cfg.vlan_info.qos == qos) 10355 return HNAE3_PORT_BASE_VLAN_NOCHANGE; 10356 10357 return HNAE3_PORT_BASE_VLAN_MODIFY; 10358 } 10359 10360 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, 10361 u16 vlan, u8 qos, __be16 proto) 10362 { 10363 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 10364 struct hclge_vport *vport = hclge_get_vport(handle); 10365 struct hclge_dev *hdev = vport->back; 10366 struct hclge_vlan_info vlan_info; 10367 u16 state; 10368 int ret; 10369 10370 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 10371 return -EOPNOTSUPP; 10372 10373 vport = hclge_get_vf_vport(hdev, vfid); 10374 if (!vport) 10375 return -EINVAL; 10376 10377 /* qos is a 3 bits value, so can not be bigger than 7 */ 10378 if (vlan > VLAN_N_VID - 1 || qos > 7) 10379 return -EINVAL; 10380 if (proto != htons(ETH_P_8021Q)) 10381 return -EPROTONOSUPPORT; 10382 10383 state = hclge_get_port_base_vlan_state(vport, 10384 vport->port_base_vlan_cfg.state, 10385 vlan, qos); 10386 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) 10387 return 0; 10388 10389 vlan_info.vlan_tag = vlan; 10390 vlan_info.qos = qos; 10391 vlan_info.vlan_proto = ntohs(proto); 10392 10393 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); 10394 if (ret) { 10395 dev_err(&hdev->pdev->dev, 10396 "failed to update port base vlan for vf %d, ret = %d\n", 10397 vfid, ret); 10398 return ret; 10399 } 10400 10401 /* there is a timewindow for PF to know VF unalive, it may 10402 * cause send mailbox fail, but it doesn't matter, VF will 10403 * query it when reinit. 10404 * for DEVICE_VERSION_V3, vf doesn't need to know about the port based 10405 * VLAN state. 
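 * Hence the mailbox notification below is only sent (or deferred via the
 * need_notify flag) for devices older than DEVICE_VERSION_V3.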
10406 */ 10407 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 10408 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) 10409 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], 10410 vport->vport_id, 10411 state, 10412 &vlan_info); 10413 else 10414 set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, 10415 &vport->need_notify); 10416 } 10417 return 0; 10418 } 10419 10420 static void hclge_clear_vf_vlan(struct hclge_dev *hdev) 10421 { 10422 struct hclge_vlan_info *vlan_info; 10423 struct hclge_vport *vport; 10424 int ret; 10425 int vf; 10426 10427 /* clear port base vlan for all vf */ 10428 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { 10429 vport = &hdev->vport[vf]; 10430 vlan_info = &vport->port_base_vlan_cfg.vlan_info; 10431 10432 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10433 vport->vport_id, 10434 vlan_info->vlan_tag, true); 10435 if (ret) 10436 dev_err(&hdev->pdev->dev, 10437 "failed to clear vf vlan for vf%d, ret = %d\n", 10438 vf - HCLGE_VF_VPORT_START_NUM, ret); 10439 } 10440 } 10441 10442 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, 10443 u16 vlan_id, bool is_kill) 10444 { 10445 struct hclge_vport *vport = hclge_get_vport(handle); 10446 struct hclge_dev *hdev = vport->back; 10447 bool writen_to_tbl = false; 10448 int ret = 0; 10449 10450 /* When device is resetting or reset failed, firmware is unable to 10451 * handle mailbox. Just record the vlan id, and remove it after 10452 * reset finished. 10453 */ 10454 mutex_lock(&hdev->vport_lock); 10455 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 10456 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { 10457 set_bit(vlan_id, vport->vlan_del_fail_bmap); 10458 mutex_unlock(&hdev->vport_lock); 10459 return -EBUSY; 10460 } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) { 10461 clear_bit(vlan_id, vport->vlan_del_fail_bmap); 10462 } 10463 mutex_unlock(&hdev->vport_lock); 10464 10465 /* when port base vlan enabled, we use port base vlan as the vlan 10466 * filter entry. In this case, we don't update vlan filter table 10467 * when user add new vlan or remove exist vlan, just update the vport 10468 * vlan list. 
The vlan id in vlan list will be writen in vlan filter 10469 * table until port base vlan disabled 10470 */ 10471 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { 10472 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, 10473 vlan_id, is_kill); 10474 writen_to_tbl = true; 10475 } 10476 10477 if (!ret) { 10478 if (!is_kill) { 10479 hclge_add_vport_vlan_table(vport, vlan_id, 10480 writen_to_tbl); 10481 } else if (is_kill && vlan_id != 0) { 10482 mutex_lock(&hdev->vport_lock); 10483 hclge_rm_vport_vlan_table(vport, vlan_id, false); 10484 mutex_unlock(&hdev->vport_lock); 10485 } 10486 } else if (is_kill) { 10487 /* when remove hw vlan filter failed, record the vlan id, 10488 * and try to remove it from hw later, to be consistence 10489 * with stack 10490 */ 10491 mutex_lock(&hdev->vport_lock); 10492 set_bit(vlan_id, vport->vlan_del_fail_bmap); 10493 mutex_unlock(&hdev->vport_lock); 10494 } 10495 10496 hclge_set_vport_vlan_fltr_change(vport); 10497 10498 return ret; 10499 } 10500 10501 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) 10502 { 10503 struct hclge_vport *vport; 10504 int ret; 10505 u16 i; 10506 10507 for (i = 0; i < hdev->num_alloc_vport; i++) { 10508 vport = &hdev->vport[i]; 10509 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 10510 &vport->state)) 10511 continue; 10512 10513 ret = hclge_enable_vport_vlan_filter(vport, 10514 vport->req_vlan_fltr_en); 10515 if (ret) { 10516 dev_err(&hdev->pdev->dev, 10517 "failed to sync vlan filter state for vport%u, ret = %d\n", 10518 vport->vport_id, ret); 10519 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 10520 &vport->state); 10521 return; 10522 } 10523 } 10524 } 10525 10526 static void hclge_sync_vlan_filter(struct hclge_dev *hdev) 10527 { 10528 #define HCLGE_MAX_SYNC_COUNT 60 10529 10530 int i, ret, sync_cnt = 0; 10531 u16 vlan_id; 10532 10533 mutex_lock(&hdev->vport_lock); 10534 /* start from vport 1 for PF is always alive */ 10535 for (i = 0; i < hdev->num_alloc_vport; i++) { 10536 struct hclge_vport *vport = &hdev->vport[i]; 10537 10538 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 10539 VLAN_N_VID); 10540 while (vlan_id != VLAN_N_VID) { 10541 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), 10542 vport->vport_id, vlan_id, 10543 true); 10544 if (ret && ret != -EINVAL) { 10545 mutex_unlock(&hdev->vport_lock); 10546 return; 10547 } 10548 10549 clear_bit(vlan_id, vport->vlan_del_fail_bmap); 10550 hclge_rm_vport_vlan_table(vport, vlan_id, false); 10551 hclge_set_vport_vlan_fltr_change(vport); 10552 10553 sync_cnt++; 10554 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) { 10555 mutex_unlock(&hdev->vport_lock); 10556 return; 10557 } 10558 10559 vlan_id = find_first_bit(vport->vlan_del_fail_bmap, 10560 VLAN_N_VID); 10561 } 10562 } 10563 mutex_unlock(&hdev->vport_lock); 10564 10565 hclge_sync_vlan_fltr_state(hdev); 10566 } 10567 10568 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) 10569 { 10570 struct hclge_config_max_frm_size_cmd *req; 10571 struct hclge_desc desc; 10572 10573 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); 10574 10575 req = (struct hclge_config_max_frm_size_cmd *)desc.data; 10576 req->max_frm_size = cpu_to_le16(new_mps); 10577 req->min_frm_size = HCLGE_MAC_MIN_FRAME; 10578 10579 return hclge_cmd_send(&hdev->hw, &desc, 1); 10580 } 10581 10582 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) 10583 { 10584 struct hclge_vport *vport = hclge_get_vport(handle); 10585 10586 return hclge_set_vport_mtu(vport, 
new_mtu); 10587 } 10588 10589 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) 10590 { 10591 struct hclge_dev *hdev = vport->back; 10592 int i, max_frm_size, ret; 10593 10594 /* HW supprt 2 layer vlan */ 10595 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; 10596 if (max_frm_size < HCLGE_MAC_MIN_FRAME || 10597 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) 10598 return -EINVAL; 10599 10600 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); 10601 mutex_lock(&hdev->vport_lock); 10602 /* VF's mps must fit within hdev->mps */ 10603 if (vport->vport_id && max_frm_size > hdev->mps) { 10604 mutex_unlock(&hdev->vport_lock); 10605 return -EINVAL; 10606 } else if (vport->vport_id) { 10607 vport->mps = max_frm_size; 10608 mutex_unlock(&hdev->vport_lock); 10609 return 0; 10610 } 10611 10612 /* PF's mps must be greater then VF's mps */ 10613 for (i = 1; i < hdev->num_alloc_vport; i++) 10614 if (max_frm_size < hdev->vport[i].mps) { 10615 dev_err(&hdev->pdev->dev, 10616 "failed to set pf mtu for less than vport %d, mps = %u.\n", 10617 i, hdev->vport[i].mps); 10618 mutex_unlock(&hdev->vport_lock); 10619 return -EINVAL; 10620 } 10621 10622 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); 10623 10624 ret = hclge_set_mac_mtu(hdev, max_frm_size); 10625 if (ret) { 10626 dev_err(&hdev->pdev->dev, 10627 "Change mtu fail, ret =%d\n", ret); 10628 goto out; 10629 } 10630 10631 hdev->mps = max_frm_size; 10632 vport->mps = max_frm_size; 10633 10634 ret = hclge_buffer_alloc(hdev); 10635 if (ret) 10636 dev_err(&hdev->pdev->dev, 10637 "Allocate buffer fail, ret =%d\n", ret); 10638 10639 out: 10640 hclge_notify_client(hdev, HNAE3_UP_CLIENT); 10641 mutex_unlock(&hdev->vport_lock); 10642 return ret; 10643 } 10644 10645 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, 10646 bool enable) 10647 { 10648 struct hclge_reset_tqp_queue_cmd *req; 10649 struct hclge_desc desc; 10650 int ret; 10651 10652 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); 10653 10654 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 10655 req->tqp_id = cpu_to_le16(queue_id); 10656 if (enable) 10657 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); 10658 10659 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10660 if (ret) { 10661 dev_err(&hdev->pdev->dev, 10662 "Send tqp reset cmd error, status =%d\n", ret); 10663 return ret; 10664 } 10665 10666 return 0; 10667 } 10668 10669 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, 10670 u8 *reset_status) 10671 { 10672 struct hclge_reset_tqp_queue_cmd *req; 10673 struct hclge_desc desc; 10674 int ret; 10675 10676 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); 10677 10678 req = (struct hclge_reset_tqp_queue_cmd *)desc.data; 10679 req->tqp_id = cpu_to_le16(queue_id); 10680 10681 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10682 if (ret) { 10683 dev_err(&hdev->pdev->dev, 10684 "Get reset status error, status =%d\n", ret); 10685 return ret; 10686 } 10687 10688 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 10689 10690 return 0; 10691 } 10692 10693 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) 10694 { 10695 struct hclge_comm_tqp *tqp; 10696 struct hnae3_queue *queue; 10697 10698 queue = handle->kinfo.tqp[queue_id]; 10699 tqp = container_of(queue, struct hclge_comm_tqp, q); 10700 10701 return tqp->index; 10702 } 10703 10704 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle) 10705 { 10706 struct hclge_vport *vport = 
hclge_get_vport(handle); 10707 struct hclge_dev *hdev = vport->back; 10708 u16 reset_try_times = 0; 10709 u8 reset_status; 10710 u16 queue_gid; 10711 int ret; 10712 u16 i; 10713 10714 for (i = 0; i < handle->kinfo.num_tqps; i++) { 10715 queue_gid = hclge_covert_handle_qid_global(handle, i); 10716 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); 10717 if (ret) { 10718 dev_err(&hdev->pdev->dev, 10719 "failed to send reset tqp cmd, ret = %d\n", 10720 ret); 10721 return ret; 10722 } 10723 10724 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 10725 ret = hclge_get_reset_status(hdev, queue_gid, 10726 &reset_status); 10727 if (ret) 10728 return ret; 10729 10730 if (reset_status) 10731 break; 10732 10733 /* Wait for tqp hw reset */ 10734 usleep_range(1000, 1200); 10735 } 10736 10737 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { 10738 dev_err(&hdev->pdev->dev, 10739 "wait for tqp hw reset timeout\n"); 10740 return -ETIME; 10741 } 10742 10743 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); 10744 if (ret) { 10745 dev_err(&hdev->pdev->dev, 10746 "failed to deassert soft reset, ret = %d\n", 10747 ret); 10748 return ret; 10749 } 10750 reset_try_times = 0; 10751 } 10752 return 0; 10753 } 10754 10755 static int hclge_reset_rcb(struct hnae3_handle *handle) 10756 { 10757 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U 10758 #define HCLGE_RESET_RCB_SUCCESS 1U 10759 10760 struct hclge_vport *vport = hclge_get_vport(handle); 10761 struct hclge_dev *hdev = vport->back; 10762 struct hclge_reset_cmd *req; 10763 struct hclge_desc desc; 10764 u8 return_status; 10765 u16 queue_gid; 10766 int ret; 10767 10768 queue_gid = hclge_covert_handle_qid_global(handle, 0); 10769 10770 req = (struct hclge_reset_cmd *)desc.data; 10771 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); 10772 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1); 10773 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid); 10774 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps); 10775 10776 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 10777 if (ret) { 10778 dev_err(&hdev->pdev->dev, 10779 "failed to send rcb reset cmd, ret = %d\n", ret); 10780 return ret; 10781 } 10782 10783 return_status = req->fun_reset_rcb_return_status; 10784 if (return_status == HCLGE_RESET_RCB_SUCCESS) 10785 return 0; 10786 10787 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) { 10788 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", 10789 return_status); 10790 return -EIO; 10791 } 10792 10793 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd 10794 * again to reset all tqps 10795 */ 10796 return hclge_reset_tqp_cmd(handle); 10797 } 10798 10799 int hclge_reset_tqp(struct hnae3_handle *handle) 10800 { 10801 struct hclge_vport *vport = hclge_get_vport(handle); 10802 struct hclge_dev *hdev = vport->back; 10803 int ret; 10804 10805 /* only need to disable PF's tqp */ 10806 if (!vport->vport_id) { 10807 ret = hclge_tqp_enable(handle, false); 10808 if (ret) { 10809 dev_err(&hdev->pdev->dev, 10810 "failed to disable tqp, ret = %d\n", ret); 10811 return ret; 10812 } 10813 } 10814 10815 return hclge_reset_rcb(handle); 10816 } 10817 10818 static u32 hclge_get_fw_version(struct hnae3_handle *handle) 10819 { 10820 struct hclge_vport *vport = hclge_get_vport(handle); 10821 struct hclge_dev *hdev = vport->back; 10822 10823 return hdev->fw_version; 10824 } 10825 10826 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 10827 { 10828 struct phy_device *phydev = 
hdev->hw.mac.phydev; 10829 10830 if (!phydev) 10831 return; 10832 10833 phy_set_asym_pause(phydev, rx_en, tx_en); 10834 } 10835 10836 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) 10837 { 10838 int ret; 10839 10840 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) 10841 return 0; 10842 10843 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); 10844 if (ret) 10845 dev_err(&hdev->pdev->dev, 10846 "configure pauseparam error, ret = %d.\n", ret); 10847 10848 return ret; 10849 } 10850 10851 int hclge_cfg_flowctrl(struct hclge_dev *hdev) 10852 { 10853 struct phy_device *phydev = hdev->hw.mac.phydev; 10854 u16 remote_advertising = 0; 10855 u16 local_advertising; 10856 u32 rx_pause, tx_pause; 10857 u8 flowctl; 10858 10859 if (!phydev->link) 10860 return 0; 10861 10862 if (!phydev->autoneg) 10863 return hclge_mac_pause_setup_hw(hdev); 10864 10865 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); 10866 10867 if (phydev->pause) 10868 remote_advertising = LPA_PAUSE_CAP; 10869 10870 if (phydev->asym_pause) 10871 remote_advertising |= LPA_PAUSE_ASYM; 10872 10873 flowctl = mii_resolve_flowctrl_fdx(local_advertising, 10874 remote_advertising); 10875 tx_pause = flowctl & FLOW_CTRL_TX; 10876 rx_pause = flowctl & FLOW_CTRL_RX; 10877 10878 if (phydev->duplex == HCLGE_MAC_HALF) { 10879 tx_pause = 0; 10880 rx_pause = 0; 10881 } 10882 10883 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); 10884 } 10885 10886 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, 10887 u32 *rx_en, u32 *tx_en) 10888 { 10889 struct hclge_vport *vport = hclge_get_vport(handle); 10890 struct hclge_dev *hdev = vport->back; 10891 u8 media_type = hdev->hw.mac.media_type; 10892 10893 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ? 10894 hclge_get_autoneg(handle) : 0; 10895 10896 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 10897 *rx_en = 0; 10898 *tx_en = 0; 10899 return; 10900 } 10901 10902 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { 10903 *rx_en = 1; 10904 *tx_en = 0; 10905 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { 10906 *tx_en = 1; 10907 *rx_en = 0; 10908 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { 10909 *rx_en = 1; 10910 *tx_en = 1; 10911 } else { 10912 *rx_en = 0; 10913 *tx_en = 0; 10914 } 10915 } 10916 10917 static void hclge_record_user_pauseparam(struct hclge_dev *hdev, 10918 u32 rx_en, u32 tx_en) 10919 { 10920 if (rx_en && tx_en) 10921 hdev->fc_mode_last_time = HCLGE_FC_FULL; 10922 else if (rx_en && !tx_en) 10923 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; 10924 else if (!rx_en && tx_en) 10925 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; 10926 else 10927 hdev->fc_mode_last_time = HCLGE_FC_NONE; 10928 10929 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 10930 } 10931 10932 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, 10933 u32 rx_en, u32 tx_en) 10934 { 10935 struct hclge_vport *vport = hclge_get_vport(handle); 10936 struct hclge_dev *hdev = vport->back; 10937 struct phy_device *phydev = hdev->hw.mac.phydev; 10938 u32 fc_autoneg; 10939 10940 if (phydev || hnae3_dev_phy_imp_supported(hdev)) { 10941 fc_autoneg = hclge_get_autoneg(handle); 10942 if (auto_neg != fc_autoneg) { 10943 dev_info(&hdev->pdev->dev, 10944 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); 10945 return -EOPNOTSUPP; 10946 } 10947 } 10948 10949 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { 10950 dev_info(&hdev->pdev->dev, 10951 "Priority flow control enabled. 
Cannot set link flow control.\n"); 10952 return -EOPNOTSUPP; 10953 } 10954 10955 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); 10956 10957 hclge_record_user_pauseparam(hdev, rx_en, tx_en); 10958 10959 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) 10960 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); 10961 10962 if (phydev) 10963 return phy_start_aneg(phydev); 10964 10965 return -EOPNOTSUPP; 10966 } 10967 10968 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, 10969 u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num) 10970 { 10971 struct hclge_vport *vport = hclge_get_vport(handle); 10972 struct hclge_dev *hdev = vport->back; 10973 10974 if (speed) 10975 *speed = hdev->hw.mac.speed; 10976 if (duplex) 10977 *duplex = hdev->hw.mac.duplex; 10978 if (auto_neg) 10979 *auto_neg = hdev->hw.mac.autoneg; 10980 if (lane_num) 10981 *lane_num = hdev->hw.mac.lane_num; 10982 } 10983 10984 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, 10985 u8 *module_type) 10986 { 10987 struct hclge_vport *vport = hclge_get_vport(handle); 10988 struct hclge_dev *hdev = vport->back; 10989 10990 /* When nic is down, the service task is not running, doesn't update 10991 * the port information per second. Query the port information before 10992 * return the media type, ensure getting the correct media information. 10993 */ 10994 hclge_update_port_info(hdev); 10995 10996 if (media_type) 10997 *media_type = hdev->hw.mac.media_type; 10998 10999 if (module_type) 11000 *module_type = hdev->hw.mac.module_type; 11001 } 11002 11003 static void hclge_get_mdix_mode(struct hnae3_handle *handle, 11004 u8 *tp_mdix_ctrl, u8 *tp_mdix) 11005 { 11006 struct hclge_vport *vport = hclge_get_vport(handle); 11007 struct hclge_dev *hdev = vport->back; 11008 struct phy_device *phydev = hdev->hw.mac.phydev; 11009 int mdix_ctrl, mdix, is_resolved; 11010 unsigned int retval; 11011 11012 if (!phydev) { 11013 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 11014 *tp_mdix = ETH_TP_MDI_INVALID; 11015 return; 11016 } 11017 11018 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); 11019 11020 retval = phy_read(phydev, HCLGE_PHY_CSC_REG); 11021 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, 11022 HCLGE_PHY_MDIX_CTRL_S); 11023 11024 retval = phy_read(phydev, HCLGE_PHY_CSS_REG); 11025 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); 11026 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); 11027 11028 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); 11029 11030 switch (mdix_ctrl) { 11031 case 0x0: 11032 *tp_mdix_ctrl = ETH_TP_MDI; 11033 break; 11034 case 0x1: 11035 *tp_mdix_ctrl = ETH_TP_MDI_X; 11036 break; 11037 case 0x3: 11038 *tp_mdix_ctrl = ETH_TP_MDI_AUTO; 11039 break; 11040 default: 11041 *tp_mdix_ctrl = ETH_TP_MDI_INVALID; 11042 break; 11043 } 11044 11045 if (!is_resolved) 11046 *tp_mdix = ETH_TP_MDI_INVALID; 11047 else if (mdix) 11048 *tp_mdix = ETH_TP_MDI_X; 11049 else 11050 *tp_mdix = ETH_TP_MDI; 11051 } 11052 11053 static void hclge_info_show(struct hclge_dev *hdev) 11054 { 11055 struct hnae3_handle *handle = &hdev->vport->nic; 11056 struct device *dev = &hdev->pdev->dev; 11057 11058 dev_info(dev, "PF info begin:\n"); 11059 11060 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 11061 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 11062 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 11063 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 11064 dev_info(dev, "Numbers of VF for this PF: 
%u\n", hdev->num_req_vfs); 11065 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 11066 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); 11067 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); 11068 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); 11069 dev_info(dev, "This is %s PF\n", 11070 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); 11071 dev_info(dev, "DCB %s\n", 11072 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable"); 11073 dev_info(dev, "MQPRIO %s\n", 11074 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable"); 11075 dev_info(dev, "Default tx spare buffer size: %u\n", 11076 hdev->tx_spare_buf_size); 11077 11078 dev_info(dev, "PF info end.\n"); 11079 } 11080 11081 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 11082 struct hclge_vport *vport) 11083 { 11084 struct hnae3_client *client = vport->nic.client; 11085 struct hclge_dev *hdev = ae_dev->priv; 11086 int rst_cnt = hdev->rst_stats.reset_cnt; 11087 int ret; 11088 11089 ret = client->ops->init_instance(&vport->nic); 11090 if (ret) 11091 return ret; 11092 11093 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11094 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 11095 rst_cnt != hdev->rst_stats.reset_cnt) { 11096 ret = -EBUSY; 11097 goto init_nic_err; 11098 } 11099 11100 /* Enable nic hw error interrupts */ 11101 ret = hclge_config_nic_hw_error(hdev, true); 11102 if (ret) { 11103 dev_err(&ae_dev->pdev->dev, 11104 "fail(%d) to enable hw error interrupts\n", ret); 11105 goto init_nic_err; 11106 } 11107 11108 hnae3_set_client_init_flag(client, ae_dev, 1); 11109 11110 if (netif_msg_drv(&hdev->vport->nic)) 11111 hclge_info_show(hdev); 11112 11113 return ret; 11114 11115 init_nic_err: 11116 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11117 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11118 msleep(HCLGE_WAIT_RESET_DONE); 11119 11120 client->ops->uninit_instance(&vport->nic, 0); 11121 11122 return ret; 11123 } 11124 11125 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 11126 struct hclge_vport *vport) 11127 { 11128 struct hclge_dev *hdev = ae_dev->priv; 11129 struct hnae3_client *client; 11130 int rst_cnt; 11131 int ret; 11132 11133 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 11134 !hdev->nic_client) 11135 return 0; 11136 11137 client = hdev->roce_client; 11138 ret = hclge_init_roce_base_info(vport); 11139 if (ret) 11140 return ret; 11141 11142 rst_cnt = hdev->rst_stats.reset_cnt; 11143 ret = client->ops->init_instance(&vport->roce); 11144 if (ret) 11145 return ret; 11146 11147 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11148 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || 11149 rst_cnt != hdev->rst_stats.reset_cnt) { 11150 ret = -EBUSY; 11151 goto init_roce_err; 11152 } 11153 11154 /* Enable roce ras interrupts */ 11155 ret = hclge_config_rocee_ras_interrupt(hdev, true); 11156 if (ret) { 11157 dev_err(&ae_dev->pdev->dev, 11158 "fail(%d) to enable roce ras interrupts\n", ret); 11159 goto init_roce_err; 11160 } 11161 11162 hnae3_set_client_init_flag(client, ae_dev, 1); 11163 11164 return 0; 11165 11166 init_roce_err: 11167 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11168 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11169 msleep(HCLGE_WAIT_RESET_DONE); 11170 11171 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); 11172 11173 return ret; 11174 } 11175 11176 static int hclge_init_client_instance(struct 
hnae3_client *client, 11177 struct hnae3_ae_dev *ae_dev) 11178 { 11179 struct hclge_dev *hdev = ae_dev->priv; 11180 struct hclge_vport *vport = &hdev->vport[0]; 11181 int ret; 11182 11183 switch (client->type) { 11184 case HNAE3_CLIENT_KNIC: 11185 hdev->nic_client = client; 11186 vport->nic.client = client; 11187 ret = hclge_init_nic_client_instance(ae_dev, vport); 11188 if (ret) 11189 goto clear_nic; 11190 11191 ret = hclge_init_roce_client_instance(ae_dev, vport); 11192 if (ret) 11193 goto clear_roce; 11194 11195 break; 11196 case HNAE3_CLIENT_ROCE: 11197 if (hnae3_dev_roce_supported(hdev)) { 11198 hdev->roce_client = client; 11199 vport->roce.client = client; 11200 } 11201 11202 ret = hclge_init_roce_client_instance(ae_dev, vport); 11203 if (ret) 11204 goto clear_roce; 11205 11206 break; 11207 default: 11208 return -EINVAL; 11209 } 11210 11211 return 0; 11212 11213 clear_nic: 11214 hdev->nic_client = NULL; 11215 vport->nic.client = NULL; 11216 return ret; 11217 clear_roce: 11218 hdev->roce_client = NULL; 11219 vport->roce.client = NULL; 11220 return ret; 11221 } 11222 11223 static void hclge_uninit_client_instance(struct hnae3_client *client, 11224 struct hnae3_ae_dev *ae_dev) 11225 { 11226 struct hclge_dev *hdev = ae_dev->priv; 11227 struct hclge_vport *vport = &hdev->vport[0]; 11228 11229 if (hdev->roce_client) { 11230 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); 11231 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11232 msleep(HCLGE_WAIT_RESET_DONE); 11233 11234 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); 11235 hdev->roce_client = NULL; 11236 vport->roce.client = NULL; 11237 } 11238 if (client->type == HNAE3_CLIENT_ROCE) 11239 return; 11240 if (hdev->nic_client && client->ops->uninit_instance) { 11241 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); 11242 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) 11243 msleep(HCLGE_WAIT_RESET_DONE); 11244 11245 client->ops->uninit_instance(&vport->nic, 0); 11246 hdev->nic_client = NULL; 11247 vport->nic.client = NULL; 11248 } 11249 } 11250 11251 static int hclge_dev_mem_map(struct hclge_dev *hdev) 11252 { 11253 struct pci_dev *pdev = hdev->pdev; 11254 struct hclge_hw *hw = &hdev->hw; 11255 11256 /* for device does not have device memory, return directly */ 11257 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR))) 11258 return 0; 11259 11260 hw->hw.mem_base = 11261 devm_ioremap_wc(&pdev->dev, 11262 pci_resource_start(pdev, HCLGE_MEM_BAR), 11263 pci_resource_len(pdev, HCLGE_MEM_BAR)); 11264 if (!hw->hw.mem_base) { 11265 dev_err(&pdev->dev, "failed to map device memory\n"); 11266 return -EFAULT; 11267 } 11268 11269 return 0; 11270 } 11271 11272 static int hclge_pci_init(struct hclge_dev *hdev) 11273 { 11274 struct pci_dev *pdev = hdev->pdev; 11275 struct hclge_hw *hw; 11276 int ret; 11277 11278 ret = pci_enable_device(pdev); 11279 if (ret) { 11280 dev_err(&pdev->dev, "failed to enable PCI device\n"); 11281 return ret; 11282 } 11283 11284 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 11285 if (ret) { 11286 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 11287 if (ret) { 11288 dev_err(&pdev->dev, 11289 "can't set consistent PCI DMA"); 11290 goto err_disable_device; 11291 } 11292 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); 11293 } 11294 11295 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); 11296 if (ret) { 11297 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 11298 goto err_disable_device; 11299 } 11300 11301 pci_set_master(pdev); 
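/* Descriptive note: BAR2 is mapped first for the command/configuration
 * register space; the optional device memory BAR is mapped afterwards by
 * hclge_dev_mem_map(), which simply returns 0 when the device does not
 * expose it.
 */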
11302 hw = &hdev->hw; 11303 hw->hw.io_base = pcim_iomap(pdev, 2, 0); 11304 if (!hw->hw.io_base) { 11305 dev_err(&pdev->dev, "Can't map configuration register space\n"); 11306 ret = -ENOMEM; 11307 goto err_release_regions; 11308 } 11309 11310 ret = hclge_dev_mem_map(hdev); 11311 if (ret) 11312 goto err_unmap_io_base; 11313 11314 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); 11315 11316 return 0; 11317 11318 err_unmap_io_base: 11319 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11320 err_release_regions: 11321 pci_release_regions(pdev); 11322 err_disable_device: 11323 pci_disable_device(pdev); 11324 11325 return ret; 11326 } 11327 11328 static void hclge_pci_uninit(struct hclge_dev *hdev) 11329 { 11330 struct pci_dev *pdev = hdev->pdev; 11331 11332 if (hdev->hw.hw.mem_base) 11333 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 11334 11335 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11336 pci_free_irq_vectors(pdev); 11337 pci_release_mem_regions(pdev); 11338 pci_disable_device(pdev); 11339 } 11340 11341 static void hclge_state_init(struct hclge_dev *hdev) 11342 { 11343 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); 11344 set_bit(HCLGE_STATE_DOWN, &hdev->state); 11345 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); 11346 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11347 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); 11348 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); 11349 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); 11350 } 11351 11352 static void hclge_state_uninit(struct hclge_dev *hdev) 11353 { 11354 set_bit(HCLGE_STATE_DOWN, &hdev->state); 11355 set_bit(HCLGE_STATE_REMOVING, &hdev->state); 11356 11357 if (hdev->reset_timer.function) 11358 del_timer_sync(&hdev->reset_timer); 11359 if (hdev->service_task.work.func) 11360 cancel_delayed_work_sync(&hdev->service_task); 11361 } 11362 11363 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 11364 enum hnae3_reset_type rst_type) 11365 { 11366 #define HCLGE_RESET_RETRY_WAIT_MS 500 11367 #define HCLGE_RESET_RETRY_CNT 5 11368 11369 struct hclge_dev *hdev = ae_dev->priv; 11370 int retry_cnt = 0; 11371 int ret; 11372 11373 while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) { 11374 down(&hdev->reset_sem); 11375 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11376 hdev->reset_type = rst_type; 11377 ret = hclge_reset_prepare(hdev); 11378 if (!ret && !hdev->reset_pending) 11379 break; 11380 11381 dev_err(&hdev->pdev->dev, 11382 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 11383 ret, hdev->reset_pending, retry_cnt); 11384 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11385 up(&hdev->reset_sem); 11386 msleep(HCLGE_RESET_RETRY_WAIT_MS); 11387 } 11388 11389 /* disable misc vector before reset done */ 11390 hclge_enable_vector(&hdev->misc_vector, false); 11391 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 11392 11393 if (hdev->reset_type == HNAE3_FLR_RESET) 11394 hdev->rst_stats.flr_rst_cnt++; 11395 } 11396 11397 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev) 11398 { 11399 struct hclge_dev *hdev = ae_dev->priv; 11400 int ret; 11401 11402 hclge_enable_vector(&hdev->misc_vector, true); 11403 11404 ret = hclge_reset_rebuild(hdev); 11405 if (ret) 11406 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); 11407 11408 hdev->reset_type = HNAE3_NONE_RESET; 11409 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); 11410 up(&hdev->reset_sem); 11411 } 11412 11413 static void hclge_clear_resetting_state(struct hclge_dev *hdev) 11414 { 11415 u16 i; 11416 
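
	/* Tell the firmware that each vport has finished resetting, so no
	 * VF is left flagged with FUNC_RST_ING after a PF reset.
	 */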
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear vport's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vport(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

static int hclge_clear_hw_resource(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware; it will fail
	 * with older firmware. -EOPNOTSUPP can only be returned by older
	 * firmware running this command, so to keep backward compatibility
	 * we override that value and return success.
	 */
	if (ret && ret != -EOPNOTSUPP) {
		dev_err(&hdev->pdev->dev,
			"failed to clear hw resource, ret = %d\n", ret);
		return ret;
	}
	return 0;
}

static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}

static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return &vport->back->hw.mac.wol;
}

static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
					u32 *wol_supported)
{
	struct hclge_query_wol_supported_cmd *wol_supported_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
				   true);
	wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query wol supported, ret = %d\n", ret);
		return ret;
	}

	*wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);

	return 0;
}

static int hclge_set_wol_cfg(struct hclge_dev *hdev,
			     struct hclge_wol_info *wol_info)
{
	struct hclge_wol_cfg_cmd *wol_cfg_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
	wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
	wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
	wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
	memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set wol config, ret = %d\n", ret);

	return ret;
}

static int hclge_update_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	return hclge_set_wol_cfg(hdev, wol_info);
}

static int hclge_init_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info =
		&hdev->hw.mac.wol;
	int ret;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	memset(wol_info, 0, sizeof(struct hclge_wol_info));
	ret = hclge_get_wol_supported_mode(hdev,
					   &wol_info->wol_support_mode);
	if (ret) {
		wol_info->wol_support_mode = 0;
		return ret;
	}

	return hclge_update_wol(hdev);
}

static void hclge_get_wol(struct hnae3_handle *handle,
			  struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);

	wol->supported = wol_info->wol_support_mode;
	wol->wolopts = wol_info->wol_current_mode;
	if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
		memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
}

static int hclge_set_wol(struct hnae3_handle *handle,
			 struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
	struct hclge_vport *vport = hclge_get_vport(handle);
	u32 wol_mode;
	int ret;

	wol_mode = wol->wolopts;
	if (wol_mode & ~wol_info->wol_support_mode)
		return -EINVAL;

	wol_info->wol_current_mode = wol_mode;
	if (wol_mode & WAKE_MAGICSECURE) {
		memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
		wol_info->wol_sopass_size = SOPASS_MAX;
	} else {
		wol_info->wol_sopass_size = 0;
	}

	ret = hclge_set_wol_cfg(vport->back, wol_info);
	if (ret)
		wol_info->wol_current_mode = 0;

	return ret;
}

static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2-layer VLAN */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	ret = hclge_devlink_init(hdev);
	if (ret)
		goto err_pci_uninit;

	/* Initialize the firmware command queue */
	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
	if (ret)
		goto err_devlink_uninit;

	/* Initialize the firmware command interface */
	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_clear_hw_resource(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;
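
	/* Allocate task queue pairs and vports, then map the TQPs onto the
	 * vports before the MAC, VLAN, scheduler and RSS blocks are set up.
	 */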
11654 11655 ret = hclge_alloc_tqps(hdev); 11656 if (ret) { 11657 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); 11658 goto err_msi_irq_uninit; 11659 } 11660 11661 ret = hclge_alloc_vport(hdev); 11662 if (ret) 11663 goto err_msi_irq_uninit; 11664 11665 ret = hclge_map_tqp(hdev); 11666 if (ret) 11667 goto err_msi_irq_uninit; 11668 11669 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { 11670 clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); 11671 if (hnae3_dev_phy_imp_supported(hdev)) 11672 ret = hclge_update_tp_port_info(hdev); 11673 else 11674 ret = hclge_mac_mdio_config(hdev); 11675 11676 if (ret) 11677 goto err_msi_irq_uninit; 11678 } 11679 11680 ret = hclge_init_umv_space(hdev); 11681 if (ret) 11682 goto err_mdiobus_unreg; 11683 11684 ret = hclge_mac_init(hdev); 11685 if (ret) { 11686 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); 11687 goto err_mdiobus_unreg; 11688 } 11689 11690 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); 11691 if (ret) { 11692 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); 11693 goto err_mdiobus_unreg; 11694 } 11695 11696 ret = hclge_config_gro(hdev); 11697 if (ret) 11698 goto err_mdiobus_unreg; 11699 11700 ret = hclge_init_vlan_config(hdev); 11701 if (ret) { 11702 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); 11703 goto err_mdiobus_unreg; 11704 } 11705 11706 ret = hclge_tm_schd_init(hdev); 11707 if (ret) { 11708 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); 11709 goto err_mdiobus_unreg; 11710 } 11711 11712 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, 11713 &hdev->rss_cfg); 11714 if (ret) { 11715 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 11716 goto err_mdiobus_unreg; 11717 } 11718 11719 ret = hclge_rss_init_hw(hdev); 11720 if (ret) { 11721 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); 11722 goto err_mdiobus_unreg; 11723 } 11724 11725 ret = init_mgr_tbl(hdev); 11726 if (ret) { 11727 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); 11728 goto err_mdiobus_unreg; 11729 } 11730 11731 ret = hclge_init_fd_config(hdev); 11732 if (ret) { 11733 dev_err(&pdev->dev, 11734 "fd table init fail, ret=%d\n", ret); 11735 goto err_mdiobus_unreg; 11736 } 11737 11738 ret = hclge_ptp_init(hdev); 11739 if (ret) 11740 goto err_mdiobus_unreg; 11741 11742 ret = hclge_update_port_info(hdev); 11743 if (ret) 11744 goto err_mdiobus_unreg; 11745 11746 INIT_KFIFO(hdev->mac_tnl_log); 11747 11748 hclge_dcb_ops_set(hdev); 11749 11750 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); 11751 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); 11752 11753 hclge_clear_all_event_cause(hdev); 11754 hclge_clear_resetting_state(hdev); 11755 11756 /* Log and clear the hw errors those already occurred */ 11757 if (hnae3_dev_ras_imp_supported(hdev)) 11758 hclge_handle_occurred_error(hdev); 11759 else 11760 hclge_handle_all_hns_hw_errors(ae_dev); 11761 11762 /* request delayed reset for the error recovery because an immediate 11763 * global reset on a PF affecting pending initialization of other PFs 11764 */ 11765 if (ae_dev->hw_err_reset_req) { 11766 enum hnae3_reset_type reset_level; 11767 11768 reset_level = hclge_get_reset_level(ae_dev, 11769 &ae_dev->hw_err_reset_req); 11770 hclge_set_def_reset_request(ae_dev, reset_level); 11771 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); 11772 } 11773 11774 hclge_init_rxd_adv_layout(hdev); 11775 11776 /* Enable MISC vector(vector0) */ 11777 hclge_enable_vector(&hdev->misc_vector, true); 11778 
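
	/* Wake-on-LAN setup is best effort: a failure here is only logged
	 * and does not abort device initialization.
	 */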
11779 ret = hclge_init_wol(hdev); 11780 if (ret) 11781 dev_warn(&pdev->dev, 11782 "failed to wake on lan init, ret = %d\n", ret); 11783 11784 hclge_state_init(hdev); 11785 hdev->last_reset_time = jiffies; 11786 11787 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", 11788 HCLGE_DRIVER_NAME); 11789 11790 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); 11791 11792 return 0; 11793 11794 err_mdiobus_unreg: 11795 if (hdev->hw.mac.phydev) 11796 mdiobus_unregister(hdev->hw.mac.mdio_bus); 11797 err_msi_irq_uninit: 11798 hclge_misc_irq_uninit(hdev); 11799 err_msi_uninit: 11800 pci_free_irq_vectors(pdev); 11801 err_cmd_uninit: 11802 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 11803 err_devlink_uninit: 11804 hclge_devlink_uninit(hdev); 11805 err_pci_uninit: 11806 pcim_iounmap(pdev, hdev->hw.hw.io_base); 11807 pci_release_regions(pdev); 11808 pci_disable_device(pdev); 11809 out: 11810 mutex_destroy(&hdev->vport_lock); 11811 return ret; 11812 } 11813 11814 static void hclge_stats_clear(struct hclge_dev *hdev) 11815 { 11816 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); 11817 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); 11818 } 11819 11820 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 11821 { 11822 return hclge_config_switch_param(hdev, vf, enable, 11823 HCLGE_SWITCH_ANTI_SPOOF_MASK); 11824 } 11825 11826 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) 11827 { 11828 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, 11829 HCLGE_FILTER_FE_NIC_INGRESS_B, 11830 enable, vf); 11831 } 11832 11833 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) 11834 { 11835 int ret; 11836 11837 ret = hclge_set_mac_spoofchk(hdev, vf, enable); 11838 if (ret) { 11839 dev_err(&hdev->pdev->dev, 11840 "Set vf %d mac spoof check %s failed, ret=%d\n", 11841 vf, enable ? "on" : "off", ret); 11842 return ret; 11843 } 11844 11845 ret = hclge_set_vlan_spoofchk(hdev, vf, enable); 11846 if (ret) 11847 dev_err(&hdev->pdev->dev, 11848 "Set vf %d vlan spoof check %s failed, ret=%d\n", 11849 vf, enable ? "on" : "off", ret); 11850 11851 return ret; 11852 } 11853 11854 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, 11855 bool enable) 11856 { 11857 struct hclge_vport *vport = hclge_get_vport(handle); 11858 struct hclge_dev *hdev = vport->back; 11859 u32 new_spoofchk = enable ? 
1 : 0; 11860 int ret; 11861 11862 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 11863 return -EOPNOTSUPP; 11864 11865 vport = hclge_get_vf_vport(hdev, vf); 11866 if (!vport) 11867 return -EINVAL; 11868 11869 if (vport->vf_info.spoofchk == new_spoofchk) 11870 return 0; 11871 11872 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) 11873 dev_warn(&hdev->pdev->dev, 11874 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", 11875 vf); 11876 else if (enable && hclge_is_umv_space_full(vport, true)) 11877 dev_warn(&hdev->pdev->dev, 11878 "vf %d mac table is full, enable spoof check may cause its packet send fail\n", 11879 vf); 11880 11881 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); 11882 if (ret) 11883 return ret; 11884 11885 vport->vf_info.spoofchk = new_spoofchk; 11886 return 0; 11887 } 11888 11889 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) 11890 { 11891 struct hclge_vport *vport = hdev->vport; 11892 int ret; 11893 int i; 11894 11895 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 11896 return 0; 11897 11898 /* resume the vf spoof check state after reset */ 11899 for (i = 0; i < hdev->num_alloc_vport; i++) { 11900 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, 11901 vport->vf_info.spoofchk); 11902 if (ret) 11903 return ret; 11904 11905 vport++; 11906 } 11907 11908 return 0; 11909 } 11910 11911 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) 11912 { 11913 struct hclge_vport *vport = hclge_get_vport(handle); 11914 struct hclge_dev *hdev = vport->back; 11915 u32 new_trusted = enable ? 1 : 0; 11916 11917 vport = hclge_get_vf_vport(hdev, vf); 11918 if (!vport) 11919 return -EINVAL; 11920 11921 if (vport->vf_info.trusted == new_trusted) 11922 return 0; 11923 11924 vport->vf_info.trusted = new_trusted; 11925 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 11926 hclge_task_schedule(hdev, 0); 11927 11928 return 0; 11929 } 11930 11931 static void hclge_reset_vf_rate(struct hclge_dev *hdev) 11932 { 11933 int ret; 11934 int vf; 11935 11936 /* reset vf rate to default value */ 11937 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { 11938 struct hclge_vport *vport = &hdev->vport[vf]; 11939 11940 vport->vf_info.max_tx_rate = 0; 11941 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); 11942 if (ret) 11943 dev_err(&hdev->pdev->dev, 11944 "vf%d failed to reset to default, ret=%d\n", 11945 vf - HCLGE_VF_VPORT_START_NUM, ret); 11946 } 11947 } 11948 11949 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, 11950 int min_tx_rate, int max_tx_rate) 11951 { 11952 if (min_tx_rate != 0 || 11953 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { 11954 dev_err(&hdev->pdev->dev, 11955 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n", 11956 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); 11957 return -EINVAL; 11958 } 11959 11960 return 0; 11961 } 11962 11963 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, 11964 int min_tx_rate, int max_tx_rate, bool force) 11965 { 11966 struct hclge_vport *vport = hclge_get_vport(handle); 11967 struct hclge_dev *hdev = vport->back; 11968 int ret; 11969 11970 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); 11971 if (ret) 11972 return ret; 11973 11974 vport = hclge_get_vf_vport(hdev, vf); 11975 if (!vport) 11976 return -EINVAL; 11977 11978 if (!force && max_tx_rate == vport->vf_info.max_tx_rate) 11979 return 0; 11980 11981 ret = hclge_tm_qs_shaper_cfg(vport, 
				      max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}

static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* a max_tx_rate of zero means the rate is unlimited; after
		 * reset the firmware has already restored that default, so
		 * just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
		vport++;
	}
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	/* NOTE: a PF reset does not need to clear or restore the PF and VF
	 * table entries, so do not clean the tables held in memory here.
	 */
	if (hdev->reset_type == HNAE3_IMP_RESET ||
	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
		hclge_reset_umv_space(hdev);
	}

	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tp_port_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
			ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
return ret; 12112 } 12113 12114 ret = hclge_init_fd_config(hdev); 12115 if (ret) { 12116 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret); 12117 return ret; 12118 } 12119 12120 ret = hclge_ptp_init(hdev); 12121 if (ret) 12122 return ret; 12123 12124 /* Log and clear the hw errors those already occurred */ 12125 if (hnae3_dev_ras_imp_supported(hdev)) 12126 hclge_handle_occurred_error(hdev); 12127 else 12128 hclge_handle_all_hns_hw_errors(ae_dev); 12129 12130 /* Re-enable the hw error interrupts because 12131 * the interrupts get disabled on global reset. 12132 */ 12133 ret = hclge_config_nic_hw_error(hdev, true); 12134 if (ret) { 12135 dev_err(&pdev->dev, 12136 "fail(%d) to re-enable NIC hw error interrupts\n", 12137 ret); 12138 return ret; 12139 } 12140 12141 if (hdev->roce_client) { 12142 ret = hclge_config_rocee_ras_interrupt(hdev, true); 12143 if (ret) { 12144 dev_err(&pdev->dev, 12145 "fail(%d) to re-enable roce ras interrupts\n", 12146 ret); 12147 return ret; 12148 } 12149 } 12150 12151 hclge_reset_vport_state(hdev); 12152 ret = hclge_reset_vport_spoofchk(hdev); 12153 if (ret) 12154 return ret; 12155 12156 ret = hclge_resume_vf_rate(hdev); 12157 if (ret) 12158 return ret; 12159 12160 hclge_init_rxd_adv_layout(hdev); 12161 12162 ret = hclge_update_wol(hdev); 12163 if (ret) 12164 dev_warn(&pdev->dev, 12165 "failed to update wol config, ret = %d\n", ret); 12166 12167 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", 12168 HCLGE_DRIVER_NAME); 12169 12170 return 0; 12171 } 12172 12173 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 12174 { 12175 struct hclge_dev *hdev = ae_dev->priv; 12176 struct hclge_mac *mac = &hdev->hw.mac; 12177 12178 hclge_reset_vf_rate(hdev); 12179 hclge_clear_vf_vlan(hdev); 12180 hclge_state_uninit(hdev); 12181 hclge_ptp_uninit(hdev); 12182 hclge_uninit_rxd_adv_layout(hdev); 12183 hclge_uninit_mac_table(hdev); 12184 hclge_del_all_fd_entries(hdev); 12185 12186 if (mac->phydev) 12187 mdiobus_unregister(mac->mdio_bus); 12188 12189 /* Disable MISC vector(vector0) */ 12190 hclge_enable_vector(&hdev->misc_vector, false); 12191 synchronize_irq(hdev->misc_vector.vector_irq); 12192 12193 /* Disable all hw interrupts */ 12194 hclge_config_mac_tnl_int(hdev, false); 12195 hclge_config_nic_hw_error(hdev, false); 12196 hclge_config_rocee_ras_interrupt(hdev, false); 12197 12198 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 12199 hclge_misc_irq_uninit(hdev); 12200 hclge_devlink_uninit(hdev); 12201 hclge_pci_uninit(hdev); 12202 hclge_uninit_vport_vlan_table(hdev); 12203 mutex_destroy(&hdev->vport_lock); 12204 ae_dev->priv = NULL; 12205 } 12206 12207 static u32 hclge_get_max_channels(struct hnae3_handle *handle) 12208 { 12209 struct hclge_vport *vport = hclge_get_vport(handle); 12210 struct hclge_dev *hdev = vport->back; 12211 12212 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); 12213 } 12214 12215 static void hclge_get_channels(struct hnae3_handle *handle, 12216 struct ethtool_channels *ch) 12217 { 12218 ch->max_combined = hclge_get_max_channels(handle); 12219 ch->other_count = 1; 12220 ch->max_other = 1; 12221 ch->combined_count = handle->kinfo.rss_size; 12222 } 12223 12224 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, 12225 u16 *alloc_tqps, u16 *max_rss_size) 12226 { 12227 struct hclge_vport *vport = hclge_get_vport(handle); 12228 struct hclge_dev *hdev = vport->back; 12229 12230 *alloc_tqps = vport->alloc_tqps; 12231 *max_rss_size = hdev->pf_rss_size_max; 12232 } 12233 12234 static int 
hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle) 12235 { 12236 struct hclge_vport *vport = hclge_get_vport(handle); 12237 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; 12238 struct hclge_dev *hdev = vport->back; 12239 u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; 12240 u16 tc_valid[HCLGE_MAX_TC_NUM]; 12241 u16 roundup_size; 12242 unsigned int i; 12243 12244 roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); 12245 roundup_size = ilog2(roundup_size); 12246 /* Set the RSS TC mode according to the new RSS size */ 12247 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 12248 tc_valid[i] = 0; 12249 12250 if (!(hdev->hw_tc_map & BIT(i))) 12251 continue; 12252 12253 tc_valid[i] = 1; 12254 tc_size[i] = roundup_size; 12255 tc_offset[i] = vport->nic.kinfo.rss_size * i; 12256 } 12257 12258 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, 12259 tc_size); 12260 } 12261 12262 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 12263 bool rxfh_configured) 12264 { 12265 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); 12266 struct hclge_vport *vport = hclge_get_vport(handle); 12267 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; 12268 struct hclge_dev *hdev = vport->back; 12269 u16 cur_rss_size = kinfo->rss_size; 12270 u16 cur_tqps = kinfo->num_tqps; 12271 u32 *rss_indir; 12272 unsigned int i; 12273 int ret; 12274 12275 kinfo->req_rss_size = new_tqps_num; 12276 12277 ret = hclge_tm_vport_map_update(hdev); 12278 if (ret) { 12279 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); 12280 return ret; 12281 } 12282 12283 ret = hclge_set_rss_tc_mode_cfg(handle); 12284 if (ret) 12285 return ret; 12286 12287 /* RSS indirection table has been configured by user */ 12288 if (rxfh_configured) 12289 goto out; 12290 12291 /* Reinitializes the rss indirect table according to the new RSS size */ 12292 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32), 12293 GFP_KERNEL); 12294 if (!rss_indir) 12295 return -ENOMEM; 12296 12297 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) 12298 rss_indir[i] = i % kinfo->rss_size; 12299 12300 ret = hclge_set_rss(handle, rss_indir, NULL, 0); 12301 if (ret) 12302 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 12303 ret); 12304 12305 kfree(rss_indir); 12306 12307 out: 12308 if (!ret) 12309 dev_info(&hdev->pdev->dev, 12310 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 12311 cur_rss_size, kinfo->rss_size, 12312 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 12313 12314 return ret; 12315 } 12316 12317 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) 12318 { 12319 struct hclge_set_led_state_cmd *req; 12320 struct hclge_desc desc; 12321 int ret; 12322 12323 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); 12324 12325 req = (struct hclge_set_led_state_cmd *)desc.data; 12326 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, 12327 HCLGE_LED_LOCATE_STATE_S, locate_led_status); 12328 12329 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12330 if (ret) 12331 dev_err(&hdev->pdev->dev, 12332 "Send set led state cmd error, ret =%d\n", ret); 12333 12334 return ret; 12335 } 12336 12337 enum hclge_led_status { 12338 HCLGE_LED_OFF, 12339 HCLGE_LED_ON, 12340 HCLGE_LED_NO_CHANGE = 0xFF, 12341 }; 12342 12343 static int hclge_set_led_id(struct hnae3_handle *handle, 12344 enum ethtool_phys_id_state status) 12345 { 12346 struct hclge_vport *vport = hclge_get_vport(handle); 12347 struct hclge_dev *hdev = vport->back; 
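
	/* Map the ethtool physical-ID request onto the locate LED state. */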
12348 12349 switch (status) { 12350 case ETHTOOL_ID_ACTIVE: 12351 return hclge_set_led_status(hdev, HCLGE_LED_ON); 12352 case ETHTOOL_ID_INACTIVE: 12353 return hclge_set_led_status(hdev, HCLGE_LED_OFF); 12354 default: 12355 return -EINVAL; 12356 } 12357 } 12358 12359 static void hclge_get_link_mode(struct hnae3_handle *handle, 12360 unsigned long *supported, 12361 unsigned long *advertising) 12362 { 12363 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS); 12364 struct hclge_vport *vport = hclge_get_vport(handle); 12365 struct hclge_dev *hdev = vport->back; 12366 unsigned int idx = 0; 12367 12368 for (; idx < size; idx++) { 12369 supported[idx] = hdev->hw.mac.supported[idx]; 12370 advertising[idx] = hdev->hw.mac.advertising[idx]; 12371 } 12372 } 12373 12374 static int hclge_gro_en(struct hnae3_handle *handle, bool enable) 12375 { 12376 struct hclge_vport *vport = hclge_get_vport(handle); 12377 struct hclge_dev *hdev = vport->back; 12378 bool gro_en_old = hdev->gro_en; 12379 int ret; 12380 12381 hdev->gro_en = enable; 12382 ret = hclge_config_gro(hdev); 12383 if (ret) 12384 hdev->gro_en = gro_en_old; 12385 12386 return ret; 12387 } 12388 12389 static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport) 12390 { 12391 struct hnae3_handle *handle = &vport->nic; 12392 struct hclge_dev *hdev = vport->back; 12393 bool uc_en = false; 12394 bool mc_en = false; 12395 u8 tmp_flags; 12396 bool bc_en; 12397 int ret; 12398 12399 if (vport->last_promisc_flags != vport->overflow_promisc_flags) { 12400 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12401 vport->last_promisc_flags = vport->overflow_promisc_flags; 12402 } 12403 12404 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, 12405 &vport->state)) 12406 return 0; 12407 12408 /* for PF */ 12409 if (!vport->vport_id) { 12410 tmp_flags = handle->netdev_flags | vport->last_promisc_flags; 12411 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE, 12412 tmp_flags & HNAE3_MPE); 12413 if (!ret) 12414 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, 12415 &vport->state); 12416 else 12417 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, 12418 &vport->state); 12419 return ret; 12420 } 12421 12422 /* for VF */ 12423 if (vport->vf_info.trusted) { 12424 uc_en = vport->vf_info.request_uc_en > 0 || 12425 vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE; 12426 mc_en = vport->vf_info.request_mc_en > 0 || 12427 vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE; 12428 } 12429 bc_en = vport->vf_info.request_bc_en > 0; 12430 12431 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, 12432 mc_en, bc_en); 12433 if (ret) { 12434 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); 12435 return ret; 12436 } 12437 hclge_set_vport_vlan_fltr_change(vport); 12438 12439 return 0; 12440 } 12441 12442 static void hclge_sync_promisc_mode(struct hclge_dev *hdev) 12443 { 12444 struct hclge_vport *vport; 12445 int ret; 12446 u16 i; 12447 12448 for (i = 0; i < hdev->num_alloc_vport; i++) { 12449 vport = &hdev->vport[i]; 12450 12451 ret = hclge_sync_vport_promisc_mode(vport); 12452 if (ret) 12453 return; 12454 } 12455 } 12456 12457 static bool hclge_module_existed(struct hclge_dev *hdev) 12458 { 12459 struct hclge_desc desc; 12460 u32 existed; 12461 int ret; 12462 12463 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true); 12464 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12465 if (ret) { 12466 dev_err(&hdev->pdev->dev, 12467 "failed to get SFP exist state, ret = %d\n", ret); 12468 return false; 12469 } 12470 12471 existed = 
le32_to_cpu(desc.data[0]); 12472 12473 return existed != 0; 12474 } 12475 12476 /* need 6 bds(total 140 bytes) in one reading 12477 * return the number of bytes actually read, 0 means read failed. 12478 */ 12479 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, 12480 u32 len, u8 *data) 12481 { 12482 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM]; 12483 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0; 12484 u16 read_len; 12485 u16 copy_len; 12486 int ret; 12487 int i; 12488 12489 /* setup all 6 bds to read module eeprom info. */ 12490 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) { 12491 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM, 12492 true); 12493 12494 /* bd0~bd4 need next flag */ 12495 if (i < HCLGE_SFP_INFO_CMD_NUM - 1) 12496 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 12497 } 12498 12499 /* setup bd0, this bd contains offset and read length. */ 12500 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data; 12501 sfp_info_bd0->offset = cpu_to_le16((u16)offset); 12502 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN); 12503 sfp_info_bd0->read_len = cpu_to_le16(read_len); 12504 12505 ret = hclge_cmd_send(&hdev->hw, desc, i); 12506 if (ret) { 12507 dev_err(&hdev->pdev->dev, 12508 "failed to get SFP eeprom info, ret = %d\n", ret); 12509 return 0; 12510 } 12511 12512 /* copy sfp info from bd0 to out buffer. */ 12513 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN); 12514 memcpy(data, sfp_info_bd0->data, copy_len); 12515 read_len = copy_len; 12516 12517 /* copy sfp info from bd1~bd5 to out buffer if needed. */ 12518 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) { 12519 if (read_len >= len) 12520 return read_len; 12521 12522 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN); 12523 memcpy(data + read_len, desc[i].data, copy_len); 12524 read_len += copy_len; 12525 } 12526 12527 return read_len; 12528 } 12529 12530 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, 12531 u32 len, u8 *data) 12532 { 12533 struct hclge_vport *vport = hclge_get_vport(handle); 12534 struct hclge_dev *hdev = vport->back; 12535 u32 read_len = 0; 12536 u16 data_len; 12537 12538 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) 12539 return -EOPNOTSUPP; 12540 12541 if (!hclge_module_existed(hdev)) 12542 return -ENXIO; 12543 12544 while (read_len < len) { 12545 data_len = hclge_get_sfp_eeprom_info(hdev, 12546 offset + read_len, 12547 len - read_len, 12548 data + read_len); 12549 if (!data_len) 12550 return -EIO; 12551 12552 read_len += data_len; 12553 } 12554 12555 return 0; 12556 } 12557 12558 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, 12559 u32 *status_code) 12560 { 12561 struct hclge_vport *vport = hclge_get_vport(handle); 12562 struct hclge_dev *hdev = vport->back; 12563 struct hclge_desc desc; 12564 int ret; 12565 12566 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) 12567 return -EOPNOTSUPP; 12568 12569 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); 12570 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 12571 if (ret) { 12572 dev_err(&hdev->pdev->dev, 12573 "failed to query link diagnosis info, ret = %d\n", ret); 12574 return ret; 12575 } 12576 12577 *status_code = le32_to_cpu(desc.data[0]); 12578 return 0; 12579 } 12580 12581 /* After disable sriov, VF still has some config and info need clean, 12582 * which configed by PF. 
12583 */ 12584 static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid) 12585 { 12586 struct hclge_dev *hdev = vport->back; 12587 struct hclge_vlan_info vlan_info; 12588 int ret; 12589 12590 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state); 12591 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); 12592 vport->need_notify = 0; 12593 vport->mps = 0; 12594 12595 /* after disable sriov, clean VF rate configured by PF */ 12596 ret = hclge_tm_qs_shaper_cfg(vport, 0); 12597 if (ret) 12598 dev_err(&hdev->pdev->dev, 12599 "failed to clean vf%d rate config, ret = %d\n", 12600 vfid, ret); 12601 12602 vlan_info.vlan_tag = 0; 12603 vlan_info.qos = 0; 12604 vlan_info.vlan_proto = ETH_P_8021Q; 12605 ret = hclge_update_port_base_vlan_cfg(vport, 12606 HNAE3_PORT_BASE_VLAN_DISABLE, 12607 &vlan_info); 12608 if (ret) 12609 dev_err(&hdev->pdev->dev, 12610 "failed to clean vf%d port base vlan, ret = %d\n", 12611 vfid, ret); 12612 12613 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); 12614 if (ret) 12615 dev_err(&hdev->pdev->dev, 12616 "failed to clean vf%d spoof config, ret = %d\n", 12617 vfid, ret); 12618 12619 memset(&vport->vf_info, 0, sizeof(vport->vf_info)); 12620 } 12621 12622 static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) 12623 { 12624 struct hclge_dev *hdev = ae_dev->priv; 12625 struct hclge_vport *vport; 12626 int i; 12627 12628 for (i = 0; i < num_vfs; i++) { 12629 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; 12630 12631 hclge_clear_vport_vf_info(vport, i); 12632 } 12633 } 12634 12635 static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode, 12636 u8 *priority) 12637 { 12638 struct hclge_vport *vport = hclge_get_vport(h); 12639 12640 if (dscp >= HNAE3_MAX_DSCP) 12641 return -EINVAL; 12642 12643 if (tc_mode) 12644 *tc_mode = vport->nic.kinfo.tc_map_mode; 12645 if (priority) 12646 *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 
0 : 12647 vport->nic.kinfo.dscp_prio[dscp]; 12648 12649 return 0; 12650 } 12651 12652 static const struct hnae3_ae_ops hclge_ops = { 12653 .init_ae_dev = hclge_init_ae_dev, 12654 .uninit_ae_dev = hclge_uninit_ae_dev, 12655 .reset_prepare = hclge_reset_prepare_general, 12656 .reset_done = hclge_reset_done, 12657 .init_client_instance = hclge_init_client_instance, 12658 .uninit_client_instance = hclge_uninit_client_instance, 12659 .map_ring_to_vector = hclge_map_ring_to_vector, 12660 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector, 12661 .get_vector = hclge_get_vector, 12662 .put_vector = hclge_put_vector, 12663 .set_promisc_mode = hclge_set_promisc_mode, 12664 .request_update_promisc_mode = hclge_request_update_promisc_mode, 12665 .set_loopback = hclge_set_loopback, 12666 .start = hclge_ae_start, 12667 .stop = hclge_ae_stop, 12668 .client_start = hclge_client_start, 12669 .client_stop = hclge_client_stop, 12670 .get_status = hclge_get_status, 12671 .get_ksettings_an_result = hclge_get_ksettings_an_result, 12672 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, 12673 .get_media_type = hclge_get_media_type, 12674 .check_port_speed = hclge_check_port_speed, 12675 .get_fec_stats = hclge_get_fec_stats, 12676 .get_fec = hclge_get_fec, 12677 .set_fec = hclge_set_fec, 12678 .get_rss_key_size = hclge_comm_get_rss_key_size, 12679 .get_rss = hclge_get_rss, 12680 .set_rss = hclge_set_rss, 12681 .set_rss_tuple = hclge_set_rss_tuple, 12682 .get_rss_tuple = hclge_get_rss_tuple, 12683 .get_tc_size = hclge_get_tc_size, 12684 .get_mac_addr = hclge_get_mac_addr, 12685 .set_mac_addr = hclge_set_mac_addr, 12686 .do_ioctl = hclge_do_ioctl, 12687 .add_uc_addr = hclge_add_uc_addr, 12688 .rm_uc_addr = hclge_rm_uc_addr, 12689 .add_mc_addr = hclge_add_mc_addr, 12690 .rm_mc_addr = hclge_rm_mc_addr, 12691 .set_autoneg = hclge_set_autoneg, 12692 .get_autoneg = hclge_get_autoneg, 12693 .restart_autoneg = hclge_restart_autoneg, 12694 .halt_autoneg = hclge_halt_autoneg, 12695 .get_pauseparam = hclge_get_pauseparam, 12696 .set_pauseparam = hclge_set_pauseparam, 12697 .set_mtu = hclge_set_mtu, 12698 .reset_queue = hclge_reset_tqp, 12699 .get_stats = hclge_get_stats, 12700 .get_mac_stats = hclge_get_mac_stat, 12701 .update_stats = hclge_update_stats, 12702 .get_strings = hclge_get_strings, 12703 .get_sset_count = hclge_get_sset_count, 12704 .get_fw_version = hclge_get_fw_version, 12705 .get_mdix_mode = hclge_get_mdix_mode, 12706 .enable_vlan_filter = hclge_enable_vlan_filter, 12707 .set_vlan_filter = hclge_set_vlan_filter, 12708 .set_vf_vlan_filter = hclge_set_vf_vlan_filter, 12709 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, 12710 .reset_event = hclge_reset_event, 12711 .get_reset_level = hclge_get_reset_level, 12712 .set_default_reset_request = hclge_set_def_reset_request, 12713 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, 12714 .set_channels = hclge_set_channels, 12715 .get_channels = hclge_get_channels, 12716 .get_regs_len = hclge_get_regs_len, 12717 .get_regs = hclge_get_regs, 12718 .set_led_id = hclge_set_led_id, 12719 .get_link_mode = hclge_get_link_mode, 12720 .add_fd_entry = hclge_add_fd_entry, 12721 .del_fd_entry = hclge_del_fd_entry, 12722 .get_fd_rule_cnt = hclge_get_fd_rule_cnt, 12723 .get_fd_rule_info = hclge_get_fd_rule_info, 12724 .get_fd_all_rules = hclge_get_all_rules, 12725 .enable_fd = hclge_enable_fd, 12726 .add_arfs_entry = hclge_add_fd_entry_by_arfs, 12727 .dbg_read_cmd = hclge_dbg_read_cmd, 12728 .handle_hw_ras_error = hclge_handle_hw_ras_error, 12729 .get_hw_reset_stat = 
hclge_get_hw_reset_stat, 12730 .ae_dev_resetting = hclge_ae_dev_resetting, 12731 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, 12732 .set_gro_en = hclge_gro_en, 12733 .get_global_queue_id = hclge_covert_handle_qid_global, 12734 .set_timer_task = hclge_set_timer_task, 12735 .mac_connect_phy = hclge_mac_connect_phy, 12736 .mac_disconnect_phy = hclge_mac_disconnect_phy, 12737 .get_vf_config = hclge_get_vf_config, 12738 .set_vf_link_state = hclge_set_vf_link_state, 12739 .set_vf_spoofchk = hclge_set_vf_spoofchk, 12740 .set_vf_trust = hclge_set_vf_trust, 12741 .set_vf_rate = hclge_set_vf_rate, 12742 .set_vf_mac = hclge_set_vf_mac, 12743 .get_module_eeprom = hclge_get_module_eeprom, 12744 .get_cmdq_stat = hclge_get_cmdq_stat, 12745 .add_cls_flower = hclge_add_cls_flower, 12746 .del_cls_flower = hclge_del_cls_flower, 12747 .cls_flower_active = hclge_is_cls_flower_active, 12748 .get_phy_link_ksettings = hclge_get_phy_link_ksettings, 12749 .set_phy_link_ksettings = hclge_set_phy_link_ksettings, 12750 .set_tx_hwts_info = hclge_ptp_set_tx_info, 12751 .get_rx_hwts = hclge_ptp_get_rx_hwts, 12752 .get_ts_info = hclge_ptp_get_ts_info, 12753 .get_link_diagnosis_info = hclge_get_link_diagnosis_info, 12754 .clean_vf_config = hclge_clean_vport_config, 12755 .get_dscp_prio = hclge_get_dscp_prio, 12756 .get_wol = hclge_get_wol, 12757 .set_wol = hclge_set_wol, 12758 }; 12759 12760 static struct hnae3_ae_algo ae_algo = { 12761 .ops = &hclge_ops, 12762 .pdev_id_table = ae_algo_pci_tbl, 12763 }; 12764 12765 static int __init hclge_init(void) 12766 { 12767 pr_info("%s is initializing\n", HCLGE_NAME); 12768 12769 hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); 12770 if (!hclge_wq) { 12771 pr_err("%s: failed to create workqueue\n", HCLGE_NAME); 12772 return -ENOMEM; 12773 } 12774 12775 hnae3_register_ae_algo(&ae_algo); 12776 12777 return 0; 12778 } 12779 12780 static void __exit hclge_exit(void) 12781 { 12782 hnae3_unregister_ae_algo_prepare(&ae_algo); 12783 hnae3_unregister_ae_algo(&ae_algo); 12784 destroy_workqueue(hclge_wq); 12785 } 12786 module_init(hclge_init); 12787 module_exit(hclge_exit); 12788 12789 MODULE_LICENSE("GPL"); 12790 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 12791 MODULE_DESCRIPTION("HCLGE Driver"); 12792 MODULE_VERSION(HCLGE_MOD_VERSION); 12793