xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision 57985788158a5a6b77612e531b9d89bcad06e47c)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
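/* Helpers for the MAC statistics tables below: HCLGE_MAC_STATS_FIELD_OFF()
 * yields the byte offset of a counter in struct hclge_mac_stats, and
 * HCLGE_STATS_READ() reads the u64 counter at such an offset, e.g.
 * HCLGE_STATS_READ(&hdev->mac_stats,
 *		    HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num))
 */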
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
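/* ethtool statistics for the MAC: each entry pairs the string reported to
 * userspace with the byte offset of the counter in struct hclge_mac_stats.
 */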
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48},
389 	{ OUTER_SRC_MAC, 48},
390 	{ OUTER_VLAN_TAG_FST, 16},
391 	{ OUTER_VLAN_TAG_SEC, 16},
392 	{ OUTER_ETH_TYPE, 16},
393 	{ OUTER_L2_RSV, 16},
394 	{ OUTER_IP_TOS, 8},
395 	{ OUTER_IP_PROTO, 8},
396 	{ OUTER_SRC_IP, 32},
397 	{ OUTER_DST_IP, 32},
398 	{ OUTER_L3_RSV, 16},
399 	{ OUTER_SRC_PORT, 16},
400 	{ OUTER_DST_PORT, 16},
401 	{ OUTER_L4_RSV, 32},
402 	{ OUTER_TUN_VNI, 24},
403 	{ OUTER_TUN_FLOW_ID, 8},
404 	{ INNER_DST_MAC, 48},
405 	{ INNER_SRC_MAC, 48},
406 	{ INNER_VLAN_TAG_FST, 16},
407 	{ INNER_VLAN_TAG_SEC, 16},
408 	{ INNER_ETH_TYPE, 16},
409 	{ INNER_L2_RSV, 16},
410 	{ INNER_IP_TOS, 8},
411 	{ INNER_IP_PROTO, 8},
412 	{ INNER_SRC_IP, 32},
413 	{ INNER_DST_IP, 32},
414 	{ INNER_L3_RSV, 16},
415 	{ INNER_SRC_PORT, 16},
416 	{ INNER_DST_PORT, 16},
417 	{ INNER_L4_RSV, 32},
418 };
419 
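/* Read the MAC statistics with the legacy fixed-size command
 * (HCLGE_OPC_STATS_MAC, 21 descriptors) and accumulate the returned
 * counters into hdev->mac_stats.
 */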
420 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
421 {
422 #define HCLGE_MAC_CMD_NUM 21
423 
424 	u64 *data = (u64 *)(&hdev->mac_stats);
425 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
426 	__le64 *desc_data;
427 	int i, k, n;
428 	int ret;
429 
430 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
432 	if (ret) {
433 		dev_err(&hdev->pdev->dev,
434 			"Get MAC pkt stats fail, status = %d.\n", ret);
435 
436 		return ret;
437 	}
438 
439 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440 		/* for special opcode 0032, only the first desc has the head */
441 		if (unlikely(i == 0)) {
442 			desc_data = (__le64 *)(&desc[i].data[0]);
443 			n = HCLGE_RD_FIRST_STATS_NUM;
444 		} else {
445 			desc_data = (__le64 *)(&desc[i]);
446 			n = HCLGE_RD_OTHER_STATS_NUM;
447 		}
448 
449 		for (k = 0; k < n; k++) {
450 			*data += le64_to_cpu(*desc_data);
451 			data++;
452 			desc_data++;
453 		}
454 	}
455 
456 	return 0;
457 }
458 
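/* Read the full MAC statistics set with HCLGE_OPC_STATS_MAC_ALL, using the
 * descriptor count previously reported by firmware, and accumulate the
 * returned counters into hdev->mac_stats.
 */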
459 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460 {
461 	u64 *data = (u64 *)(&hdev->mac_stats);
462 	struct hclge_desc *desc;
463 	__le64 *desc_data;
464 	u16 i, k, n;
465 	int ret;
466 
467 	/* This may be called inside atomic sections,
468 	 * so GFP_ATOMIC is more suitable here
469 	 */
470 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471 	if (!desc)
472 		return -ENOMEM;
473 
474 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476 	if (ret) {
477 		kfree(desc);
478 		return ret;
479 	}
480 
481 	for (i = 0; i < desc_num; i++) {
482 		/* for special opcode 0034, only the first desc has the head */
483 		if (i == 0) {
484 			desc_data = (__le64 *)(&desc[i].data[0]);
485 			n = HCLGE_RD_FIRST_STATS_NUM;
486 		} else {
487 			desc_data = (__le64 *)(&desc[i]);
488 			n = HCLGE_RD_OTHER_STATS_NUM;
489 		}
490 
491 		for (k = 0; k < n; k++) {
492 			*data += le64_to_cpu(*desc_data);
493 			data++;
494 			desc_data++;
495 		}
496 	}
497 
498 	kfree(desc);
499 
500 	return 0;
501 }
502 
503 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504 {
505 	struct hclge_desc desc;
506 	__le32 *desc_data;
507 	u32 reg_num;
508 	int ret;
509 
510 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512 	if (ret)
513 		return ret;
514 
515 	desc_data = (__le32 *)(&desc.data[0]);
516 	reg_num = le32_to_cpu(*desc_data);
517 
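	/* convert the register count into a descriptor count: one leading
	 * descriptor plus ceil((reg_num - 3) / 4) further descriptors,
	 * e.g. reg_num = 95 yields 1 + 23 + 0 = 24
	 */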
518 	*desc_num = 1 + ((reg_num - 3) >> 2) +
519 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520 
521 	return 0;
522 }
523 
524 static int hclge_mac_update_stats(struct hclge_dev *hdev)
525 {
526 	u32 desc_num;
527 	int ret;
528 
529 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
530 
531 	/* The firmware supports the new statistics acquisition method */
532 	if (!ret)
533 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
534 	else if (ret == -EOPNOTSUPP)
535 		ret = hclge_mac_update_stats_defective(hdev);
536 	else
537 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538 
539 	return ret;
540 }
541 
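/* Query the per-queue RX and TX packet counters one TQP at a time
 * (HCLGE_OPC_QUERY_RX_STATS / HCLGE_OPC_QUERY_TX_STATS) and accumulate them
 * into each tqp->tqp_stats.
 */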
542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
543 {
544 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545 	struct hclge_vport *vport = hclge_get_vport(handle);
546 	struct hclge_dev *hdev = vport->back;
547 	struct hnae3_queue *queue;
548 	struct hclge_desc desc[1];
549 	struct hclge_tqp *tqp;
550 	int ret, i;
551 
552 	for (i = 0; i < kinfo->num_tqps; i++) {
553 		queue = handle->kinfo.tqp[i];
554 		tqp = container_of(queue, struct hclge_tqp, q);
555 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
556 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
557 					   true);
558 
559 		desc[0].data[0] = cpu_to_le32(tqp->index);
560 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
561 		if (ret) {
562 			dev_err(&hdev->pdev->dev,
563 				"Query tqp stat fail, status = %d,queue = %d\n",
564 				ret, i);
565 			return ret;
566 		}
567 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
568 			le32_to_cpu(desc[0].data[1]);
569 	}
570 
571 	for (i = 0; i < kinfo->num_tqps; i++) {
572 		queue = handle->kinfo.tqp[i];
573 		tqp = container_of(queue, struct hclge_tqp, q);
574 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
575 		hclge_cmd_setup_basic_desc(&desc[0],
576 					   HCLGE_OPC_QUERY_TX_STATS,
577 					   true);
578 
579 		desc[0].data[0] = cpu_to_le32(tqp->index);
580 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
581 		if (ret) {
582 			dev_err(&hdev->pdev->dev,
583 				"Query tqp stat fail, status = %d,queue = %d\n",
584 				ret, i);
585 			return ret;
586 		}
587 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
588 			le32_to_cpu(desc[0].data[1]);
589 	}
590 
591 	return 0;
592 }
593 
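/* Copy the accumulated TX counters followed by the RX counters into the
 * ethtool data buffer and return the position just past the last entry.
 */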
594 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
595 {
596 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597 	struct hclge_tqp *tqp;
598 	u64 *buff = data;
599 	int i;
600 
601 	for (i = 0; i < kinfo->num_tqps; i++) {
602 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
604 	}
605 
606 	for (i = 0; i < kinfo->num_tqps; i++) {
607 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
608 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
609 	}
610 
611 	return buff;
612 }
613 
614 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615 {
616 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617 
618 	/* each tqp provides one TX and one RX queue */
619 	return kinfo->num_tqps * (2);
620 }
621 
622 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
623 {
624 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625 	u8 *buff = data;
626 	int i;
627 
628 	for (i = 0; i < kinfo->num_tqps; i++) {
629 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630 			struct hclge_tqp, q);
631 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
632 			 tqp->index);
633 		buff = buff + ETH_GSTRING_LEN;
634 	}
635 
636 	for (i = 0; i < kinfo->num_tqps; i++) {
637 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638 			struct hclge_tqp, q);
639 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
640 			 tqp->index);
641 		buff = buff + ETH_GSTRING_LEN;
642 	}
643 
644 	return buff;
645 }
646 
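/* Generic helper: for each entry in strs[], read the counter at the recorded
 * offset inside comm_stats and store it in data, e.g.
 * hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
 *			ARRAY_SIZE(g_mac_stats_string), data);
 */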
647 static u64 *hclge_comm_get_stats(const void *comm_stats,
648 				 const struct hclge_comm_stats_str strs[],
649 				 int size, u64 *data)
650 {
651 	u64 *buf = data;
652 	u32 i;
653 
654 	for (i = 0; i < size; i++)
655 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
656 
657 	return buf + size;
658 }
659 
660 static u8 *hclge_comm_get_strings(u32 stringset,
661 				  const struct hclge_comm_stats_str strs[],
662 				  int size, u8 *data)
663 {
664 	char *buff = (char *)data;
665 	u32 i;
666 
667 	if (stringset != ETH_SS_STATS)
668 		return buff;
669 
670 	for (i = 0; i < size; i++) {
671 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
672 		buff = buff + ETH_GSTRING_LEN;
673 	}
674 
675 	return (u8 *)buff;
676 }
677 
678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
679 {
680 	struct hnae3_handle *handle;
681 	int status;
682 
683 	handle = &hdev->vport[0].nic;
684 	if (handle->client) {
685 		status = hclge_tqps_update_stats(handle);
686 		if (status) {
687 			dev_err(&hdev->pdev->dev,
688 				"Update TQPS stats fail, status = %d.\n",
689 				status);
690 		}
691 	}
692 
693 	status = hclge_mac_update_stats(hdev);
694 	if (status)
695 		dev_err(&hdev->pdev->dev,
696 			"Update MAC stats fail, status = %d.\n", status);
697 }
698 
699 static void hclge_update_stats(struct hnae3_handle *handle,
700 			       struct net_device_stats *net_stats)
701 {
702 	struct hclge_vport *vport = hclge_get_vport(handle);
703 	struct hclge_dev *hdev = vport->back;
704 	int status;
705 
706 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
707 		return;
708 
709 	status = hclge_mac_update_stats(hdev);
710 	if (status)
711 		dev_err(&hdev->pdev->dev,
712 			"Update MAC stats fail, status = %d.\n",
713 			status);
714 
715 	status = hclge_tqps_update_stats(handle);
716 	if (status)
717 		dev_err(&hdev->pdev->dev,
718 			"Update TQPS stats fail, status = %d.\n",
719 			status);
720 
721 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
722 }
723 
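/* Return the number of self-test items (ETH_SS_TEST) or statistics
 * (ETH_SS_STATS) and, for the test case, record the supported loopback
 * modes in handle->flags.
 */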
724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
725 {
726 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727 		HNAE3_SUPPORT_PHY_LOOPBACK |\
728 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
730 
731 	struct hclge_vport *vport = hclge_get_vport(handle);
732 	struct hclge_dev *hdev = vport->back;
733 	int count = 0;
734 
735 	/* Loopback test support rules:
736 	 * mac: only GE mode is supported
737 	 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
738 	 * phy: only supported when a PHY device is present on the board
739 	 */
740 	if (stringset == ETH_SS_TEST) {
741 		/* clear loopback bit flags at first */
742 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
743 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747 			count += 1;
748 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
749 		}
750 
751 		count += 2;
752 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
754 
755 		if (hdev->hw.mac.phydev) {
756 			count += 1;
757 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
758 		}
759 
760 	} else if (stringset == ETH_SS_STATS) {
761 		count = ARRAY_SIZE(g_mac_stats_string) +
762 			hclge_tqps_get_sset_count(handle, stringset);
763 	}
764 
765 	return count;
766 }
767 
768 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
769 			      u8 *data)
770 {
771 	u8 *p = (char *)data;
772 	int size;
773 
774 	if (stringset == ETH_SS_STATS) {
775 		size = ARRAY_SIZE(g_mac_stats_string);
776 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
777 					   size, p);
778 		p = hclge_tqps_get_strings(handle, p);
779 	} else if (stringset == ETH_SS_TEST) {
780 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
781 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
782 			       ETH_GSTRING_LEN);
783 			p += ETH_GSTRING_LEN;
784 		}
785 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
786 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
787 			       ETH_GSTRING_LEN);
788 			p += ETH_GSTRING_LEN;
789 		}
790 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
791 			memcpy(p,
792 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
793 			       ETH_GSTRING_LEN);
794 			p += ETH_GSTRING_LEN;
795 		}
796 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
797 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
798 			       ETH_GSTRING_LEN);
799 			p += ETH_GSTRING_LEN;
800 		}
801 	}
802 }
803 
804 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
805 {
806 	struct hclge_vport *vport = hclge_get_vport(handle);
807 	struct hclge_dev *hdev = vport->back;
808 	u64 *p;
809 
810 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
811 				 ARRAY_SIZE(g_mac_stats_string), data);
812 	p = hclge_tqps_get_stats(handle, p);
813 }
814 
815 static void hclge_get_mac_stat(struct hnae3_handle *handle,
816 			       struct hns3_mac_stats *mac_stats)
817 {
818 	struct hclge_vport *vport = hclge_get_vport(handle);
819 	struct hclge_dev *hdev = vport->back;
820 
821 	hclge_update_stats(handle, NULL);
822 
823 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
824 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
825 }
826 
827 static int hclge_parse_func_status(struct hclge_dev *hdev,
828 				   struct hclge_func_status_cmd *status)
829 {
830 #define HCLGE_MAC_ID_MASK	0xF
831 
832 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
833 		return -EINVAL;
834 
835 	/* Record whether this PF is the main PF */
836 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
837 		hdev->flag |= HCLGE_FLAG_MAIN;
838 	else
839 		hdev->flag &= ~HCLGE_FLAG_MAIN;
840 
841 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
842 	return 0;
843 }
844 
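/* Poll HCLGE_OPC_QUERY_FUNC_STATUS until the PF state is reported (i.e. PF
 * reset has finished) or HCLGE_QUERY_MAX_CNT attempts have been made, then
 * parse the main-PF flag and MAC id from the result.
 */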
845 static int hclge_query_function_status(struct hclge_dev *hdev)
846 {
847 #define HCLGE_QUERY_MAX_CNT	5
848 
849 	struct hclge_func_status_cmd *req;
850 	struct hclge_desc desc;
851 	int timeout = 0;
852 	int ret;
853 
854 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
855 	req = (struct hclge_func_status_cmd *)desc.data;
856 
857 	do {
858 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
859 		if (ret) {
860 			dev_err(&hdev->pdev->dev,
861 				"query function status failed %d.\n", ret);
862 			return ret;
863 		}
864 
865 		/* Check whether PF reset is done */
866 		if (req->pf_state)
867 			break;
868 		usleep_range(1000, 2000);
869 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
870 
871 	return hclge_parse_func_status(hdev, req);
872 }
873 
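/* Query the PF resources from firmware: the number of TQPs, the packet
 * buffer size, the TX/DV buffer sizes (rounded up to HCLGE_BUF_SIZE_UNIT)
 * and the NIC/RoCE MSI vector counts.
 */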
874 static int hclge_query_pf_resource(struct hclge_dev *hdev)
875 {
876 	struct hclge_pf_res_cmd *req;
877 	struct hclge_desc desc;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
881 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
882 	if (ret) {
883 		dev_err(&hdev->pdev->dev,
884 			"query pf resource failed %d.\n", ret);
885 		return ret;
886 	}
887 
888 	req = (struct hclge_pf_res_cmd *)desc.data;
889 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
890 			 le16_to_cpu(req->ext_tqp_num);
891 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892 
893 	if (req->tx_buf_size)
894 		hdev->tx_buf_size =
895 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896 	else
897 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898 
899 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900 
901 	if (req->dv_buf_size)
902 		hdev->dv_buf_size =
903 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904 	else
905 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906 
907 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908 
909 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
910 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
911 		dev_err(&hdev->pdev->dev,
912 			"only %u msi resources available, not enough for pf(min:2).\n",
913 			hdev->num_nic_msi);
914 		return -EINVAL;
915 	}
916 
917 	if (hnae3_dev_roce_supported(hdev)) {
918 		hdev->num_roce_msi =
919 			le16_to_cpu(req->pf_intr_vector_number_roce);
920 
921 		/* PF should have both NIC vectors and RoCE vectors;
922 		 * NIC vectors are queued before RoCE vectors.
923 		 */
924 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
925 	} else {
926 		hdev->num_msi = hdev->num_nic_msi;
927 	}
928 
929 	return 0;
930 }
931 
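/* Translate the firmware speed code into an HCLGE_MAC_SPEED_* value; note
 * that the codes are not ordered by speed (6/7 mean 10M/100M, 0 means 1G).
 */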
932 static int hclge_parse_speed(int speed_cmd, int *speed)
933 {
934 	switch (speed_cmd) {
935 	case 6:
936 		*speed = HCLGE_MAC_SPEED_10M;
937 		break;
938 	case 7:
939 		*speed = HCLGE_MAC_SPEED_100M;
940 		break;
941 	case 0:
942 		*speed = HCLGE_MAC_SPEED_1G;
943 		break;
944 	case 1:
945 		*speed = HCLGE_MAC_SPEED_10G;
946 		break;
947 	case 2:
948 		*speed = HCLGE_MAC_SPEED_25G;
949 		break;
950 	case 3:
951 		*speed = HCLGE_MAC_SPEED_40G;
952 		break;
953 	case 4:
954 		*speed = HCLGE_MAC_SPEED_50G;
955 		break;
956 	case 5:
957 		*speed = HCLGE_MAC_SPEED_100G;
958 		break;
959 	case 8:
960 		*speed = HCLGE_MAC_SPEED_200G;
961 		break;
962 	default:
963 		return -EINVAL;
964 	}
965 
966 	return 0;
967 }
968 
969 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
970 {
971 	struct hclge_vport *vport = hclge_get_vport(handle);
972 	struct hclge_dev *hdev = vport->back;
973 	u32 speed_ability = hdev->hw.mac.speed_ability;
974 	u32 speed_bit = 0;
975 
976 	switch (speed) {
977 	case HCLGE_MAC_SPEED_10M:
978 		speed_bit = HCLGE_SUPPORT_10M_BIT;
979 		break;
980 	case HCLGE_MAC_SPEED_100M:
981 		speed_bit = HCLGE_SUPPORT_100M_BIT;
982 		break;
983 	case HCLGE_MAC_SPEED_1G:
984 		speed_bit = HCLGE_SUPPORT_1G_BIT;
985 		break;
986 	case HCLGE_MAC_SPEED_10G:
987 		speed_bit = HCLGE_SUPPORT_10G_BIT;
988 		break;
989 	case HCLGE_MAC_SPEED_25G:
990 		speed_bit = HCLGE_SUPPORT_25G_BIT;
991 		break;
992 	case HCLGE_MAC_SPEED_40G:
993 		speed_bit = HCLGE_SUPPORT_40G_BIT;
994 		break;
995 	case HCLGE_MAC_SPEED_50G:
996 		speed_bit = HCLGE_SUPPORT_50G_BIT;
997 		break;
998 	case HCLGE_MAC_SPEED_100G:
999 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1000 		break;
1001 	case HCLGE_MAC_SPEED_200G:
1002 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1003 		break;
1004 	default:
1005 		return -EINVAL;
1006 	}
1007 
1008 	if (speed_bit & speed_ability)
1009 		return 0;
1010 
1011 	return -EINVAL;
1012 }
1013 
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1015 {
1016 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1018 				 mac->supported);
1019 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1021 				 mac->supported);
1022 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1024 				 mac->supported);
1025 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1027 				 mac->supported);
1028 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1030 				 mac->supported);
1031 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1032 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1033 				 mac->supported);
1034 }
1035 
1036 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1037 {
1038 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046 				 mac->supported);
1047 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049 				 mac->supported);
1050 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052 				 mac->supported);
1053 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1054 		linkmode_set_bit(
1055 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1056 			mac->supported);
1057 }
1058 
1059 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1060 {
1061 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1062 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1063 				 mac->supported);
1064 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1078 				 mac->supported);
1079 }
1080 
1081 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1082 {
1083 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1084 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1085 				 mac->supported);
1086 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1087 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1088 				 mac->supported);
1089 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1090 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1091 				 mac->supported);
1092 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1093 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1094 				 mac->supported);
1095 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1096 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1097 				 mac->supported);
1098 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1099 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1100 				 mac->supported);
1101 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1102 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1103 				 mac->supported);
1104 }
1105 
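/* Set the advertised FEC link modes and mac->fec_ability for the current
 * MAC speed: BASE-R is advertised for 10G/40G, RS for 25G/50G/100G/200G,
 * and no FEC ability is reported for other speeds.
 */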
1106 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1107 {
1108 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1109 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1110 
1111 	switch (mac->speed) {
1112 	case HCLGE_MAC_SPEED_10G:
1113 	case HCLGE_MAC_SPEED_40G:
1114 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1115 				 mac->supported);
1116 		mac->fec_ability =
1117 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1118 		break;
1119 	case HCLGE_MAC_SPEED_25G:
1120 	case HCLGE_MAC_SPEED_50G:
1121 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1122 				 mac->supported);
1123 		mac->fec_ability =
1124 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1125 			BIT(HNAE3_FEC_AUTO);
1126 		break;
1127 	case HCLGE_MAC_SPEED_100G:
1128 	case HCLGE_MAC_SPEED_200G:
1129 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1130 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1131 		break;
1132 	default:
1133 		mac->fec_ability = 0;
1134 		break;
1135 	}
1136 }
1137 
1138 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1139 					u16 speed_ability)
1140 {
1141 	struct hclge_mac *mac = &hdev->hw.mac;
1142 
1143 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1144 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1145 				 mac->supported);
1146 
1147 	hclge_convert_setting_sr(mac, speed_ability);
1148 	hclge_convert_setting_lr(mac, speed_ability);
1149 	hclge_convert_setting_cr(mac, speed_ability);
1150 	if (hnae3_dev_fec_supported(hdev))
1151 		hclge_convert_setting_fec(mac);
1152 
1153 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1154 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1155 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1156 }
1157 
1158 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1159 					    u16 speed_ability)
1160 {
1161 	struct hclge_mac *mac = &hdev->hw.mac;
1162 
1163 	hclge_convert_setting_kr(mac, speed_ability);
1164 	if (hnae3_dev_fec_supported(hdev))
1165 		hclge_convert_setting_fec(mac);
1166 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1167 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1168 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1169 }
1170 
1171 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1172 					 u16 speed_ability)
1173 {
1174 	unsigned long *supported = hdev->hw.mac.supported;
1175 
1176 	/* default to supporting all speeds for a GE port */
1177 	if (!speed_ability)
1178 		speed_ability = HCLGE_SUPPORT_GE;
1179 
1180 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1181 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1182 				 supported);
1183 
1184 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1185 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1186 				 supported);
1187 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1188 				 supported);
1189 	}
1190 
1191 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1192 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1193 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1194 	}
1195 
1196 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1197 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1200 }
1201 
1202 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1203 {
1204 	u8 media_type = hdev->hw.mac.media_type;
1205 
1206 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1207 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1208 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1209 		hclge_parse_copper_link_mode(hdev, speed_ability);
1210 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1211 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1212 }
1213 
1214 static u32 hclge_get_max_speed(u16 speed_ability)
1215 {
1216 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1217 		return HCLGE_MAC_SPEED_200G;
1218 
1219 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1220 		return HCLGE_MAC_SPEED_100G;
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1223 		return HCLGE_MAC_SPEED_50G;
1224 
1225 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1226 		return HCLGE_MAC_SPEED_40G;
1227 
1228 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1229 		return HCLGE_MAC_SPEED_25G;
1230 
1231 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1232 		return HCLGE_MAC_SPEED_10G;
1233 
1234 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1235 		return HCLGE_MAC_SPEED_1G;
1236 
1237 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1238 		return HCLGE_MAC_SPEED_100M;
1239 
1240 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1241 		return HCLGE_MAC_SPEED_10M;
1242 
1243 	return HCLGE_MAC_SPEED_1G;
1244 }
1245 
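/* Unpack the configuration descriptors returned by HCLGE_OPC_GET_CFG_PARAM
 * into struct hclge_cfg: VMDq/TC/queue numbers, MAC address, media type,
 * default speed, speed ability, RSS sizes and UMV table space.
 */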
1246 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1247 {
1248 #define SPEED_ABILITY_EXT_SHIFT			8
1249 
1250 	struct hclge_cfg_param_cmd *req;
1251 	u64 mac_addr_tmp_high;
1252 	u16 speed_ability_ext;
1253 	u64 mac_addr_tmp;
1254 	unsigned int i;
1255 
1256 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1257 
1258 	/* get the configuration */
1259 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1260 					      HCLGE_CFG_VMDQ_M,
1261 					      HCLGE_CFG_VMDQ_S);
1262 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1263 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1264 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1265 					    HCLGE_CFG_TQP_DESC_N_M,
1266 					    HCLGE_CFG_TQP_DESC_N_S);
1267 
1268 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1269 					HCLGE_CFG_PHY_ADDR_M,
1270 					HCLGE_CFG_PHY_ADDR_S);
1271 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1272 					  HCLGE_CFG_MEDIA_TP_M,
1273 					  HCLGE_CFG_MEDIA_TP_S);
1274 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1275 					  HCLGE_CFG_RX_BUF_LEN_M,
1276 					  HCLGE_CFG_RX_BUF_LEN_S);
1277 	/* get mac_address */
1278 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1279 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1280 					    HCLGE_CFG_MAC_ADDR_H_M,
1281 					    HCLGE_CFG_MAC_ADDR_H_S);
1282 
1283 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1284 
1285 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1286 					     HCLGE_CFG_DEFAULT_SPEED_M,
1287 					     HCLGE_CFG_DEFAULT_SPEED_S);
1288 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1289 					       HCLGE_CFG_RSS_SIZE_M,
1290 					       HCLGE_CFG_RSS_SIZE_S);
1291 
1292 	for (i = 0; i < ETH_ALEN; i++)
1293 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1294 
1295 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1296 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1297 
1298 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1299 					     HCLGE_CFG_SPEED_ABILITY_M,
1300 					     HCLGE_CFG_SPEED_ABILITY_S);
1301 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1302 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1303 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1304 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1305 
1306 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1308 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1309 	if (!cfg->umv_space)
1310 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1311 
1312 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1313 					       HCLGE_CFG_PF_RSS_SIZE_M,
1314 					       HCLGE_CFG_PF_RSS_SIZE_S);
1315 
1316 	/* HCLGE_CFG_PF_RSS_SIZE_M encodes the PF max RSS size as a power of
1317 	 * 2 (the exponent) rather than the size itself, which is more
1318 	 * flexible for future changes and expansions.
1319 	 * A zero value in this field is not meaningful, so in that case the
1320 	 * PF uses the same max RSS size as the VF, taken from the
1321 	 * HCLGE_CFG_RSS_SIZE_S field.
1322 	 */
1323 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1324 			       1U << cfg->pf_rss_size_max :
1325 			       cfg->vf_rss_size_max;
1326 }
1327 
1328 /* hclge_get_cfg: query the static parameters from flash
1329  * @hdev: pointer to struct hclge_dev
1330  * @hcfg: the config structure to be filled in
1331  */
1332 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1333 {
1334 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1335 	struct hclge_cfg_param_cmd *req;
1336 	unsigned int i;
1337 	int ret;
1338 
1339 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1340 		u32 offset = 0;
1341 
1342 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1343 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1344 					   true);
1345 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1346 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1347 		/* Length must be in units of 4 bytes when sent to the hardware */
1348 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1349 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1350 		req->offset = cpu_to_le32(offset);
1351 	}
1352 
1353 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1354 	if (ret) {
1355 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1356 		return ret;
1357 	}
1358 
1359 	hclge_parse_cfg(hcfg, desc);
1360 
1361 	return 0;
1362 }
1363 
1364 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1365 {
1366 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1367 
1368 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1369 
1370 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1371 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1372 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1373 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1374 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1375 }
1376 
1377 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1378 				  struct hclge_desc *desc)
1379 {
1380 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1381 	struct hclge_dev_specs_0_cmd *req0;
1382 	struct hclge_dev_specs_1_cmd *req1;
1383 
1384 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1385 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1386 
1387 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1388 	ae_dev->dev_specs.rss_ind_tbl_size =
1389 		le16_to_cpu(req0->rss_ind_tbl_size);
1390 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1391 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1392 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1393 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1394 }
1395 
1396 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1397 {
1398 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1399 
1400 	if (!dev_specs->max_non_tso_bd_num)
1401 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1402 	if (!dev_specs->rss_ind_tbl_size)
1403 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1404 	if (!dev_specs->rss_key_size)
1405 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1406 	if (!dev_specs->max_tm_rate)
1407 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1408 	if (!dev_specs->max_int_gl)
1409 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1410 }
1411 
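/* Query device specifications from firmware on V3+ devices; older devices
 * use the compile-time defaults, and any zero field reported by firmware is
 * also replaced with its default by hclge_check_dev_specs().
 */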
1412 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1413 {
1414 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1415 	int ret;
1416 	int i;
1417 
1418 	/* set default specifications as devices lower than version V3 do not
1419 	 * support querying specifications from firmware.
1420 	 */
1421 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1422 		hclge_set_default_dev_specs(hdev);
1423 		return 0;
1424 	}
1425 
1426 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1427 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1428 					   true);
1429 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1430 	}
1431 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1432 
1433 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1434 	if (ret)
1435 		return ret;
1436 
1437 	hclge_parse_dev_specs(hdev, desc);
1438 	hclge_check_dev_specs(hdev);
1439 
1440 	return 0;
1441 }
1442 
1443 static int hclge_get_cap(struct hclge_dev *hdev)
1444 {
1445 	int ret;
1446 
1447 	ret = hclge_query_function_status(hdev);
1448 	if (ret) {
1449 		dev_err(&hdev->pdev->dev,
1450 			"query function status error %d.\n", ret);
1451 		return ret;
1452 	}
1453 
1454 	/* get pf resource */
1455 	return hclge_query_pf_resource(hdev);
1456 }
1457 
1458 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1459 {
1460 #define HCLGE_MIN_TX_DESC	64
1461 #define HCLGE_MIN_RX_DESC	64
1462 
1463 	if (!is_kdump_kernel())
1464 		return;
1465 
1466 	dev_info(&hdev->pdev->dev,
1467 		 "Running kdump kernel. Using minimal resources\n");
1468 
1469 	/* the minimum number of queue pairs equals the number of vports */
1470 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1471 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1472 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1473 }
1474 
1475 static int hclge_configure(struct hclge_dev *hdev)
1476 {
1477 	struct hclge_cfg cfg;
1478 	unsigned int i;
1479 	int ret;
1480 
1481 	ret = hclge_get_cfg(hdev, &cfg);
1482 	if (ret)
1483 		return ret;
1484 
1485 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1486 	hdev->base_tqp_pid = 0;
1487 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1488 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1489 	hdev->rx_buf_len = cfg.rx_buf_len;
1490 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1491 	hdev->hw.mac.media_type = cfg.media_type;
1492 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1493 	hdev->num_tx_desc = cfg.tqp_desc_num;
1494 	hdev->num_rx_desc = cfg.tqp_desc_num;
1495 	hdev->tm_info.num_pg = 1;
1496 	hdev->tc_max = cfg.tc_num;
1497 	hdev->tm_info.hw_pfc_map = 0;
1498 	hdev->wanted_umv_size = cfg.umv_space;
1499 
1500 	if (hnae3_dev_fd_supported(hdev)) {
1501 		hdev->fd_en = true;
1502 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1503 	}
1504 
1505 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1506 	if (ret) {
1507 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1508 			cfg.default_speed, ret);
1509 		return ret;
1510 	}
1511 
1512 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1513 
1514 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1515 
1516 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1517 	    (hdev->tc_max < 1)) {
1518 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1519 			 hdev->tc_max);
1520 		hdev->tc_max = 1;
1521 	}
1522 
1523 	/* Dev does not support DCB */
1524 	if (!hnae3_dev_dcb_supported(hdev)) {
1525 		hdev->tc_max = 1;
1526 		hdev->pfc_max = 0;
1527 	} else {
1528 		hdev->pfc_max = hdev->tc_max;
1529 	}
1530 
1531 	hdev->tm_info.num_tc = 1;
1532 
1533 	/* Non-contiguous TCs are currently not supported */
1534 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1535 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1536 
1537 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1538 
1539 	hclge_init_kdump_kernel_config(hdev);
1540 
1541 	/* Set the initial affinity based on the PCI function number */
1542 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1543 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1544 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1545 			&hdev->affinity_mask);
1546 
1547 	return ret;
1548 }
1549 
1550 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1551 			    u16 tso_mss_max)
1552 {
1553 	struct hclge_cfg_tso_status_cmd *req;
1554 	struct hclge_desc desc;
1555 
1556 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1557 
1558 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1559 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1560 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1561 
1562 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1563 }
1564 
1565 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1566 {
1567 	struct hclge_cfg_gro_status_cmd *req;
1568 	struct hclge_desc desc;
1569 	int ret;
1570 
1571 	if (!hnae3_dev_gro_supported(hdev))
1572 		return 0;
1573 
1574 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1575 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1576 
1577 	req->gro_en = en ? 1 : 0;
1578 
1579 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1580 	if (ret)
1581 		dev_err(&hdev->pdev->dev,
1582 			"GRO hardware config cmd failed, ret = %d\n", ret);
1583 
1584 	return ret;
1585 }
1586 
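/* Allocate the per-TQP bookkeeping array and compute each queue's register
 * base; queues at or beyond HCLGE_TQP_MAX_SIZE_DEV_V2 are addressed through
 * the extended register region (HCLGE_TQP_EXT_REG_OFFSET).
 */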
1587 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1588 {
1589 	struct hclge_tqp *tqp;
1590 	int i;
1591 
1592 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1593 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1594 	if (!hdev->htqp)
1595 		return -ENOMEM;
1596 
1597 	tqp = hdev->htqp;
1598 
1599 	for (i = 0; i < hdev->num_tqps; i++) {
1600 		tqp->dev = &hdev->pdev->dev;
1601 		tqp->index = i;
1602 
1603 		tqp->q.ae_algo = &ae_algo;
1604 		tqp->q.buf_size = hdev->rx_buf_len;
1605 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1606 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1607 
1608 		/* need an extended offset to configure queues >=
1609 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1610 		 */
1611 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1612 			tqp->q.io_base = hdev->hw.io_base +
1613 					 HCLGE_TQP_REG_OFFSET +
1614 					 i * HCLGE_TQP_REG_SIZE;
1615 		else
1616 			tqp->q.io_base = hdev->hw.io_base +
1617 					 HCLGE_TQP_REG_OFFSET +
1618 					 HCLGE_TQP_EXT_REG_OFFSET +
1619 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1620 					 HCLGE_TQP_REG_SIZE;
1621 
1622 		tqp++;
1623 	}
1624 
1625 	return 0;
1626 }
1627 
1628 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1629 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1630 {
1631 	struct hclge_tqp_map_cmd *req;
1632 	struct hclge_desc desc;
1633 	int ret;
1634 
1635 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1636 
1637 	req = (struct hclge_tqp_map_cmd *)desc.data;
1638 	req->tqp_id = cpu_to_le16(tqp_pid);
1639 	req->tqp_vf = func_id;
1640 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1641 	if (!is_pf)
1642 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1643 	req->tqp_vid = cpu_to_le16(tqp_vid);
1644 
1645 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1646 	if (ret)
1647 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1648 
1649 	return ret;
1650 }
1651 
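/* Hand out up to num_tqps currently unallocated TQPs to the vport and
 * derive its rss_size, capped by the PF maximum and by the number of NIC
 * MSI vectors available per TC.
 */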
1652 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1653 {
1654 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1655 	struct hclge_dev *hdev = vport->back;
1656 	int i, alloced;
1657 
1658 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1659 	     alloced < num_tqps; i++) {
1660 		if (!hdev->htqp[i].alloced) {
1661 			hdev->htqp[i].q.handle = &vport->nic;
1662 			hdev->htqp[i].q.tqp_index = alloced;
1663 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1664 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1665 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1666 			hdev->htqp[i].alloced = true;
1667 			alloced++;
1668 		}
1669 	}
1670 	vport->alloc_tqps = alloced;
1671 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1672 				vport->alloc_tqps / hdev->tm_info.num_tc);
1673 
1674 	/* ensure a one-to-one mapping between irq and queue by default */
1675 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1676 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1677 
1678 	return 0;
1679 }
1680 
1681 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1682 			    u16 num_tx_desc, u16 num_rx_desc)
1683 
1684 {
1685 	struct hnae3_handle *nic = &vport->nic;
1686 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1687 	struct hclge_dev *hdev = vport->back;
1688 	int ret;
1689 
1690 	kinfo->num_tx_desc = num_tx_desc;
1691 	kinfo->num_rx_desc = num_rx_desc;
1692 
1693 	kinfo->rx_buf_len = hdev->rx_buf_len;
1694 
1695 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1696 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1697 	if (!kinfo->tqp)
1698 		return -ENOMEM;
1699 
1700 	ret = hclge_assign_tqp(vport, num_tqps);
1701 	if (ret)
1702 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1703 
1704 	return ret;
1705 }
1706 
1707 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1708 				  struct hclge_vport *vport)
1709 {
1710 	struct hnae3_handle *nic = &vport->nic;
1711 	struct hnae3_knic_private_info *kinfo;
1712 	u16 i;
1713 
1714 	kinfo = &nic->kinfo;
1715 	for (i = 0; i < vport->alloc_tqps; i++) {
1716 		struct hclge_tqp *q =
1717 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1718 		bool is_pf;
1719 		int ret;
1720 
1721 		is_pf = !(vport->vport_id);
1722 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1723 					     i, is_pf);
1724 		if (ret)
1725 			return ret;
1726 	}
1727 
1728 	return 0;
1729 }
1730 
1731 static int hclge_map_tqp(struct hclge_dev *hdev)
1732 {
1733 	struct hclge_vport *vport = hdev->vport;
1734 	u16 i, num_vport;
1735 
1736 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1737 	for (i = 0; i < num_vport; i++)	{
1738 		int ret;
1739 
1740 		ret = hclge_map_tqp_to_vport(hdev, vport);
1741 		if (ret)
1742 			return ret;
1743 
1744 		vport++;
1745 	}
1746 
1747 	return 0;
1748 }
1749 
1750 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1751 {
1752 	struct hnae3_handle *nic = &vport->nic;
1753 	struct hclge_dev *hdev = vport->back;
1754 	int ret;
1755 
1756 	nic->pdev = hdev->pdev;
1757 	nic->ae_algo = &ae_algo;
1758 	nic->numa_node_mask = hdev->numa_node_mask;
1759 
1760 	ret = hclge_knic_setup(vport, num_tqps,
1761 			       hdev->num_tx_desc, hdev->num_rx_desc);
1762 	if (ret)
1763 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1764 
1765 	return ret;
1766 }
1767 
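/* Allocate one vport for the PF's main NIC plus one per VMDq vport and
 * requested VF. TQPs are split evenly and the remainder goes to the main
 * vport, e.g. 67 TQPs over 8 vports gives 8 per vport and 11 for the PF.
 */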
1768 static int hclge_alloc_vport(struct hclge_dev *hdev)
1769 {
1770 	struct pci_dev *pdev = hdev->pdev;
1771 	struct hclge_vport *vport;
1772 	u32 tqp_main_vport;
1773 	u32 tqp_per_vport;
1774 	int num_vport, i;
1775 	int ret;
1776 
1777 	/* We need to alloc a vport for the main NIC of the PF */
1778 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1779 
1780 	if (hdev->num_tqps < num_vport) {
1781 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1782 			hdev->num_tqps, num_vport);
1783 		return -EINVAL;
1784 	}
1785 
1786 	/* Alloc the same number of TQPs for every vport */
1787 	tqp_per_vport = hdev->num_tqps / num_vport;
1788 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1789 
1790 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1791 			     GFP_KERNEL);
1792 	if (!vport)
1793 		return -ENOMEM;
1794 
1795 	hdev->vport = vport;
1796 	hdev->num_alloc_vport = num_vport;
1797 
1798 	if (IS_ENABLED(CONFIG_PCI_IOV))
1799 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1800 
1801 	for (i = 0; i < num_vport; i++) {
1802 		vport->back = hdev;
1803 		vport->vport_id = i;
1804 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1805 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1806 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1807 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1808 		INIT_LIST_HEAD(&vport->vlan_list);
1809 		INIT_LIST_HEAD(&vport->uc_mac_list);
1810 		INIT_LIST_HEAD(&vport->mc_mac_list);
1811 		spin_lock_init(&vport->mac_list_lock);
1812 
1813 		if (i == 0)
1814 			ret = hclge_vport_setup(vport, tqp_main_vport);
1815 		else
1816 			ret = hclge_vport_setup(vport, tqp_per_vport);
1817 		if (ret) {
1818 			dev_err(&pdev->dev,
1819 				"vport setup failed for vport %d, %d\n",
1820 				i, ret);
1821 			return ret;
1822 		}
1823 
1824 		vport++;
1825 	}
1826 
1827 	return 0;
1828 }
1829 
1830 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1831 				    struct hclge_pkt_buf_alloc *buf_alloc)
1832 {
1833 /* TX buffer size is in units of 128 bytes */
1834 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1835 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
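/* e.g. a 32768-byte TX buffer is programmed as 32768 >> 7 = 256 units */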
1836 	struct hclge_tx_buff_alloc_cmd *req;
1837 	struct hclge_desc desc;
1838 	int ret;
1839 	u8 i;
1840 
1841 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1842 
1843 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1844 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1845 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1846 
1847 		req->tx_pkt_buff[i] =
1848 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1849 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1850 	}
1851 
1852 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1853 	if (ret)
1854 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1855 			ret);
1856 
1857 	return ret;
1858 }
1859 
1860 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1861 				 struct hclge_pkt_buf_alloc *buf_alloc)
1862 {
1863 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1864 
1865 	if (ret)
1866 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1867 
1868 	return ret;
1869 }
1870 
1871 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1872 {
1873 	unsigned int i;
1874 	u32 cnt = 0;
1875 
1876 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1877 		if (hdev->hw_tc_map & BIT(i))
1878 			cnt++;
1879 	return cnt;
1880 }
1881 
1882 /* Get the number of PFC-enabled TCs that have a private buffer */
1883 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1884 				  struct hclge_pkt_buf_alloc *buf_alloc)
1885 {
1886 	struct hclge_priv_buf *priv;
1887 	unsigned int i;
1888 	int cnt = 0;
1889 
1890 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1891 		priv = &buf_alloc->priv_buf[i];
1892 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1893 		    priv->enable)
1894 			cnt++;
1895 	}
1896 
1897 	return cnt;
1898 }
1899 
1900 /* Get the number of PFC-disabled TCs that have a private buffer */
1901 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1902 				     struct hclge_pkt_buf_alloc *buf_alloc)
1903 {
1904 	struct hclge_priv_buf *priv;
1905 	unsigned int i;
1906 	int cnt = 0;
1907 
1908 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1909 		priv = &buf_alloc->priv_buf[i];
1910 		if (hdev->hw_tc_map & BIT(i) &&
1911 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1912 		    priv->enable)
1913 			cnt++;
1914 	}
1915 
1916 	return cnt;
1917 }
1918 
1919 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1920 {
1921 	struct hclge_priv_buf *priv;
1922 	u32 rx_priv = 0;
1923 	int i;
1924 
1925 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1926 		priv = &buf_alloc->priv_buf[i];
1927 		if (priv->enable)
1928 			rx_priv += priv->buf_size;
1929 	}
1930 	return rx_priv;
1931 }
1932 
1933 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1934 {
1935 	u32 i, total_tx_size = 0;
1936 
1937 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1938 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1939 
1940 	return total_tx_size;
1941 }
1942 
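/* Check whether @rx_all can hold the assigned private RX buffers plus a
 * shared buffer. If so, fill in the shared buffer size, its own high/low
 * waterlines and the per-TC high/low thresholds in @buf_alloc.
 */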
1943 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1944 				struct hclge_pkt_buf_alloc *buf_alloc,
1945 				u32 rx_all)
1946 {
1947 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1948 	u32 tc_num = hclge_get_tc_num(hdev);
1949 	u32 shared_buf, aligned_mps;
1950 	u32 rx_priv;
1951 	int i;
1952 
1953 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1954 
1955 	if (hnae3_dev_dcb_supported(hdev))
1956 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1957 					hdev->dv_buf_size;
1958 	else
1959 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1960 					+ hdev->dv_buf_size;
1961 
1962 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1963 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1964 			     HCLGE_BUF_SIZE_UNIT);
1965 
1966 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1967 	if (rx_all < rx_priv + shared_std)
1968 		return false;
1969 
1970 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1971 	buf_alloc->s_buf.buf_size = shared_buf;
1972 	if (hnae3_dev_dcb_supported(hdev)) {
1973 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1974 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1975 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1976 				  HCLGE_BUF_SIZE_UNIT);
1977 	} else {
1978 		buf_alloc->s_buf.self.high = aligned_mps +
1979 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1980 		buf_alloc->s_buf.self.low = aligned_mps;
1981 	}
1982 
1983 	if (hnae3_dev_dcb_supported(hdev)) {
1984 		hi_thrd = shared_buf - hdev->dv_buf_size;
1985 
1986 		if (tc_num <= NEED_RESERVE_TC_NUM)
1987 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1988 					/ BUF_MAX_PERCENT;
1989 
1990 		if (tc_num)
1991 			hi_thrd = hi_thrd / tc_num;
1992 
1993 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1994 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1995 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1996 	} else {
1997 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1998 		lo_thrd = aligned_mps;
1999 	}
2000 
2001 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2002 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2003 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2004 	}
2005 
2006 	return true;
2007 }
2008 
2009 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2010 				struct hclge_pkt_buf_alloc *buf_alloc)
2011 {
2012 	u32 i, total_size;
2013 
2014 	total_size = hdev->pkt_buf_size;
2015 
2016 	/* alloc tx buffer for all enabled tc */
2017 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2018 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2019 
2020 		if (hdev->hw_tc_map & BIT(i)) {
2021 			if (total_size < hdev->tx_buf_size)
2022 				return -ENOMEM;
2023 
2024 			priv->tx_buf_size = hdev->tx_buf_size;
2025 		} else {
2026 			priv->tx_buf_size = 0;
2027 		}
2028 
2029 		total_size -= priv->tx_buf_size;
2030 	}
2031 
2032 	return 0;
2033 }
2034 
2035 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2036 				  struct hclge_pkt_buf_alloc *buf_alloc)
2037 {
2038 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2039 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2040 	unsigned int i;
2041 
2042 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2043 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2044 
2045 		priv->enable = 0;
2046 		priv->wl.low = 0;
2047 		priv->wl.high = 0;
2048 		priv->buf_size = 0;
2049 
2050 		if (!(hdev->hw_tc_map & BIT(i)))
2051 			continue;
2052 
2053 		priv->enable = 1;
2054 
2055 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2056 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2057 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2058 						HCLGE_BUF_SIZE_UNIT);
2059 		} else {
2060 			priv->wl.low = 0;
2061 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2062 					aligned_mps;
2063 		}
2064 
2065 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2066 	}
2067 
2068 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2069 }
2070 
2071 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2072 					  struct hclge_pkt_buf_alloc *buf_alloc)
2073 {
2074 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2075 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2076 	int i;
2077 
2078 	/* let the last TC be cleared first */
2079 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2080 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2081 		unsigned int mask = BIT((unsigned int)i);
2082 
2083 		if (hdev->hw_tc_map & mask &&
2084 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2085 			/* Clear the private buffer of a TC without PFC */
2086 			priv->wl.low = 0;
2087 			priv->wl.high = 0;
2088 			priv->buf_size = 0;
2089 			priv->enable = 0;
2090 			no_pfc_priv_num--;
2091 		}
2092 
2093 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2094 		    no_pfc_priv_num == 0)
2095 			break;
2096 	}
2097 
2098 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2099 }
2100 
2101 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2102 					struct hclge_pkt_buf_alloc *buf_alloc)
2103 {
2104 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2105 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2106 	int i;
2107 
2108 	/* let the last TC be cleared first */
2109 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2110 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2111 		unsigned int mask = BIT((unsigned int)i);
2112 
2113 		if (hdev->hw_tc_map & mask &&
2114 		    hdev->tm_info.hw_pfc_map & mask) {
2115 			/* Reduce the number of PFC TCs with a private buffer */
2116 			priv->wl.low = 0;
2117 			priv->enable = 0;
2118 			priv->wl.high = 0;
2119 			priv->buf_size = 0;
2120 			pfc_priv_num--;
2121 		}
2122 
2123 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2124 		    pfc_priv_num == 0)
2125 			break;
2126 	}
2127 
2128 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2129 }
2130 
2131 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2132 				      struct hclge_pkt_buf_alloc *buf_alloc)
2133 {
2134 #define COMPENSATE_BUFFER	0x3C00
2135 #define COMPENSATE_HALF_MPS_NUM	5
2136 #define PRIV_WL_GAP		0x1800
2137 
2138 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2139 	u32 tc_num = hclge_get_tc_num(hdev);
2140 	u32 half_mps = hdev->mps >> 1;
2141 	u32 min_rx_priv;
2142 	unsigned int i;
2143 
2144 	if (tc_num)
2145 		rx_priv = rx_priv / tc_num;
2146 
2147 	if (tc_num <= NEED_RESERVE_TC_NUM)
2148 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2149 
2150 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2151 			COMPENSATE_HALF_MPS_NUM * half_mps;
2152 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2153 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2154 
2155 	if (rx_priv < min_rx_priv)
2156 		return false;
2157 
2158 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2159 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2160 
2161 		priv->enable = 0;
2162 		priv->wl.low = 0;
2163 		priv->wl.high = 0;
2164 		priv->buf_size = 0;
2165 
2166 		if (!(hdev->hw_tc_map & BIT(i)))
2167 			continue;
2168 
2169 		priv->enable = 1;
2170 		priv->buf_size = rx_priv;
2171 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2172 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2173 	}
2174 
2175 	buf_alloc->s_buf.buf_size = 0;
2176 
2177 	return true;
2178 }
2179 
2180 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2181  * @hdev: pointer to struct hclge_dev
2182  * @buf_alloc: pointer to buffer calculation data
2183  * @return: 0: calculation successful, negative: fail
2184  */
2185 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2186 				struct hclge_pkt_buf_alloc *buf_alloc)
2187 {
2188 	/* When DCB is not supported, rx private buffer is not allocated. */
2189 	if (!hnae3_dev_dcb_supported(hdev)) {
2190 		u32 rx_all = hdev->pkt_buf_size;
2191 
2192 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2193 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2194 			return -ENOMEM;
2195 
2196 		return 0;
2197 	}
2198 
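	/* try the allocation strategies below in turn until the buffers fit:
	 * private buffers only, full waterlines, reduced waterlines, then
	 * dropping the private buffers of non-PFC and finally of PFC TCs
	 */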
2199 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2200 		return 0;
2201 
2202 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2203 		return 0;
2204 
2205 	/* try to decrease the buffer size */
2206 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2207 		return 0;
2208 
2209 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2210 		return 0;
2211 
2212 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2213 		return 0;
2214 
2215 	return -ENOMEM;
2216 }
2217 
2218 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2219 				   struct hclge_pkt_buf_alloc *buf_alloc)
2220 {
2221 	struct hclge_rx_priv_buff_cmd *req;
2222 	struct hclge_desc desc;
2223 	int ret;
2224 	int i;
2225 
2226 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2227 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2228 
2229 	/* Alloc private buffer for each TC */
2230 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2231 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2232 
2233 		req->buf_num[i] =
2234 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2235 		req->buf_num[i] |=
2236 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2237 	}
2238 
2239 	req->shared_buf =
2240 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2241 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2242 
2243 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2244 	if (ret)
2245 		dev_err(&hdev->pdev->dev,
2246 			"rx private buffer alloc cmd failed %d\n", ret);
2247 
2248 	return ret;
2249 }
2250 
2251 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2252 				   struct hclge_pkt_buf_alloc *buf_alloc)
2253 {
2254 	struct hclge_rx_priv_wl_buf *req;
2255 	struct hclge_priv_buf *priv;
2256 	struct hclge_desc desc[2];
2257 	int i, j;
2258 	int ret;
2259 
2260 	for (i = 0; i < 2; i++) {
2261 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2262 					   false);
2263 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2264 
2265 		/* The first descriptor sets the NEXT bit to 1 */
2266 		if (i == 0)
2267 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2268 		else
2269 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2270 
2271 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2272 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2273 
2274 			priv = &buf_alloc->priv_buf[idx];
2275 			req->tc_wl[j].high =
2276 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2277 			req->tc_wl[j].high |=
2278 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2279 			req->tc_wl[j].low =
2280 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2281 			req->tc_wl[j].low |=
2282 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2283 		}
2284 	}
2285 
2286 	/* Send 2 descriptors at one time */
2287 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2288 	if (ret)
2289 		dev_err(&hdev->pdev->dev,
2290 			"rx private waterline config cmd failed %d\n",
2291 			ret);
2292 	return ret;
2293 }
2294 
2295 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2296 				    struct hclge_pkt_buf_alloc *buf_alloc)
2297 {
2298 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2299 	struct hclge_rx_com_thrd *req;
2300 	struct hclge_desc desc[2];
2301 	struct hclge_tc_thrd *tc;
2302 	int i, j;
2303 	int ret;
2304 
2305 	for (i = 0; i < 2; i++) {
2306 		hclge_cmd_setup_basic_desc(&desc[i],
2307 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2308 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2309 
2310 		/* The first descriptor sets the NEXT bit to 1 */
2311 		if (i == 0)
2312 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2313 		else
2314 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2315 
2316 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2317 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2318 
2319 			req->com_thrd[j].high =
2320 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2321 			req->com_thrd[j].high |=
2322 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2323 			req->com_thrd[j].low =
2324 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2325 			req->com_thrd[j].low |=
2326 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2327 		}
2328 	}
2329 
2330 	/* Send 2 descriptors at one time */
2331 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2332 	if (ret)
2333 		dev_err(&hdev->pdev->dev,
2334 			"common threshold config cmd failed %d\n", ret);
2335 	return ret;
2336 }
2337 
2338 static int hclge_common_wl_config(struct hclge_dev *hdev,
2339 				  struct hclge_pkt_buf_alloc *buf_alloc)
2340 {
2341 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2342 	struct hclge_rx_com_wl *req;
2343 	struct hclge_desc desc;
2344 	int ret;
2345 
2346 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2347 
2348 	req = (struct hclge_rx_com_wl *)desc.data;
2349 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2350 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2351 
2352 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2353 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2354 
2355 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2356 	if (ret)
2357 		dev_err(&hdev->pdev->dev,
2358 			"common waterline config cmd failed %d\n", ret);
2359 
2360 	return ret;
2361 }
2362 
2363 int hclge_buffer_alloc(struct hclge_dev *hdev)
2364 {
2365 	struct hclge_pkt_buf_alloc *pkt_buf;
2366 	int ret;
2367 
2368 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2369 	if (!pkt_buf)
2370 		return -ENOMEM;
2371 
2372 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2373 	if (ret) {
2374 		dev_err(&hdev->pdev->dev,
2375 			"could not calc tx buffer size for all TCs %d\n", ret);
2376 		goto out;
2377 	}
2378 
2379 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2380 	if (ret) {
2381 		dev_err(&hdev->pdev->dev,
2382 			"could not alloc tx buffers %d\n", ret);
2383 		goto out;
2384 	}
2385 
2386 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2387 	if (ret) {
2388 		dev_err(&hdev->pdev->dev,
2389 			"could not calc rx priv buffer size for all TCs %d\n",
2390 			ret);
2391 		goto out;
2392 	}
2393 
2394 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2395 	if (ret) {
2396 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2397 			ret);
2398 		goto out;
2399 	}
2400 
2401 	if (hnae3_dev_dcb_supported(hdev)) {
2402 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2403 		if (ret) {
2404 			dev_err(&hdev->pdev->dev,
2405 				"could not configure rx private waterline %d\n",
2406 				ret);
2407 			goto out;
2408 		}
2409 
2410 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2411 		if (ret) {
2412 			dev_err(&hdev->pdev->dev,
2413 				"could not configure common threshold %d\n",
2414 				ret);
2415 			goto out;
2416 		}
2417 	}
2418 
2419 	ret = hclge_common_wl_config(hdev, pkt_buf);
2420 	if (ret)
2421 		dev_err(&hdev->pdev->dev,
2422 			"could not configure common waterline %d\n", ret);
2423 
2424 out:
2425 	kfree(pkt_buf);
2426 	return ret;
2427 }
2428 
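/* Fill the RoCE handle with its vector count and base vector, the shared IO
 * and memory bases, and the PCI device, algo and NUMA mask of the NIC handle.
 */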
2429 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2430 {
2431 	struct hnae3_handle *roce = &vport->roce;
2432 	struct hnae3_handle *nic = &vport->nic;
2433 	struct hclge_dev *hdev = vport->back;
2434 
2435 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2436 
2437 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2438 		return -EINVAL;
2439 
2440 	roce->rinfo.base_vector = hdev->roce_base_vector;
2441 
2442 	roce->rinfo.netdev = nic->kinfo.netdev;
2443 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2444 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2445 
2446 	roce->pdev = nic->pdev;
2447 	roce->ae_algo = nic->ae_algo;
2448 	roce->numa_node_mask = nic->numa_node_mask;
2449 
2450 	return 0;
2451 }
2452 
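/* Allocate the PF's MSI/MSI-X vectors and the bookkeeping arrays that map
 * vectors to vports; the RoCE base vector starts right after the NIC ones.
 */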
2453 static int hclge_init_msi(struct hclge_dev *hdev)
2454 {
2455 	struct pci_dev *pdev = hdev->pdev;
2456 	int vectors;
2457 	int i;
2458 
2459 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2460 					hdev->num_msi,
2461 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2462 	if (vectors < 0) {
2463 		dev_err(&pdev->dev,
2464 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2465 			vectors);
2466 		return vectors;
2467 	}
2468 	if (vectors < hdev->num_msi)
2469 		dev_warn(&hdev->pdev->dev,
2470 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2471 			 hdev->num_msi, vectors);
2472 
2473 	hdev->num_msi = vectors;
2474 	hdev->num_msi_left = vectors;
2475 
2476 	hdev->base_msi_vector = pdev->irq;
2477 	hdev->roce_base_vector = hdev->base_msi_vector +
2478 				hdev->num_nic_msi;
2479 
2480 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2481 					   sizeof(u16), GFP_KERNEL);
2482 	if (!hdev->vector_status) {
2483 		pci_free_irq_vectors(pdev);
2484 		return -ENOMEM;
2485 	}
2486 
2487 	for (i = 0; i < hdev->num_msi; i++)
2488 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2489 
2490 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2491 					sizeof(int), GFP_KERNEL);
2492 	if (!hdev->vector_irq) {
2493 		pci_free_irq_vectors(pdev);
2494 		return -ENOMEM;
2495 	}
2496 
2497 	return 0;
2498 }
2499 
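/* only 10M and 100M links support half duplex; force full duplex otherwise */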
2500 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2501 {
2502 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2503 		duplex = HCLGE_MAC_FULL;
2504 
2505 	return duplex;
2506 }
2507 
2508 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2509 				      u8 duplex)
2510 {
2511 	struct hclge_config_mac_speed_dup_cmd *req;
2512 	struct hclge_desc desc;
2513 	int ret;
2514 
2515 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2516 
2517 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2518 
2519 	if (duplex)
2520 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2521 
2522 	switch (speed) {
2523 	case HCLGE_MAC_SPEED_10M:
2524 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2525 				HCLGE_CFG_SPEED_S, 6);
2526 		break;
2527 	case HCLGE_MAC_SPEED_100M:
2528 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2529 				HCLGE_CFG_SPEED_S, 7);
2530 		break;
2531 	case HCLGE_MAC_SPEED_1G:
2532 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2533 				HCLGE_CFG_SPEED_S, 0);
2534 		break;
2535 	case HCLGE_MAC_SPEED_10G:
2536 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2537 				HCLGE_CFG_SPEED_S, 1);
2538 		break;
2539 	case HCLGE_MAC_SPEED_25G:
2540 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541 				HCLGE_CFG_SPEED_S, 2);
2542 		break;
2543 	case HCLGE_MAC_SPEED_40G:
2544 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2545 				HCLGE_CFG_SPEED_S, 3);
2546 		break;
2547 	case HCLGE_MAC_SPEED_50G:
2548 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2549 				HCLGE_CFG_SPEED_S, 4);
2550 		break;
2551 	case HCLGE_MAC_SPEED_100G:
2552 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2553 				HCLGE_CFG_SPEED_S, 5);
2554 		break;
2555 	case HCLGE_MAC_SPEED_200G:
2556 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2557 				HCLGE_CFG_SPEED_S, 8);
2558 		break;
2559 	default:
2560 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2561 		return -EINVAL;
2562 	}
2563 
2564 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2565 		      1);
2566 
2567 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2568 	if (ret) {
2569 		dev_err(&hdev->pdev->dev,
2570 			"mac speed/duplex config cmd failed %d.\n", ret);
2571 		return ret;
2572 	}
2573 
2574 	return 0;
2575 }
2576 
2577 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2578 {
2579 	struct hclge_mac *mac = &hdev->hw.mac;
2580 	int ret;
2581 
2582 	duplex = hclge_check_speed_dup(duplex, speed);
2583 	if (!mac->support_autoneg && mac->speed == speed &&
2584 	    mac->duplex == duplex)
2585 		return 0;
2586 
2587 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2588 	if (ret)
2589 		return ret;
2590 
2591 	hdev->hw.mac.speed = speed;
2592 	hdev->hw.mac.duplex = duplex;
2593 
2594 	return 0;
2595 }
2596 
2597 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2598 				     u8 duplex)
2599 {
2600 	struct hclge_vport *vport = hclge_get_vport(handle);
2601 	struct hclge_dev *hdev = vport->back;
2602 
2603 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2604 }
2605 
2606 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2607 {
2608 	struct hclge_config_auto_neg_cmd *req;
2609 	struct hclge_desc desc;
2610 	u32 flag = 0;
2611 	int ret;
2612 
2613 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2614 
2615 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2616 	if (enable)
2617 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2618 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2619 
2620 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2621 	if (ret)
2622 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2623 			ret);
2624 
2625 	return ret;
2626 }
2627 
2628 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2629 {
2630 	struct hclge_vport *vport = hclge_get_vport(handle);
2631 	struct hclge_dev *hdev = vport->back;
2632 
2633 	if (!hdev->hw.mac.support_autoneg) {
2634 		if (enable) {
2635 			dev_err(&hdev->pdev->dev,
2636 				"autoneg is not supported by current port\n");
2637 			return -EOPNOTSUPP;
2638 		} else {
2639 			return 0;
2640 		}
2641 	}
2642 
2643 	return hclge_set_autoneg_en(hdev, enable);
2644 }
2645 
2646 static int hclge_get_autoneg(struct hnae3_handle *handle)
2647 {
2648 	struct hclge_vport *vport = hclge_get_vport(handle);
2649 	struct hclge_dev *hdev = vport->back;
2650 	struct phy_device *phydev = hdev->hw.mac.phydev;
2651 
2652 	if (phydev)
2653 		return phydev->autoneg;
2654 
2655 	return hdev->hw.mac.autoneg;
2656 }
2657 
2658 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2659 {
2660 	struct hclge_vport *vport = hclge_get_vport(handle);
2661 	struct hclge_dev *hdev = vport->back;
2662 	int ret;
2663 
2664 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2665 
2666 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2667 	if (ret)
2668 		return ret;
2669 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2670 }
2671 
2672 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2673 {
2674 	struct hclge_vport *vport = hclge_get_vport(handle);
2675 	struct hclge_dev *hdev = vport->back;
2676 
2677 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2678 		return hclge_set_autoneg_en(hdev, !halt);
2679 
2680 	return 0;
2681 }
2682 
2683 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2684 {
2685 	struct hclge_config_fec_cmd *req;
2686 	struct hclge_desc desc;
2687 	int ret;
2688 
2689 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2690 
2691 	req = (struct hclge_config_fec_cmd *)desc.data;
2692 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2693 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2694 	if (fec_mode & BIT(HNAE3_FEC_RS))
2695 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2696 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2697 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2698 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2699 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2700 
2701 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2702 	if (ret)
2703 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2704 
2705 	return ret;
2706 }
2707 
2708 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2709 {
2710 	struct hclge_vport *vport = hclge_get_vport(handle);
2711 	struct hclge_dev *hdev = vport->back;
2712 	struct hclge_mac *mac = &hdev->hw.mac;
2713 	int ret;
2714 
2715 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2716 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2717 		return -EINVAL;
2718 	}
2719 
2720 	ret = hclge_set_fec_hw(hdev, fec_mode);
2721 	if (ret)
2722 		return ret;
2723 
2724 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2725 	return 0;
2726 }
2727 
2728 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2729 			  u8 *fec_mode)
2730 {
2731 	struct hclge_vport *vport = hclge_get_vport(handle);
2732 	struct hclge_dev *hdev = vport->back;
2733 	struct hclge_mac *mac = &hdev->hw.mac;
2734 
2735 	if (fec_ability)
2736 		*fec_ability = mac->fec_ability;
2737 	if (fec_mode)
2738 		*fec_mode = mac->fec_mode;
2739 }
2740 
2741 static int hclge_mac_init(struct hclge_dev *hdev)
2742 {
2743 	struct hclge_mac *mac = &hdev->hw.mac;
2744 	int ret;
2745 
2746 	hdev->support_sfp_query = true;
2747 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2748 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2749 					 hdev->hw.mac.duplex);
2750 	if (ret)
2751 		return ret;
2752 
2753 	if (hdev->hw.mac.support_autoneg) {
2754 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2755 		if (ret)
2756 			return ret;
2757 	}
2758 
2759 	mac->link = 0;
2760 
2761 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2762 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2763 		if (ret)
2764 			return ret;
2765 	}
2766 
2767 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2768 	if (ret) {
2769 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2770 		return ret;
2771 	}
2772 
2773 	ret = hclge_set_default_loopback(hdev);
2774 	if (ret)
2775 		return ret;
2776 
2777 	ret = hclge_buffer_alloc(hdev);
2778 	if (ret)
2779 		dev_err(&hdev->pdev->dev,
2780 			"allocate buffer fail, ret=%d\n", ret);
2781 
2782 	return ret;
2783 }
2784 
2785 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2786 {
2787 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2788 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2789 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2790 				    hclge_wq, &hdev->service_task, 0);
2791 }
2792 
2793 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2794 {
2795 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2796 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2797 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2798 				    hclge_wq, &hdev->service_task, 0);
2799 }
2800 
2801 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2802 {
2803 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2804 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2805 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2806 				    hclge_wq, &hdev->service_task,
2807 				    delay_time);
2808 }
2809 
2810 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2811 {
2812 	struct hclge_link_status_cmd *req;
2813 	struct hclge_desc desc;
2814 	int ret;
2815 
2816 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2817 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2818 	if (ret) {
2819 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2820 			ret);
2821 		return ret;
2822 	}
2823 
2824 	req = (struct hclge_link_status_cmd *)desc.data;
2825 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2826 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2827 
2828 	return 0;
2829 }
2830 
2831 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2832 {
2833 	struct phy_device *phydev = hdev->hw.mac.phydev;
2834 
2835 	*link_status = HCLGE_LINK_STATUS_DOWN;
2836 
2837 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2838 		return 0;
2839 
2840 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2841 		return 0;
2842 
2843 	return hclge_get_mac_link_status(hdev, link_status);
2844 }
2845 
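/* Query the MAC/PHY link state and, when it changes, notify the NIC and
 * RoCE clients of every vport and update the MAC TNL interrupt config.
 */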
2846 static void hclge_update_link_status(struct hclge_dev *hdev)
2847 {
2848 	struct hnae3_client *rclient = hdev->roce_client;
2849 	struct hnae3_client *client = hdev->nic_client;
2850 	struct hnae3_handle *rhandle;
2851 	struct hnae3_handle *handle;
2852 	int state;
2853 	int ret;
2854 	int i;
2855 
2856 	if (!client)
2857 		return;
2858 
2859 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2860 		return;
2861 
2862 	ret = hclge_get_mac_phy_link(hdev, &state);
2863 	if (ret) {
2864 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2865 		return;
2866 	}
2867 
2868 	if (state != hdev->hw.mac.link) {
2869 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2870 			handle = &hdev->vport[i].nic;
2871 			client->ops->link_status_change(handle, state);
2872 			hclge_config_mac_tnl_int(hdev, state);
2873 			rhandle = &hdev->vport[i].roce;
2874 			if (rclient && rclient->ops->link_status_change)
2875 				rclient->ops->link_status_change(rhandle,
2876 								 state);
2877 		}
2878 		hdev->hw.mac.link = state;
2879 	}
2880 
2881 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2882 }
2883 
2884 static void hclge_update_port_capability(struct hclge_mac *mac)
2885 {
2886 	/* update fec ability by speed */
2887 	hclge_convert_setting_fec(mac);
2888 
2889 	/* firmware cannot identify the backplane type; the media type
2890 	 * read from the configuration can help deal with it
2891 	 */
2892 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2893 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2894 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2895 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2896 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2897 
2898 	if (mac->support_autoneg) {
2899 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2900 		linkmode_copy(mac->advertising, mac->supported);
2901 	} else {
2902 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2903 				   mac->supported);
2904 		linkmode_zero(mac->advertising);
2905 	}
2906 }
2907 
2908 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2909 {
2910 	struct hclge_sfp_info_cmd *resp;
2911 	struct hclge_desc desc;
2912 	int ret;
2913 
2914 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2915 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2916 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2917 	if (ret == -EOPNOTSUPP) {
2918 		dev_warn(&hdev->pdev->dev,
2919 			 "IMP does not support getting SFP speed %d\n", ret);
2920 		return ret;
2921 	} else if (ret) {
2922 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2923 		return ret;
2924 	}
2925 
2926 	*speed = le32_to_cpu(resp->speed);
2927 
2928 	return 0;
2929 }
2930 
2931 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2932 {
2933 	struct hclge_sfp_info_cmd *resp;
2934 	struct hclge_desc desc;
2935 	int ret;
2936 
2937 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2938 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2939 
2940 	resp->query_type = QUERY_ACTIVE_SPEED;
2941 
2942 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2943 	if (ret == -EOPNOTSUPP) {
2944 		dev_warn(&hdev->pdev->dev,
2945 			 "IMP does not support getting SFP info %d\n", ret);
2946 		return ret;
2947 	} else if (ret) {
2948 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2949 		return ret;
2950 	}
2951 
2952 	/* In some cases, the MAC speed read from IMP may be 0; it shouldn't
2953 	 * be assigned to mac->speed.
2954 	 */
2955 	if (!le32_to_cpu(resp->speed))
2956 		return 0;
2957 
2958 	mac->speed = le32_to_cpu(resp->speed);
2959 	/* if resp->speed_ability is 0, the firmware is an old version;
2960 	 * do not update these parameters
2961 	 */
2962 	if (resp->speed_ability) {
2963 		mac->module_type = le32_to_cpu(resp->module_type);
2964 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2965 		mac->autoneg = resp->autoneg;
2966 		mac->support_autoneg = resp->autoneg_ability;
2967 		mac->speed_type = QUERY_ACTIVE_SPEED;
2968 		if (!resp->active_fec)
2969 			mac->fec_mode = 0;
2970 		else
2971 			mac->fec_mode = BIT(resp->active_fec);
2972 	} else {
2973 		mac->speed_type = QUERY_SFP_SPEED;
2974 	}
2975 
2976 	return 0;
2977 }
2978 
2979 static int hclge_update_port_info(struct hclge_dev *hdev)
2980 {
2981 	struct hclge_mac *mac = &hdev->hw.mac;
2982 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2983 	int ret;
2984 
2985 	/* get the port info from SFP cmd if not copper port */
2986 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2987 		return 0;
2988 
2989 	/* if IMP does not support getting SFP/qSFP info, return directly */
2990 	if (!hdev->support_sfp_query)
2991 		return 0;
2992 
2993 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2994 		ret = hclge_get_sfp_info(hdev, mac);
2995 	else
2996 		ret = hclge_get_sfp_speed(hdev, &speed);
2997 
2998 	if (ret == -EOPNOTSUPP) {
2999 		hdev->support_sfp_query = false;
3000 		return ret;
3001 	} else if (ret) {
3002 		return ret;
3003 	}
3004 
3005 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3006 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3007 			hclge_update_port_capability(mac);
3008 			return 0;
3009 		}
3010 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3011 					       HCLGE_MAC_FULL);
3012 	} else {
3013 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3014 			return 0; /* do nothing if no SFP */
3015 
3016 		/* must config full duplex for SFP */
3017 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3018 	}
3019 }
3020 
3021 static int hclge_get_status(struct hnae3_handle *handle)
3022 {
3023 	struct hclge_vport *vport = hclge_get_vport(handle);
3024 	struct hclge_dev *hdev = vport->back;
3025 
3026 	hclge_update_link_status(hdev);
3027 
3028 	return hdev->hw.mac.link;
3029 }
3030 
3031 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3032 {
3033 	if (!pci_num_vf(hdev->pdev)) {
3034 		dev_err(&hdev->pdev->dev,
3035 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3036 		return NULL;
3037 	}
3038 
3039 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3040 		dev_err(&hdev->pdev->dev,
3041 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3042 			vf, pci_num_vf(hdev->pdev));
3043 		return NULL;
3044 	}
3045 
3046 	/* VFs start from 1 in the vport array */
3047 	vf += HCLGE_VF_VPORT_START_NUM;
3048 	return &hdev->vport[vf];
3049 }
3050 
3051 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3052 			       struct ifla_vf_info *ivf)
3053 {
3054 	struct hclge_vport *vport = hclge_get_vport(handle);
3055 	struct hclge_dev *hdev = vport->back;
3056 
3057 	vport = hclge_get_vf_vport(hdev, vf);
3058 	if (!vport)
3059 		return -EINVAL;
3060 
3061 	ivf->vf = vf;
3062 	ivf->linkstate = vport->vf_info.link_state;
3063 	ivf->spoofchk = vport->vf_info.spoofchk;
3064 	ivf->trusted = vport->vf_info.trusted;
3065 	ivf->min_tx_rate = 0;
3066 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3067 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3068 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3069 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3070 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3071 
3072 	return 0;
3073 }
3074 
3075 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3076 				   int link_state)
3077 {
3078 	struct hclge_vport *vport = hclge_get_vport(handle);
3079 	struct hclge_dev *hdev = vport->back;
3080 
3081 	vport = hclge_get_vf_vport(hdev, vf);
3082 	if (!vport)
3083 		return -EINVAL;
3084 
3085 	vport->vf_info.link_state = link_state;
3086 
3087 	return 0;
3088 }
3089 
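/* Decode the vector0 interrupt source (IMP/global reset, MSI-X error or
 * mailbox) from the CMDQ source and MSI-X status registers, and return
 * the value needed to clear it through @clearval.
 */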
3090 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3091 {
3092 	u32 cmdq_src_reg, msix_src_reg;
3093 
3094 	/* fetch the events from their corresponding regs */
3095 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3096 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3097 
3098 	/* Assumption: if reset and mailbox events happen to be reported
3099 	 * together, we process only the reset event in this pass and defer
3100 	 * the handling of the mailbox events. Since the RX CMDQ event will
3101 	 * not have been cleared this time, the hardware will raise another
3102 	 * interrupt just for the mailbox.
3103 	 *
3104 	 * check for vector0 reset event sources
3105 	 */
3106 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3107 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3108 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3109 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3110 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3111 		hdev->rst_stats.imp_rst_cnt++;
3112 		return HCLGE_VECTOR0_EVENT_RST;
3113 	}
3114 
3115 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3116 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3117 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3118 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3119 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3120 		hdev->rst_stats.global_rst_cnt++;
3121 		return HCLGE_VECTOR0_EVENT_RST;
3122 	}
3123 
3124 	/* check for vector0 msix event source */
3125 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3126 		*clearval = msix_src_reg;
3127 		return HCLGE_VECTOR0_EVENT_ERR;
3128 	}
3129 
3130 	/* check for vector0 mailbox(=CMDQ RX) event source */
3131 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3132 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3133 		*clearval = cmdq_src_reg;
3134 		return HCLGE_VECTOR0_EVENT_MBX;
3135 	}
3136 
3137 	/* print other vector0 event source */
3138 	dev_info(&hdev->pdev->dev,
3139 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3140 		 cmdq_src_reg, msix_src_reg);
3141 	*clearval = msix_src_reg;
3142 
3143 	return HCLGE_VECTOR0_EVENT_OTHER;
3144 }
3145 
3146 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3147 				    u32 regclr)
3148 {
3149 	switch (event_type) {
3150 	case HCLGE_VECTOR0_EVENT_RST:
3151 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3152 		break;
3153 	case HCLGE_VECTOR0_EVENT_MBX:
3154 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3155 		break;
3156 	default:
3157 		break;
3158 	}
3159 }
3160 
3161 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3162 {
3163 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3164 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3165 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3166 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3167 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3168 }
3169 
3170 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3171 {
3172 	writel(enable ? 1 : 0, vector->addr);
3173 }
3174 
3175 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3176 {
3177 	struct hclge_dev *hdev = data;
3178 	u32 clearval = 0;
3179 	u32 event_cause;
3180 
3181 	hclge_enable_vector(&hdev->misc_vector, false);
3182 	event_cause = hclge_check_event_cause(hdev, &clearval);
3183 
3184 	/* vector 0 interrupt is shared by reset and mailbox source events. */
3185 	switch (event_cause) {
3186 	case HCLGE_VECTOR0_EVENT_ERR:
3187 		/* we do not know what type of reset is required now. This can
3188 		 * only be decided after we fetch the type of errors which
3189 		 * caused this event. Therefore, we do the following for now:
3190 		 * 1. Assert the HNAE3_UNKNOWN_RESET type of reset. This means
3191 		 *    the type of reset to use is deferred.
3192 		 * 2. Schedule the reset service task.
3193 		 * 3. When the service task receives the HNAE3_UNKNOWN_RESET
3194 		 *    type, it will fetch the correct type of reset by first
3195 		 *    decoding the types of errors.
3196 		 */
3197 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3198 		fallthrough;
3199 	case HCLGE_VECTOR0_EVENT_RST:
3200 		hclge_reset_task_schedule(hdev);
3201 		break;
3202 	case HCLGE_VECTOR0_EVENT_MBX:
3203 		/* If we are here then either:
3204 		 * 1. we are not handling any mbx task and are not scheduled
3205 		 *    either,
3206 		 *                        OR
3207 		 * 2. we are handling an mbx task but nothing more is
3208 		 *    scheduled.
3209 		 * In both cases we should schedule the mbx task, as there are
3210 		 * more mbx messages reported by this interrupt.
3211 		 */
3212 		hclge_mbx_task_schedule(hdev);
3213 		break;
3214 	default:
3215 		dev_warn(&hdev->pdev->dev,
3216 			 "received unknown or unhandled event of vector0\n");
3217 		break;
3218 	}
3219 
3220 	hclge_clear_event_cause(hdev, event_cause, clearval);
3221 
3222 	/* Re-enable the interrupt if the event is not caused by a reset.
3223 	 * When clearval is 0, the interrupt status may have been cleared by
3224 	 * hardware before the driver read the status register; in that case
3225 	 * the vector0 interrupt should also be re-enabled.
3226 	 */
3227 	if (!clearval ||
3228 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3229 		hclge_enable_vector(&hdev->misc_vector, true);
3230 	}
3231 
3232 	return IRQ_HANDLED;
3233 }
3234 
3235 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3236 {
3237 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3238 		dev_warn(&hdev->pdev->dev,
3239 			 "vector(vector_id %d) has been freed.\n", vector_id);
3240 		return;
3241 	}
3242 
3243 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3244 	hdev->num_msi_left += 1;
3245 	hdev->num_msi_used -= 1;
3246 }
3247 
3248 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3249 {
3250 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3251 
3252 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3253 
3254 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3255 	hdev->vector_status[0] = 0;
3256 
3257 	hdev->num_msi_left -= 1;
3258 	hdev->num_msi_used += 1;
3259 }
3260 
3261 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3262 				      const cpumask_t *mask)
3263 {
3264 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3265 					      affinity_notify);
3266 
3267 	cpumask_copy(&hdev->affinity_mask, mask);
3268 }
3269 
3270 static void hclge_irq_affinity_release(struct kref *ref)
3271 {
3272 }
3273 
3274 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3275 {
3276 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3277 			      &hdev->affinity_mask);
3278 
3279 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3280 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3281 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3282 				  &hdev->affinity_notify);
3283 }
3284 
3285 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3286 {
3287 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3288 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3289 }
3290 
3291 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3292 {
3293 	int ret;
3294 
3295 	hclge_get_misc_vector(hdev);
3296 
3297 	/* this irq is explicitly freed in hclge_misc_irq_uninit() */
3298 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3299 		 HCLGE_NAME, pci_name(hdev->pdev));
3300 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3301 			  0, hdev->misc_vector.name, hdev);
3302 	if (ret) {
3303 		hclge_free_vector(hdev, 0);
3304 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3305 			hdev->misc_vector.vector_irq);
3306 	}
3307 
3308 	return ret;
3309 }
3310 
3311 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3312 {
3313 	free_irq(hdev->misc_vector.vector_irq, hdev);
3314 	hclge_free_vector(hdev, 0);
3315 }
3316 
3317 int hclge_notify_client(struct hclge_dev *hdev,
3318 			enum hnae3_reset_notify_type type)
3319 {
3320 	struct hnae3_client *client = hdev->nic_client;
3321 	u16 i;
3322 
3323 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3324 		return 0;
3325 
3326 	if (!client->ops->reset_notify)
3327 		return -EOPNOTSUPP;
3328 
3329 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3330 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3331 		int ret;
3332 
3333 		ret = client->ops->reset_notify(handle, type);
3334 		if (ret) {
3335 			dev_err(&hdev->pdev->dev,
3336 				"notify nic client failed %d(%d)\n", type, ret);
3337 			return ret;
3338 		}
3339 	}
3340 
3341 	return 0;
3342 }
3343 
3344 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3345 				    enum hnae3_reset_notify_type type)
3346 {
3347 	struct hnae3_client *client = hdev->roce_client;
3348 	int ret;
3349 	u16 i;
3350 
3351 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3352 		return 0;
3353 
3354 	if (!client->ops->reset_notify)
3355 		return -EOPNOTSUPP;
3356 
3357 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3358 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3359 
3360 		ret = client->ops->reset_notify(handle, type);
3361 		if (ret) {
3362 			dev_err(&hdev->pdev->dev,
3363 				"notify roce client failed %d(%d)",
3364 				type, ret);
3365 			return ret;
3366 		}
3367 	}
3368 
3369 	return ret;
3370 }
3371 
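/* Poll the reset status register matching the current reset type until its
 * reset bit is cleared, giving up after 350 * 100 ms.
 */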
3372 static int hclge_reset_wait(struct hclge_dev *hdev)
3373 {
3374 #define HCLGE_RESET_WAIT_MS	100
3375 #define HCLGE_RESET_WAIT_CNT	350
3376 
3377 	u32 val, reg, reg_bit;
3378 	u32 cnt = 0;
3379 
3380 	switch (hdev->reset_type) {
3381 	case HNAE3_IMP_RESET:
3382 		reg = HCLGE_GLOBAL_RESET_REG;
3383 		reg_bit = HCLGE_IMP_RESET_BIT;
3384 		break;
3385 	case HNAE3_GLOBAL_RESET:
3386 		reg = HCLGE_GLOBAL_RESET_REG;
3387 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3388 		break;
3389 	case HNAE3_FUNC_RESET:
3390 		reg = HCLGE_FUN_RST_ING;
3391 		reg_bit = HCLGE_FUN_RST_ING_B;
3392 		break;
3393 	default:
3394 		dev_err(&hdev->pdev->dev,
3395 			"Wait for unsupported reset type: %d\n",
3396 			hdev->reset_type);
3397 		return -EINVAL;
3398 	}
3399 
3400 	val = hclge_read_dev(&hdev->hw, reg);
3401 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3402 		msleep(HCLGE_RESET_WAIT_MS);
3403 		val = hclge_read_dev(&hdev->hw, reg);
3404 		cnt++;
3405 	}
3406 
3407 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3408 		dev_warn(&hdev->pdev->dev,
3409 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3410 		return -EBUSY;
3411 	}
3412 
3413 	return 0;
3414 }
3415 
3416 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3417 {
3418 	struct hclge_vf_rst_cmd *req;
3419 	struct hclge_desc desc;
3420 
3421 	req = (struct hclge_vf_rst_cmd *)desc.data;
3422 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3423 	req->dest_vfid = func_id;
3424 
3425 	if (reset)
3426 		req->vf_rst = 0x1;
3427 
3428 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3429 }
3430 
3431 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3432 {
3433 	int i;
3434 
3435 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3436 		struct hclge_vport *vport = &hdev->vport[i];
3437 		int ret;
3438 
3439 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3440 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3441 		if (ret) {
3442 			dev_err(&hdev->pdev->dev,
3443 				"set vf(%u) rst failed %d!\n",
3444 				vport->vport_id, ret);
3445 			return ret;
3446 		}
3447 
3448 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3449 			continue;
3450 
3451 		/* Inform VF to process the reset.
3452 		 * hclge_inform_reset_assert_to_vf may fail if VF
3453 		 * driver is not loaded.
3454 		 */
3455 		ret = hclge_inform_reset_assert_to_vf(vport);
3456 		if (ret)
3457 			dev_warn(&hdev->pdev->dev,
3458 				 "inform reset to vf(%u) failed %d!\n",
3459 				 vport->vport_id, ret);
3460 	}
3461 
3462 	return 0;
3463 }
3464 
3465 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3466 {
3467 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3468 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3469 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3470 		return;
3471 
3472 	hclge_mbx_handler(hdev);
3473 
3474 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3475 }
3476 
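/* Before a PF or FLR reset, poll until every VF reports it has stopped IO,
 * servicing mailbox requests in between. Old firmware that does not support
 * the query gets a fixed 100 ms grace period instead.
 */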
3477 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3478 {
3479 	struct hclge_pf_rst_sync_cmd *req;
3480 	struct hclge_desc desc;
3481 	int cnt = 0;
3482 	int ret;
3483 
3484 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3485 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3486 
3487 	do {
3488 		/* the VF needs to down its netdev by mbx during PF or FLR reset */
3489 		hclge_mailbox_service_task(hdev);
3490 
3491 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3492 		/* for compatibility with old firmware, wait
3493 		 * 100 ms for the VF to stop IO
3494 		 */
3495 		if (ret == -EOPNOTSUPP) {
3496 			msleep(HCLGE_RESET_SYNC_TIME);
3497 			return;
3498 		} else if (ret) {
3499 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3500 				 ret);
3501 			return;
3502 		} else if (req->all_vf_ready) {
3503 			return;
3504 		}
3505 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3506 		hclge_cmd_reuse_desc(&desc, true);
3507 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3508 
3509 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3510 }
3511 
3512 void hclge_report_hw_error(struct hclge_dev *hdev,
3513 			   enum hnae3_hw_error_type type)
3514 {
3515 	struct hnae3_client *client = hdev->nic_client;
3516 	u16 i;
3517 
3518 	if (!client || !client->ops->process_hw_error ||
3519 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3520 		return;
3521 
3522 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3523 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3524 }
3525 
3526 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3527 {
3528 	u32 reg_val;
3529 
3530 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3531 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3532 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3533 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3534 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3535 	}
3536 
3537 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3538 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3539 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3540 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3541 	}
3542 }
3543 
3544 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3545 {
3546 	struct hclge_desc desc;
3547 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3548 	int ret;
3549 
3550 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3551 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3552 	req->fun_reset_vfid = func_id;
3553 
3554 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3555 	if (ret)
3556 		dev_err(&hdev->pdev->dev,
3557 			"send function reset cmd fail, status =%d\n", ret);
3558 
3559 	return ret;
3560 }
3561 
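/* trigger the reset recorded in hdev->reset_type: write the global reset
 * bit for a global reset, or re-schedule the reset task for a PF reset;
 * do nothing if a hardware reset is still in progress
 */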
3562 static void hclge_do_reset(struct hclge_dev *hdev)
3563 {
3564 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3565 	struct pci_dev *pdev = hdev->pdev;
3566 	u32 val;
3567 
3568 	if (hclge_get_hw_reset_stat(handle)) {
3569 		dev_info(&pdev->dev, "hardware reset not finish\n");
3570 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3571 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3572 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3573 		return;
3574 	}
3575 
3576 	switch (hdev->reset_type) {
3577 	case HNAE3_GLOBAL_RESET:
3578 		dev_info(&pdev->dev, "global reset requested\n");
3579 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3580 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3581 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3582 		break;
3583 	case HNAE3_FUNC_RESET:
3584 		dev_info(&pdev->dev, "PF reset requested\n");
3585 		/* schedule again to check later */
3586 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3587 		hclge_reset_task_schedule(hdev);
3588 		break;
3589 	default:
3590 		dev_warn(&pdev->dev,
3591 			 "unsupported reset type: %d\n", hdev->reset_type);
3592 		break;
3593 	}
3594 }
3595 
3596 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3597 						   unsigned long *addr)
3598 {
3599 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3600 	struct hclge_dev *hdev = ae_dev->priv;
3601 
3602 	/* first, resolve any unknown reset type to the known type(s) */
3603 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3604 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3605 					HCLGE_MISC_VECTOR_INT_STS);
3606 		/* we will intentionally ignore any errors from this function
3607 		 * as we will end up in *some* reset request in any case
3608 		 */
3609 		if (hclge_handle_hw_msix_error(hdev, addr))
3610 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3611 				 msix_sts_reg);
3612 
3613 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3614 		/* We deferred the clearing of the error event which caused
3615 		 * the interrupt since it was not possible to do that in
3616 		 * interrupt context (and this is the reason we introduced the
3617 		 * new UNKNOWN reset type). Now that the errors have been
3618 		 * handled and cleared in hardware, we can safely enable
3619 		 * interrupts. This is an exception to the norm.
3620 		 */
3621 		hclge_enable_vector(&hdev->misc_vector, true);
3622 	}
3623 
3624 	/* return the highest priority reset level amongst all */
3625 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3626 		rst_level = HNAE3_IMP_RESET;
3627 		clear_bit(HNAE3_IMP_RESET, addr);
3628 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3629 		clear_bit(HNAE3_FUNC_RESET, addr);
3630 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3631 		rst_level = HNAE3_GLOBAL_RESET;
3632 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3633 		clear_bit(HNAE3_FUNC_RESET, addr);
3634 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3635 		rst_level = HNAE3_FUNC_RESET;
3636 		clear_bit(HNAE3_FUNC_RESET, addr);
3637 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3638 		rst_level = HNAE3_FLR_RESET;
3639 		clear_bit(HNAE3_FLR_RESET, addr);
3640 	}
3641 
3642 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3643 	    rst_level < hdev->reset_type)
3644 		return HNAE3_NONE_RESET;
3645 
3646 	return rst_level;
3647 }
3648 
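/* clear the interrupt source of the reset just handled (deferred until
 * now on revision 0x20) and re-enable the misc interrupt vector
 */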
3649 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3650 {
3651 	u32 clearval = 0;
3652 
3653 	switch (hdev->reset_type) {
3654 	case HNAE3_IMP_RESET:
3655 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3656 		break;
3657 	case HNAE3_GLOBAL_RESET:
3658 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3659 		break;
3660 	default:
3661 		break;
3662 	}
3663 
3664 	if (!clearval)
3665 		return;
3666 
3667 	/* For revision 0x20, the reset interrupt source
3668 	 * can only be cleared after the hardware reset is done
3669 	 */
3670 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3671 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3672 				clearval);
3673 
3674 	hclge_enable_vector(&hdev->misc_vector, true);
3675 }
3676 
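/* set or clear the software reset ready bit, which is used to handshake
 * with the firmware around a reset
 */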
3677 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3678 {
3679 	u32 reg_val;
3680 
3681 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3682 	if (enable)
3683 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3684 	else
3685 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3686 
3687 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3688 }
3689 
3690 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3691 {
3692 	int ret;
3693 
3694 	ret = hclge_set_all_vf_rst(hdev, true);
3695 	if (ret)
3696 		return ret;
3697 
3698 	hclge_func_reset_sync_vf(hdev);
3699 
3700 	return 0;
3701 }
3702 
3703 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3704 {
3705 	u32 reg_val;
3706 	int ret = 0;
3707 
3708 	switch (hdev->reset_type) {
3709 	case HNAE3_FUNC_RESET:
3710 		ret = hclge_func_reset_notify_vf(hdev);
3711 		if (ret)
3712 			return ret;
3713 
3714 		ret = hclge_func_reset_cmd(hdev, 0);
3715 		if (ret) {
3716 			dev_err(&hdev->pdev->dev,
3717 				"asserting function reset fail %d!\n", ret);
3718 			return ret;
3719 		}
3720 
3721 		/* After performing the PF reset, it is not necessary to do any
3722 		 * mailbox handling or send any command to firmware, because
3723 		 * any mailbox handling or command to firmware is only valid
3724 		 * after hclge_cmd_init is called.
3725 		 */
3726 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3727 		hdev->rst_stats.pf_rst_cnt++;
3728 		break;
3729 	case HNAE3_FLR_RESET:
3730 		ret = hclge_func_reset_notify_vf(hdev);
3731 		if (ret)
3732 			return ret;
3733 		break;
3734 	case HNAE3_IMP_RESET:
3735 		hclge_handle_imp_error(hdev);
3736 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3737 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3738 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3739 		break;
3740 	default:
3741 		break;
3742 	}
3743 
3744 	/* inform hardware that preparatory work is done */
3745 	msleep(HCLGE_RESET_SYNC_TIME);
3746 	hclge_reset_handshake(hdev, true);
3747 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3748 
3749 	return ret;
3750 }
3751 
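/* handle a failed reset attempt; returns true if the reset task should
 * be re-scheduled to retry, false if the failure is final or a new reset
 * interrupt will take over
 */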
3752 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3753 {
3754 #define MAX_RESET_FAIL_CNT 5
3755 
3756 	if (hdev->reset_pending) {
3757 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3758 			 hdev->reset_pending);
3759 		return true;
3760 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3761 		   HCLGE_RESET_INT_M) {
3762 		dev_info(&hdev->pdev->dev,
3763 			 "reset failed because new reset interrupt\n");
3764 		hclge_clear_reset_cause(hdev);
3765 		return false;
3766 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3767 		hdev->rst_stats.reset_fail_cnt++;
3768 		set_bit(hdev->reset_type, &hdev->reset_pending);
3769 		dev_info(&hdev->pdev->dev,
3770 			 "re-schedule reset task(%u)\n",
3771 			 hdev->rst_stats.reset_fail_cnt);
3772 		return true;
3773 	}
3774 
3775 	hclge_clear_reset_cause(hdev);
3776 
3777 	/* recover the handshake status when reset fails */
3778 	hclge_reset_handshake(hdev, true);
3779 
3780 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3781 
3782 	hclge_dbg_dump_rst_info(hdev);
3783 
3784 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3785 
3786 	return false;
3787 }
3788 
3789 static int hclge_set_rst_done(struct hclge_dev *hdev)
3790 {
3791 	struct hclge_pf_rst_done_cmd *req;
3792 	struct hclge_desc desc;
3793 	int ret;
3794 
3795 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3796 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3797 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3798 
3799 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3800 	/* To be compatible with the old firmware, which does not support
3801 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3802 	 * return success
3803 	 */
3804 	if (ret == -EOPNOTSUPP) {
3805 		dev_warn(&hdev->pdev->dev,
3806 			 "current firmware does not support command(0x%x)!\n",
3807 			 HCLGE_OPC_PF_RST_DONE);
3808 		return 0;
3809 	} else if (ret) {
3810 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3811 			ret);
3812 	}
3813 
3814 	return ret;
3815 }
3816 
3817 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3818 {
3819 	int ret = 0;
3820 
3821 	switch (hdev->reset_type) {
3822 	case HNAE3_FUNC_RESET:
3823 	case HNAE3_FLR_RESET:
3824 		ret = hclge_set_all_vf_rst(hdev, false);
3825 		break;
3826 	case HNAE3_GLOBAL_RESET:
3827 	case HNAE3_IMP_RESET:
3828 		ret = hclge_set_rst_done(hdev);
3829 		break;
3830 	default:
3831 		break;
3832 	}
3833 
3834 	/* clear up the handshake status after re-initialization is done */
3835 	hclge_reset_handshake(hdev, false);
3836 
3837 	return ret;
3838 }
3839 
3840 static int hclge_reset_stack(struct hclge_dev *hdev)
3841 {
3842 	int ret;
3843 
3844 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3845 	if (ret)
3846 		return ret;
3847 
3848 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3849 	if (ret)
3850 		return ret;
3851 
3852 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3853 }
3854 
3855 static int hclge_reset_prepare(struct hclge_dev *hdev)
3856 {
3857 	int ret;
3858 
3859 	hdev->rst_stats.reset_cnt++;
3860 	/* perform reset of the stack & ae device for a client */
3861 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3862 	if (ret)
3863 		return ret;
3864 
3865 	rtnl_lock();
3866 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3867 	rtnl_unlock();
3868 	if (ret)
3869 		return ret;
3870 
3871 	return hclge_reset_prepare_wait(hdev);
3872 }
3873 
3874 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3875 {
3876 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3877 	enum hnae3_reset_type reset_level;
3878 	int ret;
3879 
3880 	hdev->rst_stats.hw_reset_done_cnt++;
3881 
3882 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3883 	if (ret)
3884 		return ret;
3885 
3886 	rtnl_lock();
3887 	ret = hclge_reset_stack(hdev);
3888 	rtnl_unlock();
3889 	if (ret)
3890 		return ret;
3891 
3892 	hclge_clear_reset_cause(hdev);
3893 
3894 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3895 	/* ignore the RoCE notify error only if the reset has already failed
3896 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3897 	 */
3898 	if (ret &&
3899 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3900 		return ret;
3901 
3902 	ret = hclge_reset_prepare_up(hdev);
3903 	if (ret)
3904 		return ret;
3905 
3906 	rtnl_lock();
3907 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3908 	rtnl_unlock();
3909 	if (ret)
3910 		return ret;
3911 
3912 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3913 	if (ret)
3914 		return ret;
3915 
3916 	hdev->last_reset_time = jiffies;
3917 	hdev->rst_stats.reset_fail_cnt = 0;
3918 	hdev->rst_stats.reset_done_cnt++;
3919 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3920 
3921 	/* if default_reset_request has a higher level reset request,
3922 	 * it should be handled as soon as possible, since some errors
3923 	 * need this kind of reset to be fixed.
3924 	 */
3925 	reset_level = hclge_get_reset_level(ae_dev,
3926 					    &hdev->default_reset_request);
3927 	if (reset_level != HNAE3_NONE_RESET)
3928 		set_bit(reset_level, &hdev->reset_request);
3929 
3930 	return 0;
3931 }
3932 
3933 static void hclge_reset(struct hclge_dev *hdev)
3934 {
3935 	if (hclge_reset_prepare(hdev))
3936 		goto err_reset;
3937 
3938 	if (hclge_reset_wait(hdev))
3939 		goto err_reset;
3940 
3941 	if (hclge_reset_rebuild(hdev))
3942 		goto err_reset;
3943 
3944 	return;
3945 
3946 err_reset:
3947 	if (hclge_reset_err_handle(hdev))
3948 		hclge_reset_task_schedule(hdev);
3949 }
3950 
3951 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3952 {
3953 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3954 	struct hclge_dev *hdev = ae_dev->priv;
3955 
3956 	/* We might end up getting called broadly because of the 2 cases below:
3957 	 * 1. A recoverable error was conveyed through APEI and the only way
3958 	 *    to bring back normalcy is to reset.
3959 	 * 2. A new reset request from the stack due to timeout
3960 	 *
3961 	 * For the first case, the error event might not have an ae handle
3962 	 * available. Check if this is a new reset request and we are not here
3963 	 * just because the last reset attempt did not succeed and the
3964 	 * watchdog hit us again. We will know this if the last reset request
3965 	 * did not occur very recently (watchdog timer = 5*HZ, let us check
3966 	 * after a sufficiently large time, say 4*5*HZ). In case of a new
3967 	 * request we reset the "reset level" to PF reset. And if it is a
3968 	 * repeat of the most recent reset request, then we throttle it and
3969 	 * will not allow it again before 3*HZ has elapsed.
3970 	 */
3971 	if (!handle)
3972 		handle = &hdev->vport[0].nic;
3973 
3974 	if (time_before(jiffies, (hdev->last_reset_time +
3975 				  HCLGE_RESET_INTERVAL))) {
3976 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3977 		return;
3978 	} else if (hdev->default_reset_request) {
3979 		hdev->reset_level =
3980 			hclge_get_reset_level(ae_dev,
3981 					      &hdev->default_reset_request);
3982 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3983 		hdev->reset_level = HNAE3_FUNC_RESET;
3984 	}
3985 
3986 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3987 		 hdev->reset_level);
3988 
3989 	/* request reset & schedule reset task */
3990 	set_bit(hdev->reset_level, &hdev->reset_request);
3991 	hclge_reset_task_schedule(hdev);
3992 
3993 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3994 		hdev->reset_level++;
3995 }
3996 
3997 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3998 					enum hnae3_reset_type rst_type)
3999 {
4000 	struct hclge_dev *hdev = ae_dev->priv;
4001 
4002 	set_bit(rst_type, &hdev->default_reset_request);
4003 }
4004 
4005 static void hclge_reset_timer(struct timer_list *t)
4006 {
4007 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4008 
4009 	/* if default_reset_request has no value, it means that this reset
4010 	 * request has already been handled, so just return here
4011 	 */
4012 	if (!hdev->default_reset_request)
4013 		return;
4014 
4015 	dev_info(&hdev->pdev->dev,
4016 		 "triggering reset in reset timer\n");
4017 	hclge_reset_event(hdev->pdev, NULL);
4018 }
4019 
4020 static void hclge_reset_subtask(struct hclge_dev *hdev)
4021 {
4022 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4023 
4024 	/* check if there is any ongoing reset in the hardware. This status can
4025 	 * be checked from reset_pending. If there is, then we need to wait for
4026 	 * the hardware to complete the reset.
4027 	 *    a. If we are able to figure out in a reasonable time that the
4028 	 *       hardware has fully reset, then we can proceed with the driver
4029 	 *       and client reset.
4030 	 *    b. else, we can come back later to check this status, so
4031 	 *       re-schedule now.
4032 	 */
4033 	hdev->last_reset_time = jiffies;
4034 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4035 	if (hdev->reset_type != HNAE3_NONE_RESET)
4036 		hclge_reset(hdev);
4037 
4038 	/* check if we got any *new* reset requests to be honored */
4039 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4040 	if (hdev->reset_type != HNAE3_NONE_RESET)
4041 		hclge_do_reset(hdev);
4042 
4043 	hdev->reset_type = HNAE3_NONE_RESET;
4044 }
4045 
4046 static void hclge_reset_service_task(struct hclge_dev *hdev)
4047 {
4048 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4049 		return;
4050 
4051 	down(&hdev->reset_sem);
4052 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4053 
4054 	hclge_reset_subtask(hdev);
4055 
4056 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4057 	up(&hdev->reset_sem);
4058 }
4059 
4060 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4061 {
4062 	int i;
4063 
4064 	/* start from vport 1, since the PF (vport 0) is always alive */
4065 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4066 		struct hclge_vport *vport = &hdev->vport[i];
4067 
4068 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4069 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4070 
4071 		/* If vf is not alive, set to default value */
4072 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4073 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4074 	}
4075 }
4076 
4077 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4078 {
4079 	unsigned long delta = round_jiffies_relative(HZ);
4080 
4081 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4082 		return;
4083 
4084 	/* Always handle the link updating to make sure link state is
4085 	 * updated when it is triggered by mbx.
4086 	 */
4087 	hclge_update_link_status(hdev);
4088 	hclge_sync_mac_table(hdev);
4089 	hclge_sync_promisc_mode(hdev);
4090 
4091 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4092 		delta = jiffies - hdev->last_serv_processed;
4093 
4094 		if (delta < round_jiffies_relative(HZ)) {
4095 			delta = round_jiffies_relative(HZ) - delta;
4096 			goto out;
4097 		}
4098 	}
4099 
4100 	hdev->serv_processed_cnt++;
4101 	hclge_update_vport_alive(hdev);
4102 
4103 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4104 		hdev->last_serv_processed = jiffies;
4105 		goto out;
4106 	}
4107 
4108 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4109 		hclge_update_stats_for_all(hdev);
4110 
4111 	hclge_update_port_info(hdev);
4112 	hclge_sync_vlan_filter(hdev);
4113 
4114 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4115 		hclge_rfs_filter_expire(hdev);
4116 
4117 	hdev->last_serv_processed = jiffies;
4118 
4119 out:
4120 	hclge_task_schedule(hdev, delta);
4121 }
4122 
4123 static void hclge_service_task(struct work_struct *work)
4124 {
4125 	struct hclge_dev *hdev =
4126 		container_of(work, struct hclge_dev, service_task.work);
4127 
4128 	hclge_reset_service_task(hdev);
4129 	hclge_mailbox_service_task(hdev);
4130 	hclge_periodic_service_task(hdev);
4131 
4132 	/* Handle reset and mbx again in case periodical task delays the
4133 	 * handling by calling hclge_task_schedule() in
4134 	 * hclge_periodic_service_task().
4135 	 */
4136 	hclge_reset_service_task(hdev);
4137 	hclge_mailbox_service_task(hdev);
4138 }
4139 
4140 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4141 {
4142 	/* VF handle has no client */
4143 	if (!handle->client)
4144 		return container_of(handle, struct hclge_vport, nic);
4145 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4146 		return container_of(handle, struct hclge_vport, roce);
4147 	else
4148 		return container_of(handle, struct hclge_vport, nic);
4149 }
4150 
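/* fill in the irq number and interrupt register address for the vector
 * at index idx
 */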
4151 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4152 				  struct hnae3_vector_info *vector_info)
4153 {
4154 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4155 
4156 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4157 
4158 	/* need an extended offset to configure vectors >= 64 */
4159 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4160 		vector_info->io_addr = hdev->hw.io_base +
4161 				HCLGE_VECTOR_REG_BASE +
4162 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4163 	else
4164 		vector_info->io_addr = hdev->hw.io_base +
4165 				HCLGE_VECTOR_EXT_REG_BASE +
4166 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4167 				HCLGE_VECTOR_REG_OFFSET_H +
4168 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4169 				HCLGE_VECTOR_REG_OFFSET;
4170 
4171 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4172 	hdev->vector_irq[idx] = vector_info->vector;
4173 }
4174 
4175 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4176 			    struct hnae3_vector_info *vector_info)
4177 {
4178 	struct hclge_vport *vport = hclge_get_vport(handle);
4179 	struct hnae3_vector_info *vector = vector_info;
4180 	struct hclge_dev *hdev = vport->back;
4181 	int alloc = 0;
4182 	u16 i = 0;
4183 	u16 j;
4184 
4185 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4186 	vector_num = min(hdev->num_msi_left, vector_num);
4187 
4188 	for (j = 0; j < vector_num; j++) {
4189 		while (++i < hdev->num_nic_msi) {
4190 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4191 				hclge_get_vector_info(hdev, i, vector);
4192 				vector++;
4193 				alloc++;
4194 
4195 				break;
4196 			}
4197 		}
4198 	}
4199 	hdev->num_msi_left -= alloc;
4200 	hdev->num_msi_used += alloc;
4201 
4202 	return alloc;
4203 }
4204 
4205 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4206 {
4207 	int i;
4208 
4209 	for (i = 0; i < hdev->num_msi; i++)
4210 		if (vector == hdev->vector_irq[i])
4211 			return i;
4212 
4213 	return -EINVAL;
4214 }
4215 
4216 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4217 {
4218 	struct hclge_vport *vport = hclge_get_vport(handle);
4219 	struct hclge_dev *hdev = vport->back;
4220 	int vector_id;
4221 
4222 	vector_id = hclge_get_vector_index(hdev, vector);
4223 	if (vector_id < 0) {
4224 		dev_err(&hdev->pdev->dev,
4225 			"Get vector index fail. vector = %d\n", vector);
4226 		return vector_id;
4227 	}
4228 
4229 	hclge_free_vector(hdev, vector_id);
4230 
4231 	return 0;
4232 }
4233 
4234 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4235 {
4236 	return HCLGE_RSS_KEY_SIZE;
4237 }
4238 
4239 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4240 {
4241 	return HCLGE_RSS_IND_TBL_SIZE;
4242 }
4243 
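/* write the RSS hash algorithm and hash key to hardware, splitting the
 * key across multiple commands if it does not fit in one descriptor
 */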
4244 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4245 				  const u8 hfunc, const u8 *key)
4246 {
4247 	struct hclge_rss_config_cmd *req;
4248 	unsigned int key_offset = 0;
4249 	struct hclge_desc desc;
4250 	int key_counts;
4251 	int key_size;
4252 	int ret;
4253 
4254 	key_counts = HCLGE_RSS_KEY_SIZE;
4255 	req = (struct hclge_rss_config_cmd *)desc.data;
4256 
4257 	while (key_counts) {
4258 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4259 					   false);
4260 
4261 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4262 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4263 
4264 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4265 		memcpy(req->hash_key,
4266 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4267 
4268 		key_counts -= key_size;
4269 		key_offset++;
4270 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4271 		if (ret) {
4272 			dev_err(&hdev->pdev->dev,
4273 				"Configure RSS config fail, status = %d\n",
4274 				ret);
4275 			return ret;
4276 		}
4277 	}
4278 	return 0;
4279 }
4280 
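/* program the RSS indirection table, HCLGE_RSS_CFG_TBL_SIZE entries per
 * command, with the queue id MSBs packed separately from the low bytes
 */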
4281 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4282 {
4283 	struct hclge_rss_indirection_table_cmd *req;
4284 	struct hclge_desc desc;
4285 	u8 rss_msb_oft;
4286 	u8 rss_msb_val;
4287 	int ret;
4288 	u16 qid;
4289 	int i;
4290 	u32 j;
4291 
4292 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4293 
4294 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4295 		hclge_cmd_setup_basic_desc
4296 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4297 
4298 		req->start_table_index =
4299 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4300 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4301 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4302 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4303 			req->rss_qid_l[j] = qid & 0xff;
4304 			rss_msb_oft =
4305 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4306 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4307 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4308 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4309 		}
4310 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4311 		if (ret) {
4312 			dev_err(&hdev->pdev->dev,
4313 				"Configure rss indir table fail, status = %d\n",
4314 				ret);
4315 			return ret;
4316 		}
4317 	}
4318 	return 0;
4319 }
4320 
4321 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4322 				 u16 *tc_size, u16 *tc_offset)
4323 {
4324 	struct hclge_rss_tc_mode_cmd *req;
4325 	struct hclge_desc desc;
4326 	int ret;
4327 	int i;
4328 
4329 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4330 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4331 
4332 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4333 		u16 mode = 0;
4334 
4335 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4336 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4337 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4338 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4339 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4340 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4341 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4342 
4343 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4344 	}
4345 
4346 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4347 	if (ret)
4348 		dev_err(&hdev->pdev->dev,
4349 			"Configure rss tc mode fail, status = %d\n", ret);
4350 
4351 	return ret;
4352 }
4353 
4354 static void hclge_get_rss_type(struct hclge_vport *vport)
4355 {
4356 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4357 	    vport->rss_tuple_sets.ipv4_udp_en ||
4358 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4359 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4360 	    vport->rss_tuple_sets.ipv6_udp_en ||
4361 	    vport->rss_tuple_sets.ipv6_sctp_en)
4362 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4363 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4364 		 vport->rss_tuple_sets.ipv6_fragment_en)
4365 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4366 	else
4367 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4368 }
4369 
4370 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4371 {
4372 	struct hclge_rss_input_tuple_cmd *req;
4373 	struct hclge_desc desc;
4374 	int ret;
4375 
4376 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4377 
4378 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4379 
4380 	/* Get the tuple cfg from pf */
4381 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4382 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4383 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4384 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4385 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4386 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4387 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4388 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4389 	hclge_get_rss_type(&hdev->vport[0]);
4390 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4391 	if (ret)
4392 		dev_err(&hdev->pdev->dev,
4393 			"Configure rss input fail, status = %d\n", ret);
4394 	return ret;
4395 }
4396 
4397 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4398 			 u8 *key, u8 *hfunc)
4399 {
4400 	struct hclge_vport *vport = hclge_get_vport(handle);
4401 	int i;
4402 
4403 	/* Get hash algorithm */
4404 	if (hfunc) {
4405 		switch (vport->rss_algo) {
4406 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4407 			*hfunc = ETH_RSS_HASH_TOP;
4408 			break;
4409 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4410 			*hfunc = ETH_RSS_HASH_XOR;
4411 			break;
4412 		default:
4413 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4414 			break;
4415 		}
4416 	}
4417 
4418 	/* Get the RSS Key required by the user */
4419 	if (key)
4420 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4421 
4422 	/* Get indirect table */
4423 	if (indir)
4424 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4425 			indir[i] =  vport->rss_indirection_tbl[i];
4426 
4427 	return 0;
4428 }
4429 
4430 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4431 			 const  u8 *key, const  u8 hfunc)
4432 {
4433 	struct hclge_vport *vport = hclge_get_vport(handle);
4434 	struct hclge_dev *hdev = vport->back;
4435 	u8 hash_algo;
4436 	int ret, i;
4437 
4438 	/* Set the RSS Hash Key if specified by the user */
4439 	if (key) {
4440 		switch (hfunc) {
4441 		case ETH_RSS_HASH_TOP:
4442 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4443 			break;
4444 		case ETH_RSS_HASH_XOR:
4445 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4446 			break;
4447 		case ETH_RSS_HASH_NO_CHANGE:
4448 			hash_algo = vport->rss_algo;
4449 			break;
4450 		default:
4451 			return -EINVAL;
4452 		}
4453 
4454 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4455 		if (ret)
4456 			return ret;
4457 
4458 		/* Update the shadow RSS key with the user specified key */
4459 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4460 		vport->rss_algo = hash_algo;
4461 	}
4462 
4463 	/* Update the shadow RSS table with user specified qids */
4464 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4465 		vport->rss_indirection_tbl[i] = indir[i];
4466 
4467 	/* Update the hardware */
4468 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4469 }
4470 
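/* translate the ethtool RXH_* flags into the hardware tuple enable bits */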
4471 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4472 {
4473 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4474 
4475 	if (nfc->data & RXH_L4_B_2_3)
4476 		hash_sets |= HCLGE_D_PORT_BIT;
4477 	else
4478 		hash_sets &= ~HCLGE_D_PORT_BIT;
4479 
4480 	if (nfc->data & RXH_IP_SRC)
4481 		hash_sets |= HCLGE_S_IP_BIT;
4482 	else
4483 		hash_sets &= ~HCLGE_S_IP_BIT;
4484 
4485 	if (nfc->data & RXH_IP_DST)
4486 		hash_sets |= HCLGE_D_IP_BIT;
4487 	else
4488 		hash_sets &= ~HCLGE_D_IP_BIT;
4489 
4490 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4491 		hash_sets |= HCLGE_V_TAG_BIT;
4492 
4493 	return hash_sets;
4494 }
4495 
4496 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4497 			       struct ethtool_rxnfc *nfc)
4498 {
4499 	struct hclge_vport *vport = hclge_get_vport(handle);
4500 	struct hclge_dev *hdev = vport->back;
4501 	struct hclge_rss_input_tuple_cmd *req;
4502 	struct hclge_desc desc;
4503 	u8 tuple_sets;
4504 	int ret;
4505 
4506 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4507 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4508 		return -EINVAL;
4509 
4510 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4511 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4512 
4513 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4514 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4515 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4516 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4517 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4518 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4519 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4520 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4521 
4522 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4523 	switch (nfc->flow_type) {
4524 	case TCP_V4_FLOW:
4525 		req->ipv4_tcp_en = tuple_sets;
4526 		break;
4527 	case TCP_V6_FLOW:
4528 		req->ipv6_tcp_en = tuple_sets;
4529 		break;
4530 	case UDP_V4_FLOW:
4531 		req->ipv4_udp_en = tuple_sets;
4532 		break;
4533 	case UDP_V6_FLOW:
4534 		req->ipv6_udp_en = tuple_sets;
4535 		break;
4536 	case SCTP_V4_FLOW:
4537 		req->ipv4_sctp_en = tuple_sets;
4538 		break;
4539 	case SCTP_V6_FLOW:
4540 		if ((nfc->data & RXH_L4_B_0_1) ||
4541 		    (nfc->data & RXH_L4_B_2_3))
4542 			return -EINVAL;
4543 
4544 		req->ipv6_sctp_en = tuple_sets;
4545 		break;
4546 	case IPV4_FLOW:
4547 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4548 		break;
4549 	case IPV6_FLOW:
4550 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4551 		break;
4552 	default:
4553 		return -EINVAL;
4554 	}
4555 
4556 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4557 	if (ret) {
4558 		dev_err(&hdev->pdev->dev,
4559 			"Set rss tuple fail, status = %d\n", ret);
4560 		return ret;
4561 	}
4562 
4563 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4564 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4565 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4566 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4567 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4568 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4569 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4570 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4571 	hclge_get_rss_type(vport);
4572 	return 0;
4573 }
4574 
4575 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4576 			       struct ethtool_rxnfc *nfc)
4577 {
4578 	struct hclge_vport *vport = hclge_get_vport(handle);
4579 	u8 tuple_sets;
4580 
4581 	nfc->data = 0;
4582 
4583 	switch (nfc->flow_type) {
4584 	case TCP_V4_FLOW:
4585 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4586 		break;
4587 	case UDP_V4_FLOW:
4588 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4589 		break;
4590 	case TCP_V6_FLOW:
4591 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4592 		break;
4593 	case UDP_V6_FLOW:
4594 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4595 		break;
4596 	case SCTP_V4_FLOW:
4597 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4598 		break;
4599 	case SCTP_V6_FLOW:
4600 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4601 		break;
4602 	case IPV4_FLOW:
4603 	case IPV6_FLOW:
4604 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4605 		break;
4606 	default:
4607 		return -EINVAL;
4608 	}
4609 
4610 	if (!tuple_sets)
4611 		return 0;
4612 
4613 	if (tuple_sets & HCLGE_D_PORT_BIT)
4614 		nfc->data |= RXH_L4_B_2_3;
4615 	if (tuple_sets & HCLGE_S_PORT_BIT)
4616 		nfc->data |= RXH_L4_B_0_1;
4617 	if (tuple_sets & HCLGE_D_IP_BIT)
4618 		nfc->data |= RXH_IP_DST;
4619 	if (tuple_sets & HCLGE_S_IP_BIT)
4620 		nfc->data |= RXH_IP_SRC;
4621 
4622 	return 0;
4623 }
4624 
4625 static int hclge_get_tc_size(struct hnae3_handle *handle)
4626 {
4627 	struct hclge_vport *vport = hclge_get_vport(handle);
4628 	struct hclge_dev *hdev = vport->back;
4629 
4630 	return hdev->pf_rss_size_max;
4631 }
4632 
4633 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4634 {
4635 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4636 	struct hclge_vport *vport = hdev->vport;
4637 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4638 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4639 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4640 	struct hnae3_tc_info *tc_info;
4641 	u16 roundup_size;
4642 	u16 rss_size;
4643 	int i;
4644 
4645 	tc_info = &vport->nic.kinfo.tc_info;
4646 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4647 		rss_size = tc_info->tqp_count[i];
4648 		tc_valid[i] = 0;
4649 
4650 		if (!(hdev->hw_tc_map & BIT(i)))
4651 			continue;
4652 
4653 		/* tc_size set to hardware is the log2 of the roundup power of
4654 		 * two of rss_size; the actual queue size is limited by the
4655 		 * indirection table.
4656 		 */
4657 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4658 		    rss_size == 0) {
4659 			dev_err(&hdev->pdev->dev,
4660 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4661 				rss_size);
4662 			return -EINVAL;
4663 		}
4664 
4665 		roundup_size = roundup_pow_of_two(rss_size);
4666 		roundup_size = ilog2(roundup_size);
4667 
4668 		tc_valid[i] = 1;
4669 		tc_size[i] = roundup_size;
4670 		tc_offset[i] = tc_info->tqp_offset[i];
4671 	}
4672 
4673 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4674 }
4675 
4676 int hclge_rss_init_hw(struct hclge_dev *hdev)
4677 {
4678 	struct hclge_vport *vport = hdev->vport;
4679 	u16 *rss_indir = vport[0].rss_indirection_tbl;
4680 	u8 *key = vport[0].rss_hash_key;
4681 	u8 hfunc = vport[0].rss_algo;
4682 	int ret;
4683 
4684 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4685 	if (ret)
4686 		return ret;
4687 
4688 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4689 	if (ret)
4690 		return ret;
4691 
4692 	ret = hclge_set_rss_input_tuple(hdev);
4693 	if (ret)
4694 		return ret;
4695 
4696 	return hclge_init_rss_tc_mode(hdev);
4697 }
4698 
4699 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4700 {
4701 	struct hclge_vport *vport = hdev->vport;
4702 	int i, j;
4703 
4704 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4705 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4706 			vport[j].rss_indirection_tbl[i] =
4707 				i % vport[j].alloc_rss_size;
4708 	}
4709 }
4710 
4711 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4712 {
4713 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4714 	struct hclge_vport *vport = hdev->vport;
4715 
4716 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4717 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4718 
4719 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4720 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4721 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4722 		vport[i].rss_tuple_sets.ipv4_udp_en =
4723 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4724 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4725 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4726 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4727 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4728 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4729 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4730 		vport[i].rss_tuple_sets.ipv6_udp_en =
4731 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4732 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4733 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4734 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4735 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4736 
4737 		vport[i].rss_algo = rss_algo;
4738 
4739 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4740 		       HCLGE_RSS_KEY_SIZE);
4741 	}
4742 
4743 	hclge_rss_indir_init_cfg(hdev);
4744 }
4745 
4746 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4747 				int vector_id, bool en,
4748 				struct hnae3_ring_chain_node *ring_chain)
4749 {
4750 	struct hclge_dev *hdev = vport->back;
4751 	struct hnae3_ring_chain_node *node;
4752 	struct hclge_desc desc;
4753 	struct hclge_ctrl_vector_chain_cmd *req =
4754 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4755 	enum hclge_cmd_status status;
4756 	enum hclge_opcode_type op;
4757 	u16 tqp_type_and_id;
4758 	int i;
4759 
4760 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4761 	hclge_cmd_setup_basic_desc(&desc, op, false);
4762 	req->int_vector_id_l = hnae3_get_field(vector_id,
4763 					       HCLGE_VECTOR_ID_L_M,
4764 					       HCLGE_VECTOR_ID_L_S);
4765 	req->int_vector_id_h = hnae3_get_field(vector_id,
4766 					       HCLGE_VECTOR_ID_H_M,
4767 					       HCLGE_VECTOR_ID_H_S);
4768 
4769 	i = 0;
4770 	for (node = ring_chain; node; node = node->next) {
4771 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4772 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4773 				HCLGE_INT_TYPE_S,
4774 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4775 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4776 				HCLGE_TQP_ID_S, node->tqp_index);
4777 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4778 				HCLGE_INT_GL_IDX_S,
4779 				hnae3_get_field(node->int_gl_idx,
4780 						HNAE3_RING_GL_IDX_M,
4781 						HNAE3_RING_GL_IDX_S));
4782 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4783 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4784 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4785 			req->vfid = vport->vport_id;
4786 
4787 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4788 			if (status) {
4789 				dev_err(&hdev->pdev->dev,
4790 					"Map TQP fail, status is %d.\n",
4791 					status);
4792 				return -EIO;
4793 			}
4794 			i = 0;
4795 
4796 			hclge_cmd_setup_basic_desc(&desc,
4797 						   op,
4798 						   false);
4799 			req->int_vector_id_l =
4800 				hnae3_get_field(vector_id,
4801 						HCLGE_VECTOR_ID_L_M,
4802 						HCLGE_VECTOR_ID_L_S);
4803 			req->int_vector_id_h =
4804 				hnae3_get_field(vector_id,
4805 						HCLGE_VECTOR_ID_H_M,
4806 						HCLGE_VECTOR_ID_H_S);
4807 		}
4808 	}
4809 
4810 	if (i > 0) {
4811 		req->int_cause_num = i;
4812 		req->vfid = vport->vport_id;
4813 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4814 		if (status) {
4815 			dev_err(&hdev->pdev->dev,
4816 				"Map TQP fail, status is %d.\n", status);
4817 			return -EIO;
4818 		}
4819 	}
4820 
4821 	return 0;
4822 }
4823 
4824 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4825 				    struct hnae3_ring_chain_node *ring_chain)
4826 {
4827 	struct hclge_vport *vport = hclge_get_vport(handle);
4828 	struct hclge_dev *hdev = vport->back;
4829 	int vector_id;
4830 
4831 	vector_id = hclge_get_vector_index(hdev, vector);
4832 	if (vector_id < 0) {
4833 		dev_err(&hdev->pdev->dev,
4834 			"failed to get vector index. vector=%d\n", vector);
4835 		return vector_id;
4836 	}
4837 
4838 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4839 }
4840 
4841 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4842 				       struct hnae3_ring_chain_node *ring_chain)
4843 {
4844 	struct hclge_vport *vport = hclge_get_vport(handle);
4845 	struct hclge_dev *hdev = vport->back;
4846 	int vector_id, ret;
4847 
4848 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4849 		return 0;
4850 
4851 	vector_id = hclge_get_vector_index(hdev, vector);
4852 	if (vector_id < 0) {
4853 		dev_err(&handle->pdev->dev,
4854 			"Get vector index fail. ret =%d\n", vector_id);
4855 		return vector_id;
4856 	}
4857 
4858 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4859 	if (ret)
4860 		dev_err(&handle->pdev->dev,
4861 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4862 			vector_id, ret);
4863 
4864 	return ret;
4865 }
4866 
4867 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4868 				      bool en_uc, bool en_mc, bool en_bc)
4869 {
4870 	struct hclge_vport *vport = &hdev->vport[vf_id];
4871 	struct hnae3_handle *handle = &vport->nic;
4872 	struct hclge_promisc_cfg_cmd *req;
4873 	struct hclge_desc desc;
4874 	bool uc_tx_en = en_uc;
4875 	u8 promisc_cfg = 0;
4876 	int ret;
4877 
4878 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4879 
4880 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4881 	req->vf_id = vf_id;
4882 
4883 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4884 		uc_tx_en = false;
4885 
4886 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4887 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4888 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
4889 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
4890 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4891 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4892 	req->extend_promisc = promisc_cfg;
4893 
4894 	/* to be compatible with DEVICE_VERSION_V1/2 */
4895 	promisc_cfg = 0;
4896 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4897 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4898 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4899 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4900 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4901 	req->promisc = promisc_cfg;
4902 
4903 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4904 	if (ret)
4905 		dev_err(&hdev->pdev->dev,
4906 			"failed to set vport %u promisc mode, ret = %d.\n",
4907 			vf_id, ret);
4908 
4909 	return ret;
4910 }
4911 
4912 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4913 				 bool en_mc_pmc, bool en_bc_pmc)
4914 {
4915 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4916 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
4917 }
4918 
4919 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4920 				  bool en_mc_pmc)
4921 {
4922 	struct hclge_vport *vport = hclge_get_vport(handle);
4923 	struct hclge_dev *hdev = vport->back;
4924 	bool en_bc_pmc = true;
4925 
4926 	/* For devices whose version is below V2, the vlan filter is always
4927 	 * bypassed when broadcast promisc is enabled. So broadcast promisc
4928 	 * should be disabled until the user enables promisc mode.
4929 	 */
4930 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4931 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4932 
4933 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4934 					    en_bc_pmc);
4935 }
4936 
4937 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4938 {
4939 	struct hclge_vport *vport = hclge_get_vport(handle);
4940 	struct hclge_dev *hdev = vport->back;
4941 
4942 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4943 }
4944 
4945 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4946 {
4947 	struct hclge_get_fd_mode_cmd *req;
4948 	struct hclge_desc desc;
4949 	int ret;
4950 
4951 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4952 
4953 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4954 
4955 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4956 	if (ret) {
4957 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4958 		return ret;
4959 	}
4960 
4961 	*fd_mode = req->mode;
4962 
4963 	return ret;
4964 }
4965 
4966 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4967 				   u32 *stage1_entry_num,
4968 				   u32 *stage2_entry_num,
4969 				   u16 *stage1_counter_num,
4970 				   u16 *stage2_counter_num)
4971 {
4972 	struct hclge_get_fd_allocation_cmd *req;
4973 	struct hclge_desc desc;
4974 	int ret;
4975 
4976 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4977 
4978 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4979 
4980 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4981 	if (ret) {
4982 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4983 			ret);
4984 		return ret;
4985 	}
4986 
4987 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4988 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4989 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4990 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4991 
4992 	return ret;
4993 }
4994 
4995 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4996 				   enum HCLGE_FD_STAGE stage_num)
4997 {
4998 	struct hclge_set_fd_key_config_cmd *req;
4999 	struct hclge_fd_key_cfg *stage;
5000 	struct hclge_desc desc;
5001 	int ret;
5002 
5003 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5004 
5005 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5006 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5007 	req->stage = stage_num;
5008 	req->key_select = stage->key_sel;
5009 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5010 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5011 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5012 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5013 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5014 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5015 
5016 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5017 	if (ret)
5018 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5019 
5020 	return ret;
5021 }
5022 
5023 static int hclge_init_fd_config(struct hclge_dev *hdev)
5024 {
5025 #define LOW_2_WORDS		0x03
5026 	struct hclge_fd_key_cfg *key_cfg;
5027 	int ret;
5028 
5029 	if (!hnae3_dev_fd_supported(hdev))
5030 		return 0;
5031 
5032 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5033 	if (ret)
5034 		return ret;
5035 
5036 	switch (hdev->fd_cfg.fd_mode) {
5037 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5038 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5039 		break;
5040 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5041 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5042 		break;
5043 	default:
5044 		dev_err(&hdev->pdev->dev,
5045 			"Unsupported flow director mode %u\n",
5046 			hdev->fd_cfg.fd_mode);
5047 		return -EOPNOTSUPP;
5048 	}
5049 
5050 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5051 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5052 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5053 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5054 	key_cfg->outer_sipv6_word_en = 0;
5055 	key_cfg->outer_dipv6_word_en = 0;
5056 
5057 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5058 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5059 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5060 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5061 
5062 	/* If the max 400-bit key is used, we can also support MAC address tuples */
5063 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5064 		key_cfg->tuple_active |=
5065 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5066 
5067 	/* roce_type is used to filter roce frames
5068 	 * dst_vport is used to restrict the rule to a specific vport
5069 	 */
5070 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5071 
5072 	ret = hclge_get_fd_allocation(hdev,
5073 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5074 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5075 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5076 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5077 	if (ret)
5078 		return ret;
5079 
5080 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5081 }
5082 
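/* write one flow director TCAM entry (the x or y half, selected by
 * sel_x) at index loc; the key is split across three chained descriptors
 */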
5083 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5084 				int loc, u8 *key, bool is_add)
5085 {
5086 	struct hclge_fd_tcam_config_1_cmd *req1;
5087 	struct hclge_fd_tcam_config_2_cmd *req2;
5088 	struct hclge_fd_tcam_config_3_cmd *req3;
5089 	struct hclge_desc desc[3];
5090 	int ret;
5091 
5092 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5093 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5094 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5095 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5096 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5097 
5098 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5099 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5100 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5101 
5102 	req1->stage = stage;
5103 	req1->xy_sel = sel_x ? 1 : 0;
5104 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5105 	req1->index = cpu_to_le32(loc);
5106 	req1->entry_vld = sel_x ? is_add : 0;
5107 
5108 	if (key) {
5109 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5110 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5111 		       sizeof(req2->tcam_data));
5112 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5113 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5114 	}
5115 
5116 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5117 	if (ret)
5118 		dev_err(&hdev->pdev->dev,
5119 			"config tcam key fail, ret=%d\n",
5120 			ret);
5121 
5122 	return ret;
5123 }
5124 
5125 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5126 			      struct hclge_fd_ad_data *action)
5127 {
5128 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5129 	struct hclge_fd_ad_config_cmd *req;
5130 	struct hclge_desc desc;
5131 	u64 ad_data = 0;
5132 	int ret;
5133 
5134 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5135 
5136 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5137 	req->index = cpu_to_le32(loc);
5138 	req->stage = stage;
5139 
5140 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5141 		      action->write_rule_id_to_bd);
5142 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5143 			action->rule_id);
5144 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5145 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5146 			      action->override_tc);
5147 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5148 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5149 	}
5150 	ad_data <<= 32;
5151 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5152 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5153 		      action->forward_to_direct_queue);
5154 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5155 			action->queue_id);
5156 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5157 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5158 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5159 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5160 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5161 			action->counter_id);
5162 
5163 	req->ad_data = cpu_to_le64(ad_data);
5164 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5165 	if (ret)
5166 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5167 
5168 	return ret;
5169 }
5170 
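/* encode one tuple of the rule into the TCAM x/y key format; returns
 * true if the tuple occupies key space (written, or left as zero when
 * the tuple is unused), so the caller advances the key pointers
 */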
5171 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5172 				   struct hclge_fd_rule *rule)
5173 {
5174 	u16 tmp_x_s, tmp_y_s;
5175 	u32 tmp_x_l, tmp_y_l;
5176 	int i;
5177 
5178 	if (rule->unused_tuple & tuple_bit)
5179 		return true;
5180 
5181 	switch (tuple_bit) {
5182 	case BIT(INNER_DST_MAC):
5183 		for (i = 0; i < ETH_ALEN; i++) {
5184 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5185 			       rule->tuples_mask.dst_mac[i]);
5186 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5187 			       rule->tuples_mask.dst_mac[i]);
5188 		}
5189 
5190 		return true;
5191 	case BIT(INNER_SRC_MAC):
5192 		for (i = 0; i < ETH_ALEN; i++) {
5193 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5194 			       rule->tuples_mask.src_mac[i]);
5195 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5196 			       rule->tuples_mask.src_mac[i]);
5197 		}
5198 
5199 		return true;
5200 	case BIT(INNER_VLAN_TAG_FST):
5201 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5202 		       rule->tuples_mask.vlan_tag1);
5203 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5204 		       rule->tuples_mask.vlan_tag1);
5205 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5206 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5207 
5208 		return true;
5209 	case BIT(INNER_ETH_TYPE):
5210 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5211 		       rule->tuples_mask.ether_proto);
5212 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5213 		       rule->tuples_mask.ether_proto);
5214 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5215 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5216 
5217 		return true;
5218 	case BIT(INNER_IP_TOS):
5219 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5220 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5221 
5222 		return true;
5223 	case BIT(INNER_IP_PROTO):
5224 		calc_x(*key_x, rule->tuples.ip_proto,
5225 		       rule->tuples_mask.ip_proto);
5226 		calc_y(*key_y, rule->tuples.ip_proto,
5227 		       rule->tuples_mask.ip_proto);
5228 
5229 		return true;
5230 	case BIT(INNER_SRC_IP):
5231 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5232 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5233 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5234 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5235 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5236 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5237 
5238 		return true;
5239 	case BIT(INNER_DST_IP):
5240 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5241 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5242 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5243 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5244 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5245 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5246 
5247 		return true;
5248 	case BIT(INNER_SRC_PORT):
5249 		calc_x(tmp_x_s, rule->tuples.src_port,
5250 		       rule->tuples_mask.src_port);
5251 		calc_y(tmp_y_s, rule->tuples.src_port,
5252 		       rule->tuples_mask.src_port);
5253 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5254 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5255 
5256 		return true;
5257 	case BIT(INNER_DST_PORT):
5258 		calc_x(tmp_x_s, rule->tuples.dst_port,
5259 		       rule->tuples_mask.dst_port);
5260 		calc_y(tmp_y_s, rule->tuples.dst_port,
5261 		       rule->tuples_mask.dst_port);
5262 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5263 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5264 
5265 		return true;
5266 	default:
5267 		return false;
5268 	}
5269 }
5270 
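/* Build the port number field used by the flow director meta data and the
 * mac vlan switch command: a host port encodes the pf/vf id, a network
 * port encodes the physical network port id.
 */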
5271 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5272 				 u8 vf_id, u8 network_port_id)
5273 {
5274 	u32 port_number = 0;
5275 
5276 	if (port_type == HOST_PORT) {
5277 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5278 				pf_id);
5279 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5280 				vf_id);
5281 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5282 	} else {
5283 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5284 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5285 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5286 	}
5287 
5288 	return port_number;
5289 }
5290 
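/* Encode the meta data portion (packet type, destination vport) of the key
 * and shift it so that the used bits occupy the most significant bits of
 * the 32-bit meta data region.
 */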
5291 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5292 				       __le32 *key_x, __le32 *key_y,
5293 				       struct hclge_fd_rule *rule)
5294 {
5295 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5296 	u8 cur_pos = 0, tuple_size, shift_bits;
5297 	unsigned int i;
5298 
5299 	for (i = 0; i < MAX_META_DATA; i++) {
5300 		tuple_size = meta_data_key_info[i].key_length;
5301 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5302 
5303 		switch (tuple_bit) {
5304 		case BIT(ROCE_TYPE):
5305 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5306 			cur_pos += tuple_size;
5307 			break;
5308 		case BIT(DST_VPORT):
5309 			port_number = hclge_get_port_number(HOST_PORT, 0,
5310 							    rule->vf_id, 0);
5311 			hnae3_set_field(meta_data,
5312 					GENMASK(cur_pos + tuple_size, cur_pos),
5313 					cur_pos, port_number);
5314 			cur_pos += tuple_size;
5315 			break;
5316 		default:
5317 			break;
5318 		}
5319 	}
5320 
5321 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5322 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5323 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5324 
5325 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5326 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5327 }
5328 
5329 /* A complete key consists of a meta data key and a tuple key.
5330  * The meta data key is stored in the MSB region and the tuple key in the
5331  * LSB region; unused bits are filled with 0.
5332  */
5333 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5334 			    struct hclge_fd_rule *rule)
5335 {
5336 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5337 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5338 	u8 *cur_key_x, *cur_key_y;
5339 	u8 meta_data_region;
5340 	u8 tuple_size;
5341 	int ret;
5342 	u32 i;
5343 
5344 	memset(key_x, 0, sizeof(key_x));
5345 	memset(key_y, 0, sizeof(key_y));
5346 	cur_key_x = key_x;
5347 	cur_key_y = key_y;
5348 
5349 	for (i = 0; i < MAX_TUPLE; i++) {
5350 		bool tuple_valid;
5351 		u32 check_tuple;
5352 
5353 		tuple_size = tuple_key_info[i].key_length / 8;
5354 		check_tuple = key_cfg->tuple_active & BIT(i);
5355 
5356 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5357 						     cur_key_y, rule);
5358 		if (tuple_valid) {
5359 			cur_key_x += tuple_size;
5360 			cur_key_y += tuple_size;
5361 		}
5362 	}
5363 
5364 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5365 			MAX_META_DATA_LENGTH / 8;
5366 
5367 	hclge_fd_convert_meta_data(key_cfg,
5368 				   (__le32 *)(key_x + meta_data_region),
5369 				   (__le32 *)(key_y + meta_data_region),
5370 				   rule);
5371 
5372 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5373 				   true);
5374 	if (ret) {
5375 		dev_err(&hdev->pdev->dev,
5376 			"fd key_y config fail, loc=%u, ret=%d\n",
5377 			rule->location, ret);
5378 		return ret;
5379 	}
5380 
5381 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5382 				   true);
5383 	if (ret)
5384 		dev_err(&hdev->pdev->dev,
5385 			"fd key_x config fail, loc=%u, ret=%d\n",
5386 			rule->location, ret);
5387 	return ret;
5388 }
5389 
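/* Set up the action data for a rule: drop the packet, redirect it to the
 * queue range of a TC, or forward it to a specific queue; the rule id is
 * also written back for matched packets.
 */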
5390 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5391 			       struct hclge_fd_rule *rule)
5392 {
5393 	struct hclge_vport *vport = hdev->vport;
5394 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5395 	struct hclge_fd_ad_data ad_data;
5396 
5397 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5398 	ad_data.ad_id = rule->location;
5399 
5400 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5401 		ad_data.drop_packet = true;
5402 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5403 		ad_data.override_tc = true;
5404 		ad_data.queue_id =
5405 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5406 		ad_data.tc_size =
5407 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5408 	} else {
5409 		ad_data.forward_to_direct_queue = true;
5410 		ad_data.queue_id = rule->queue_id;
5411 	}
5412 
5413 	ad_data.use_counter = false;
5414 	ad_data.counter_id = 0;
5415 
5416 	ad_data.use_next_stage = false;
5417 	ad_data.next_input_key = 0;
5418 
5419 	ad_data.write_rule_id_to_bd = true;
5420 	ad_data.rule_id = rule->location;
5421 
5422 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5423 }
5424 
5425 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5426 				       u32 *unused_tuple)
5427 {
5428 	if (!spec || !unused_tuple)
5429 		return -EINVAL;
5430 
5431 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5432 
5433 	if (!spec->ip4src)
5434 		*unused_tuple |= BIT(INNER_SRC_IP);
5435 
5436 	if (!spec->ip4dst)
5437 		*unused_tuple |= BIT(INNER_DST_IP);
5438 
5439 	if (!spec->psrc)
5440 		*unused_tuple |= BIT(INNER_SRC_PORT);
5441 
5442 	if (!spec->pdst)
5443 		*unused_tuple |= BIT(INNER_DST_PORT);
5444 
5445 	if (!spec->tos)
5446 		*unused_tuple |= BIT(INNER_IP_TOS);
5447 
5448 	return 0;
5449 }
5450 
5451 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5452 				    u32 *unused_tuple)
5453 {
5454 	if (!spec || !unused_tuple)
5455 		return -EINVAL;
5456 
5457 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5458 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5459 
5460 	if (!spec->ip4src)
5461 		*unused_tuple |= BIT(INNER_SRC_IP);
5462 
5463 	if (!spec->ip4dst)
5464 		*unused_tuple |= BIT(INNER_DST_IP);
5465 
5466 	if (!spec->tos)
5467 		*unused_tuple |= BIT(INNER_IP_TOS);
5468 
5469 	if (!spec->proto)
5470 		*unused_tuple |= BIT(INNER_IP_PROTO);
5471 
5472 	if (spec->l4_4_bytes)
5473 		return -EOPNOTSUPP;
5474 
5475 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5476 		return -EOPNOTSUPP;
5477 
5478 	return 0;
5479 }
5480 
5481 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5482 				       u32 *unused_tuple)
5483 {
5484 	if (!spec || !unused_tuple)
5485 		return -EINVAL;
5486 
5487 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5488 		BIT(INNER_IP_TOS);
5489 
5490 	/* check whether the src/dst ip addresses are used */
5491 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5492 	    !spec->ip6src[2] && !spec->ip6src[3])
5493 		*unused_tuple |= BIT(INNER_SRC_IP);
5494 
5495 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5496 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5497 		*unused_tuple |= BIT(INNER_DST_IP);
5498 
5499 	if (!spec->psrc)
5500 		*unused_tuple |= BIT(INNER_SRC_PORT);
5501 
5502 	if (!spec->pdst)
5503 		*unused_tuple |= BIT(INNER_DST_PORT);
5504 
5505 	if (spec->tclass)
5506 		return -EOPNOTSUPP;
5507 
5508 	return 0;
5509 }
5510 
5511 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5512 				    u32 *unused_tuple)
5513 {
5514 	if (!spec || !unused_tuple)
5515 		return -EINVAL;
5516 
5517 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5518 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5519 
5520 	/* check whether the src/dst ip addresses are used */
5521 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5522 	    !spec->ip6src[2] && !spec->ip6src[3])
5523 		*unused_tuple |= BIT(INNER_SRC_IP);
5524 
5525 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5526 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5527 		*unused_tuple |= BIT(INNER_DST_IP);
5528 
5529 	if (!spec->l4_proto)
5530 		*unused_tuple |= BIT(INNER_IP_PROTO);
5531 
5532 	if (spec->tclass)
5533 		return -EOPNOTSUPP;
5534 
5535 	if (spec->l4_4_bytes)
5536 		return -EOPNOTSUPP;
5537 
5538 	return 0;
5539 }
5540 
5541 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5542 {
5543 	if (!spec || !unused_tuple)
5544 		return -EINVAL;
5545 
5546 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5547 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5548 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5549 
5550 	if (is_zero_ether_addr(spec->h_source))
5551 		*unused_tuple |= BIT(INNER_SRC_MAC);
5552 
5553 	if (is_zero_ether_addr(spec->h_dest))
5554 		*unused_tuple |= BIT(INNER_DST_MAC);
5555 
5556 	if (!spec->h_proto)
5557 		*unused_tuple |= BIT(INNER_ETH_TYPE);
5558 
5559 	return 0;
5560 }
5561 
5562 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5563 				    struct ethtool_rx_flow_spec *fs,
5564 				    u32 *unused_tuple)
5565 {
5566 	if (fs->flow_type & FLOW_EXT) {
5567 		if (fs->h_ext.vlan_etype) {
5568 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5569 			return -EOPNOTSUPP;
5570 		}
5571 
5572 		if (!fs->h_ext.vlan_tci)
5573 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5574 
5575 		if (fs->m_ext.vlan_tci &&
5576 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5577 			dev_err(&hdev->pdev->dev,
5578 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5579 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5580 			return -EINVAL;
5581 		}
5582 	} else {
5583 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5584 	}
5585 
5586 	if (fs->flow_type & FLOW_MAC_EXT) {
5587 		if (hdev->fd_cfg.fd_mode !=
5588 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5589 			dev_err(&hdev->pdev->dev,
5590 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
5591 			return -EOPNOTSUPP;
5592 		}
5593 
5594 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5595 			*unused_tuple |= BIT(INNER_DST_MAC);
5596 		else
5597 			*unused_tuple &= ~BIT(INNER_DST_MAC);
5598 	}
5599 
5600 	return 0;
5601 }
5602 
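/* Validate an ethtool flow spec: check the rule location, reject unsupported
 * user-def data, dispatch to the flow-type specific tuple checker and then
 * check the FLOW_EXT/FLOW_MAC_EXT fields.
 */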
5603 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5604 			       struct ethtool_rx_flow_spec *fs,
5605 			       u32 *unused_tuple)
5606 {
5607 	u32 flow_type;
5608 	int ret;
5609 
5610 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5611 		dev_err(&hdev->pdev->dev,
5612 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5613 			fs->location,
5614 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5615 		return -EINVAL;
5616 	}
5617 
5618 	if ((fs->flow_type & FLOW_EXT) &&
5619 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5620 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5621 		return -EOPNOTSUPP;
5622 	}
5623 
5624 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5625 	switch (flow_type) {
5626 	case SCTP_V4_FLOW:
5627 	case TCP_V4_FLOW:
5628 	case UDP_V4_FLOW:
5629 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5630 						  unused_tuple);
5631 		break;
5632 	case IP_USER_FLOW:
5633 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5634 					       unused_tuple);
5635 		break;
5636 	case SCTP_V6_FLOW:
5637 	case TCP_V6_FLOW:
5638 	case UDP_V6_FLOW:
5639 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5640 						  unused_tuple);
5641 		break;
5642 	case IPV6_USER_FLOW:
5643 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5644 					       unused_tuple);
5645 		break;
5646 	case ETHER_FLOW:
5647 		if (hdev->fd_cfg.fd_mode !=
5648 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5649 			dev_err(&hdev->pdev->dev,
5650 				"ETHER_FLOW is not supported in current fd mode!\n");
5651 			return -EOPNOTSUPP;
5652 		}
5653 
5654 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5655 						 unused_tuple);
5656 		break;
5657 	default:
5658 		dev_err(&hdev->pdev->dev,
5659 			"unsupported protocol type, protocol type = %#x\n",
5660 			flow_type);
5661 		return -EOPNOTSUPP;
5662 	}
5663 
5664 	if (ret) {
5665 		dev_err(&hdev->pdev->dev,
5666 			"failed to check flow union tuple, ret = %d\n",
5667 			ret);
5668 		return ret;
5669 	}
5670 
5671 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5672 }
5673 
5674 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5675 {
5676 	struct hclge_fd_rule *rule = NULL;
5677 	struct hlist_node *node2;
5678 
5679 	spin_lock_bh(&hdev->fd_rule_lock);
5680 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5681 		if (rule->location >= location)
5682 			break;
5683 	}
5684 
5685 	spin_unlock_bh(&hdev->fd_rule_lock);
5686 
5687 	return rule && rule->location == location;
5688 }
5689 
5690 /* the caller must hold fd_rule_lock before calling this function */
5691 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5692 				     struct hclge_fd_rule *new_rule,
5693 				     u16 location,
5694 				     bool is_add)
5695 {
5696 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5697 	struct hlist_node *node2;
5698 
5699 	if (is_add && !new_rule)
5700 		return -EINVAL;
5701 
5702 	hlist_for_each_entry_safe(rule, node2,
5703 				  &hdev->fd_rule_list, rule_node) {
5704 		if (rule->location >= location)
5705 			break;
5706 		parent = rule;
5707 	}
5708 
5709 	if (rule && rule->location == location) {
5710 		hlist_del(&rule->rule_node);
5711 		kfree(rule);
5712 		hdev->hclge_fd_rule_num--;
5713 
5714 		if (!is_add) {
5715 			if (!hdev->hclge_fd_rule_num)
5716 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5717 			clear_bit(location, hdev->fd_bmap);
5718 
5719 			return 0;
5720 		}
5721 	} else if (!is_add) {
5722 		dev_err(&hdev->pdev->dev,
5723 			"failed to delete, rule %u does not exist\n",
5724 			location);
5725 		return -EINVAL;
5726 	}
5727 
5728 	INIT_HLIST_NODE(&new_rule->rule_node);
5729 
5730 	if (parent)
5731 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5732 	else
5733 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5734 
5735 	set_bit(location, hdev->fd_bmap);
5736 	hdev->hclge_fd_rule_num++;
5737 	hdev->fd_active_type = new_rule->rule_type;
5738 
5739 	return 0;
5740 }
5741 
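/* Translate an ethtool flow spec into the driver's internal tuple
 * representation, converting values and masks to host byte order.
 */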
5742 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5743 			      struct ethtool_rx_flow_spec *fs,
5744 			      struct hclge_fd_rule *rule)
5745 {
5746 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5747 
5748 	switch (flow_type) {
5749 	case SCTP_V4_FLOW:
5750 	case TCP_V4_FLOW:
5751 	case UDP_V4_FLOW:
5752 		rule->tuples.src_ip[IPV4_INDEX] =
5753 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5754 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5755 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5756 
5757 		rule->tuples.dst_ip[IPV4_INDEX] =
5758 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5759 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5760 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5761 
5762 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5763 		rule->tuples_mask.src_port =
5764 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5765 
5766 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5767 		rule->tuples_mask.dst_port =
5768 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5769 
5770 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5771 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5772 
5773 		rule->tuples.ether_proto = ETH_P_IP;
5774 		rule->tuples_mask.ether_proto = 0xFFFF;
5775 
5776 		break;
5777 	case IP_USER_FLOW:
5778 		rule->tuples.src_ip[IPV4_INDEX] =
5779 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5780 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5781 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5782 
5783 		rule->tuples.dst_ip[IPV4_INDEX] =
5784 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5785 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5786 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5787 
5788 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5789 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5790 
5791 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5792 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5793 
5794 		rule->tuples.ether_proto = ETH_P_IP;
5795 		rule->tuples_mask.ether_proto = 0xFFFF;
5796 
5797 		break;
5798 	case SCTP_V6_FLOW:
5799 	case TCP_V6_FLOW:
5800 	case UDP_V6_FLOW:
5801 		be32_to_cpu_array(rule->tuples.src_ip,
5802 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5803 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5804 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5805 
5806 		be32_to_cpu_array(rule->tuples.dst_ip,
5807 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5808 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5809 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5810 
5811 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5812 		rule->tuples_mask.src_port =
5813 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5814 
5815 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5816 		rule->tuples_mask.dst_port =
5817 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5818 
5819 		rule->tuples.ether_proto = ETH_P_IPV6;
5820 		rule->tuples_mask.ether_proto = 0xFFFF;
5821 
5822 		break;
5823 	case IPV6_USER_FLOW:
5824 		be32_to_cpu_array(rule->tuples.src_ip,
5825 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5826 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5827 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5828 
5829 		be32_to_cpu_array(rule->tuples.dst_ip,
5830 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5831 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5832 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5833 
5834 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5835 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5836 
5837 		rule->tuples.ether_proto = ETH_P_IPV6;
5838 		rule->tuples_mask.ether_proto = 0xFFFF;
5839 
5840 		break;
5841 	case ETHER_FLOW:
5842 		ether_addr_copy(rule->tuples.src_mac,
5843 				fs->h_u.ether_spec.h_source);
5844 		ether_addr_copy(rule->tuples_mask.src_mac,
5845 				fs->m_u.ether_spec.h_source);
5846 
5847 		ether_addr_copy(rule->tuples.dst_mac,
5848 				fs->h_u.ether_spec.h_dest);
5849 		ether_addr_copy(rule->tuples_mask.dst_mac,
5850 				fs->m_u.ether_spec.h_dest);
5851 
5852 		rule->tuples.ether_proto =
5853 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5854 		rule->tuples_mask.ether_proto =
5855 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5856 
5857 		break;
5858 	default:
5859 		return -EOPNOTSUPP;
5860 	}
5861 
5862 	switch (flow_type) {
5863 	case SCTP_V4_FLOW:
5864 	case SCTP_V6_FLOW:
5865 		rule->tuples.ip_proto = IPPROTO_SCTP;
5866 		rule->tuples_mask.ip_proto = 0xFF;
5867 		break;
5868 	case TCP_V4_FLOW:
5869 	case TCP_V6_FLOW:
5870 		rule->tuples.ip_proto = IPPROTO_TCP;
5871 		rule->tuples_mask.ip_proto = 0xFF;
5872 		break;
5873 	case UDP_V4_FLOW:
5874 	case UDP_V6_FLOW:
5875 		rule->tuples.ip_proto = IPPROTO_UDP;
5876 		rule->tuples_mask.ip_proto = 0xFF;
5877 		break;
5878 	default:
5879 		break;
5880 	}
5881 
5882 	if (fs->flow_type & FLOW_EXT) {
5883 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5884 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5885 	}
5886 
5887 	if (fs->flow_type & FLOW_MAC_EXT) {
5888 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5889 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5890 	}
5891 
5892 	return 0;
5893 }
5894 
5895 /* the caller must hold fd_rule_lock before calling this function */
5896 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5897 				struct hclge_fd_rule *rule)
5898 {
5899 	int ret;
5900 
5901 	if (!rule) {
5902 		dev_err(&hdev->pdev->dev,
5903 			"The flow director rule is NULL\n");
5904 		return -EINVAL;
5905 	}
5906 
5907 	/* it never fails here, so there is no need to check the return value */
5908 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5909 
5910 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5911 	if (ret)
5912 		goto clear_rule;
5913 
5914 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5915 	if (ret)
5916 		goto clear_rule;
5917 
5918 	return 0;
5919 
5920 clear_rule:
5921 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5922 	return ret;
5923 }
5924 
5925 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
5926 {
5927 	struct hclge_vport *vport = hclge_get_vport(handle);
5928 	struct hclge_dev *hdev = vport->back;
5929 
5930 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
5931 }
5932 
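/* Add a flow director rule configured via ethtool: validate the spec,
 * resolve the destination vport and queue, then build the rule and
 * program its key and action into stage 1 of the TCAM.
 */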
5933 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5934 			      struct ethtool_rxnfc *cmd)
5935 {
5936 	struct hclge_vport *vport = hclge_get_vport(handle);
5937 	struct hclge_dev *hdev = vport->back;
5938 	u16 dst_vport_id = 0, q_index = 0;
5939 	struct ethtool_rx_flow_spec *fs;
5940 	struct hclge_fd_rule *rule;
5941 	u32 unused = 0;
5942 	u8 action;
5943 	int ret;
5944 
5945 	if (!hnae3_dev_fd_supported(hdev)) {
5946 		dev_err(&hdev->pdev->dev,
5947 			"flow director is not supported\n");
5948 		return -EOPNOTSUPP;
5949 	}
5950 
5951 	if (!hdev->fd_en) {
5952 		dev_err(&hdev->pdev->dev,
5953 			"please enable flow director first\n");
5954 		return -EOPNOTSUPP;
5955 	}
5956 
5957 	if (hclge_is_cls_flower_active(handle)) {
5958 		dev_err(&hdev->pdev->dev,
5959 			"please delete all existing cls flower rules first\n");
5960 		return -EINVAL;
5961 	}
5962 
5963 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5964 
5965 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5966 	if (ret)
5967 		return ret;
5968 
5969 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5970 		action = HCLGE_FD_ACTION_DROP_PACKET;
5971 	} else {
5972 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5973 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5974 		u16 tqps;
5975 
5976 		if (vf > hdev->num_req_vfs) {
5977 			dev_err(&hdev->pdev->dev,
5978 				"Error: vf id (%u) > max vf num (%u)\n",
5979 				vf, hdev->num_req_vfs);
5980 			return -EINVAL;
5981 		}
5982 
5983 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5984 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5985 
5986 		if (ring >= tqps) {
5987 			dev_err(&hdev->pdev->dev,
5988 				"Error: queue id (%u) > max tqp num (%u)\n",
5989 				ring, tqps - 1);
5990 			return -EINVAL;
5991 		}
5992 
5993 		action = HCLGE_FD_ACTION_SELECT_QUEUE;
5994 		q_index = ring;
5995 	}
5996 
5997 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5998 	if (!rule)
5999 		return -ENOMEM;
6000 
6001 	ret = hclge_fd_get_tuple(hdev, fs, rule);
6002 	if (ret) {
6003 		kfree(rule);
6004 		return ret;
6005 	}
6006 
6007 	rule->flow_type = fs->flow_type;
6008 	rule->location = fs->location;
6009 	rule->unused_tuple = unused;
6010 	rule->vf_id = dst_vport_id;
6011 	rule->queue_id = q_index;
6012 	rule->action = action;
6013 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6014 
6015 	/* to avoid rule conflicts, when the user configures a rule via
6016 	 * ethtool, we need to clear all arfs rules first
6017 	 */
6018 	spin_lock_bh(&hdev->fd_rule_lock);
6019 	hclge_clear_arfs_rules(handle);
6020 
6021 	ret = hclge_fd_config_rule(hdev, rule);
6022 
6023 	spin_unlock_bh(&hdev->fd_rule_lock);
6024 
6025 	return ret;
6026 }
6027 
6028 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6029 			      struct ethtool_rxnfc *cmd)
6030 {
6031 	struct hclge_vport *vport = hclge_get_vport(handle);
6032 	struct hclge_dev *hdev = vport->back;
6033 	struct ethtool_rx_flow_spec *fs;
6034 	int ret;
6035 
6036 	if (!hnae3_dev_fd_supported(hdev))
6037 		return -EOPNOTSUPP;
6038 
6039 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6040 
6041 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6042 		return -EINVAL;
6043 
6044 	if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6045 	    !hclge_fd_rule_exist(hdev, fs->location)) {
6046 		dev_err(&hdev->pdev->dev,
6047 			"failed to delete, rule %u does not exist\n", fs->location);
6048 		return -ENOENT;
6049 	}
6050 
6051 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6052 				   NULL, false);
6053 	if (ret)
6054 		return ret;
6055 
6056 	spin_lock_bh(&hdev->fd_rule_lock);
6057 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6058 
6059 	spin_unlock_bh(&hdev->fd_rule_lock);
6060 
6061 	return ret;
6062 }
6063 
6064 /* the caller must hold fd_rule_lock before calling this function */
6065 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6066 				     bool clear_list)
6067 {
6068 	struct hclge_vport *vport = hclge_get_vport(handle);
6069 	struct hclge_dev *hdev = vport->back;
6070 	struct hclge_fd_rule *rule;
6071 	struct hlist_node *node;
6072 	u16 location;
6073 
6074 	if (!hnae3_dev_fd_supported(hdev))
6075 		return;
6076 
6077 	for_each_set_bit(location, hdev->fd_bmap,
6078 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6079 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6080 				     NULL, false);
6081 
6082 	if (clear_list) {
6083 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6084 					  rule_node) {
6085 			hlist_del(&rule->rule_node);
6086 			kfree(rule);
6087 		}
6088 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6089 		hdev->hclge_fd_rule_num = 0;
6090 		bitmap_zero(hdev->fd_bmap,
6091 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6092 	}
6093 }
6094 
6095 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6096 {
6097 	struct hclge_vport *vport = hclge_get_vport(handle);
6098 	struct hclge_dev *hdev = vport->back;
6099 	struct hclge_fd_rule *rule;
6100 	struct hlist_node *node;
6101 	int ret;
6102 
6103 	/* Return ok here, because reset error handling will check this
6104 	 * return value. If error is returned here, the reset process will
6105 	 * fail.
6106 	 */
6107 	if (!hnae3_dev_fd_supported(hdev))
6108 		return 0;
6109 
6110 	/* if fd is disabled, it should not be restored during reset */
6111 	if (!hdev->fd_en)
6112 		return 0;
6113 
6114 	spin_lock_bh(&hdev->fd_rule_lock);
6115 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6116 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6117 		if (!ret)
6118 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6119 
6120 		if (ret) {
6121 			dev_warn(&hdev->pdev->dev,
6122 				 "Restore rule %u failed, remove it\n",
6123 				 rule->location);
6124 			clear_bit(rule->location, hdev->fd_bmap);
6125 			hlist_del(&rule->rule_node);
6126 			kfree(rule);
6127 			hdev->hclge_fd_rule_num--;
6128 		}
6129 	}
6130 
6131 	if (hdev->hclge_fd_rule_num)
6132 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6133 
6134 	spin_unlock_bh(&hdev->fd_rule_lock);
6135 
6136 	return 0;
6137 }
6138 
6139 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6140 				 struct ethtool_rxnfc *cmd)
6141 {
6142 	struct hclge_vport *vport = hclge_get_vport(handle);
6143 	struct hclge_dev *hdev = vport->back;
6144 
6145 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6146 		return -EOPNOTSUPP;
6147 
6148 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6149 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6150 
6151 	return 0;
6152 }
6153 
6154 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6155 				     struct ethtool_tcpip4_spec *spec,
6156 				     struct ethtool_tcpip4_spec *spec_mask)
6157 {
6158 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6159 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6160 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6161 
6162 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6163 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6164 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6165 
6166 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6167 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6168 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6169 
6170 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6171 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6172 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6173 
6174 	spec->tos = rule->tuples.ip_tos;
6175 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6176 			0 : rule->tuples_mask.ip_tos;
6177 }
6178 
6179 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6180 				  struct ethtool_usrip4_spec *spec,
6181 				  struct ethtool_usrip4_spec *spec_mask)
6182 {
6183 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6184 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6185 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6186 
6187 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6188 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6189 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6190 
6191 	spec->tos = rule->tuples.ip_tos;
6192 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6193 			0 : rule->tuples_mask.ip_tos;
6194 
6195 	spec->proto = rule->tuples.ip_proto;
6196 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6197 			0 : rule->tuples_mask.ip_proto;
6198 
6199 	spec->ip_ver = ETH_RX_NFC_IP4;
6200 }
6201 
6202 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6203 				     struct ethtool_tcpip6_spec *spec,
6204 				     struct ethtool_tcpip6_spec *spec_mask)
6205 {
6206 	cpu_to_be32_array(spec->ip6src,
6207 			  rule->tuples.src_ip, IPV6_SIZE);
6208 	cpu_to_be32_array(spec->ip6dst,
6209 			  rule->tuples.dst_ip, IPV6_SIZE);
6210 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6211 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6212 	else
6213 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6214 				  IPV6_SIZE);
6215 
6216 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6217 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6218 	else
6219 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6220 				  IPV6_SIZE);
6221 
6222 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6223 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6224 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6225 
6226 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6227 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6228 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6229 }
6230 
6231 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6232 				  struct ethtool_usrip6_spec *spec,
6233 				  struct ethtool_usrip6_spec *spec_mask)
6234 {
6235 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6236 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6237 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6238 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6239 	else
6240 		cpu_to_be32_array(spec_mask->ip6src,
6241 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6242 
6243 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6244 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6245 	else
6246 		cpu_to_be32_array(spec_mask->ip6dst,
6247 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6248 
6249 	spec->l4_proto = rule->tuples.ip_proto;
6250 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6251 			0 : rule->tuples_mask.ip_proto;
6252 }
6253 
6254 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6255 				    struct ethhdr *spec,
6256 				    struct ethhdr *spec_mask)
6257 {
6258 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6259 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6260 
6261 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6262 		eth_zero_addr(spec_mask->h_source);
6263 	else
6264 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6265 
6266 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6267 		eth_zero_addr(spec_mask->h_dest);
6268 	else
6269 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6270 
6271 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6272 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6273 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6274 }
6275 
6276 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6277 				  struct hclge_fd_rule *rule)
6278 {
6279 	if (fs->flow_type & FLOW_EXT) {
6280 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6281 		fs->m_ext.vlan_tci =
6282 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6283 				cpu_to_be16(VLAN_VID_MASK) :
6284 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
6285 	}
6286 
6287 	if (fs->flow_type & FLOW_MAC_EXT) {
6288 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6289 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6290 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6291 		else
6292 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6293 					rule->tuples_mask.dst_mac);
6294 	}
6295 }
6296 
6297 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6298 				  struct ethtool_rxnfc *cmd)
6299 {
6300 	struct hclge_vport *vport = hclge_get_vport(handle);
6301 	struct hclge_fd_rule *rule = NULL;
6302 	struct hclge_dev *hdev = vport->back;
6303 	struct ethtool_rx_flow_spec *fs;
6304 	struct hlist_node *node2;
6305 
6306 	if (!hnae3_dev_fd_supported(hdev))
6307 		return -EOPNOTSUPP;
6308 
6309 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6310 
6311 	spin_lock_bh(&hdev->fd_rule_lock);
6312 
6313 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6314 		if (rule->location >= fs->location)
6315 			break;
6316 	}
6317 
6318 	if (!rule || fs->location != rule->location) {
6319 		spin_unlock_bh(&hdev->fd_rule_lock);
6320 
6321 		return -ENOENT;
6322 	}
6323 
6324 	fs->flow_type = rule->flow_type;
6325 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6326 	case SCTP_V4_FLOW:
6327 	case TCP_V4_FLOW:
6328 	case UDP_V4_FLOW:
6329 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6330 					 &fs->m_u.tcp_ip4_spec);
6331 		break;
6332 	case IP_USER_FLOW:
6333 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6334 				      &fs->m_u.usr_ip4_spec);
6335 		break;
6336 	case SCTP_V6_FLOW:
6337 	case TCP_V6_FLOW:
6338 	case UDP_V6_FLOW:
6339 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6340 					 &fs->m_u.tcp_ip6_spec);
6341 		break;
6342 	case IPV6_USER_FLOW:
6343 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6344 				      &fs->m_u.usr_ip6_spec);
6345 		break;
6346 	/* The flow type of the fd rule has been checked before it was added
6347 	 * to the rule list. As all other flow types have been handled above,
6348 	 * the default case must be ETHER_FLOW
6349 	 */
6350 	default:
6351 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6352 					&fs->m_u.ether_spec);
6353 		break;
6354 	}
6355 
6356 	hclge_fd_get_ext_info(fs, rule);
6357 
6358 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6359 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6360 	} else {
6361 		u64 vf_id;
6362 
6363 		fs->ring_cookie = rule->queue_id;
6364 		vf_id = rule->vf_id;
6365 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6366 		fs->ring_cookie |= vf_id;
6367 	}
6368 
6369 	spin_unlock_bh(&hdev->fd_rule_lock);
6370 
6371 	return 0;
6372 }
6373 
6374 static int hclge_get_all_rules(struct hnae3_handle *handle,
6375 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6376 {
6377 	struct hclge_vport *vport = hclge_get_vport(handle);
6378 	struct hclge_dev *hdev = vport->back;
6379 	struct hclge_fd_rule *rule;
6380 	struct hlist_node *node2;
6381 	int cnt = 0;
6382 
6383 	if (!hnae3_dev_fd_supported(hdev))
6384 		return -EOPNOTSUPP;
6385 
6386 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6387 
6388 	spin_lock_bh(&hdev->fd_rule_lock);
6389 	hlist_for_each_entry_safe(rule, node2,
6390 				  &hdev->fd_rule_list, rule_node) {
6391 		if (cnt == cmd->rule_cnt) {
6392 			spin_unlock_bh(&hdev->fd_rule_lock);
6393 			return -EMSGSIZE;
6394 		}
6395 
6396 		rule_locs[cnt] = rule->location;
6397 		cnt++;
6398 	}
6399 
6400 	spin_unlock_bh(&hdev->fd_rule_lock);
6401 
6402 	cmd->rule_cnt = cnt;
6403 
6404 	return 0;
6405 }
6406 
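/* Extract the tuples used by aRFS (ether proto, ip proto, dst port and
 * src/dst ip) from the dissected flow keys.
 */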
6407 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6408 				     struct hclge_fd_rule_tuples *tuples)
6409 {
6410 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6411 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6412 
6413 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6414 	tuples->ip_proto = fkeys->basic.ip_proto;
6415 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6416 
6417 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6418 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6419 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6420 	} else {
6421 		int i;
6422 
6423 		for (i = 0; i < IPV6_SIZE; i++) {
6424 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6425 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6426 		}
6427 	}
6428 }
6429 
6430 /* traverse all rules and check whether an existing rule has the same tuples */
6431 static struct hclge_fd_rule *
6432 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6433 			  const struct hclge_fd_rule_tuples *tuples)
6434 {
6435 	struct hclge_fd_rule *rule = NULL;
6436 	struct hlist_node *node;
6437 
6438 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6439 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6440 			return rule;
6441 	}
6442 
6443 	return NULL;
6444 }
6445 
6446 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6447 				     struct hclge_fd_rule *rule)
6448 {
6449 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6450 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6451 			     BIT(INNER_SRC_PORT);
6452 	rule->action = 0;
6453 	rule->vf_id = 0;
6454 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6455 	if (tuples->ether_proto == ETH_P_IP) {
6456 		if (tuples->ip_proto == IPPROTO_TCP)
6457 			rule->flow_type = TCP_V4_FLOW;
6458 		else
6459 			rule->flow_type = UDP_V4_FLOW;
6460 	} else {
6461 		if (tuples->ip_proto == IPPROTO_TCP)
6462 			rule->flow_type = TCP_V6_FLOW;
6463 		else
6464 			rule->flow_type = UDP_V6_FLOW;
6465 	}
6466 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6467 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6468 }
6469 
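/* aRFS flow steering entry: create a new rule for this flow, or update the
 * queue of an existing one; the rule location is returned and used as the
 * filter id by the aRFS core.
 */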
6470 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6471 				      u16 flow_id, struct flow_keys *fkeys)
6472 {
6473 	struct hclge_vport *vport = hclge_get_vport(handle);
6474 	struct hclge_fd_rule_tuples new_tuples = {};
6475 	struct hclge_dev *hdev = vport->back;
6476 	struct hclge_fd_rule *rule;
6477 	u16 tmp_queue_id;
6478 	u16 bit_id;
6479 	int ret;
6480 
6481 	if (!hnae3_dev_fd_supported(hdev))
6482 		return -EOPNOTSUPP;
6483 
6484 	/* when an fd rule added by the user already exists,
6485 	 * arfs should not take effect
6486 	 */
6487 	spin_lock_bh(&hdev->fd_rule_lock);
6488 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6489 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6490 		spin_unlock_bh(&hdev->fd_rule_lock);
6491 		return -EOPNOTSUPP;
6492 	}
6493 
6494 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6495 
6496 	/* check whether a flow director filter already exists for this flow:
6497 	 * if not, create a new filter for it;
6498 	 * if a filter exists with a different queue id, modify the filter;
6499 	 * if a filter exists with the same queue id, do nothing
6500 	 */
6501 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6502 	if (!rule) {
6503 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6504 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6505 			spin_unlock_bh(&hdev->fd_rule_lock);
6506 			return -ENOSPC;
6507 		}
6508 
6509 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6510 		if (!rule) {
6511 			spin_unlock_bh(&hdev->fd_rule_lock);
6512 			return -ENOMEM;
6513 		}
6514 
6515 		set_bit(bit_id, hdev->fd_bmap);
6516 		rule->location = bit_id;
6517 		rule->arfs.flow_id = flow_id;
6518 		rule->queue_id = queue_id;
6519 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6520 		ret = hclge_fd_config_rule(hdev, rule);
6521 
6522 		spin_unlock_bh(&hdev->fd_rule_lock);
6523 
6524 		if (ret)
6525 			return ret;
6526 
6527 		return rule->location;
6528 	}
6529 
6530 	spin_unlock_bh(&hdev->fd_rule_lock);
6531 
6532 	if (rule->queue_id == queue_id)
6533 		return rule->location;
6534 
6535 	tmp_queue_id = rule->queue_id;
6536 	rule->queue_id = queue_id;
6537 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6538 	if (ret) {
6539 		rule->queue_id = tmp_queue_id;
6540 		return ret;
6541 	}
6542 
6543 	return rule->location;
6544 }
6545 
6546 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6547 {
6548 #ifdef CONFIG_RFS_ACCEL
6549 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6550 	struct hclge_fd_rule *rule;
6551 	struct hlist_node *node;
6552 	HLIST_HEAD(del_list);
6553 
6554 	spin_lock_bh(&hdev->fd_rule_lock);
6555 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6556 		spin_unlock_bh(&hdev->fd_rule_lock);
6557 		return;
6558 	}
6559 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6560 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6561 					rule->arfs.flow_id, rule->location)) {
6562 			hlist_del_init(&rule->rule_node);
6563 			hlist_add_head(&rule->rule_node, &del_list);
6564 			hdev->hclge_fd_rule_num--;
6565 			clear_bit(rule->location, hdev->fd_bmap);
6566 		}
6567 	}
6568 	spin_unlock_bh(&hdev->fd_rule_lock);
6569 
6570 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6571 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6572 				     rule->location, NULL, false);
6573 		kfree(rule);
6574 	}
6575 #endif
6576 }
6577 
6578 /* the caller must hold fd_rule_lock before calling this function */
6579 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6580 {
6581 #ifdef CONFIG_RFS_ACCEL
6582 	struct hclge_vport *vport = hclge_get_vport(handle);
6583 	struct hclge_dev *hdev = vport->back;
6584 
6585 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6586 		hclge_del_all_fd_entries(handle, true);
6587 #endif
6588 }
6589 
6590 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6591 				    struct hclge_fd_rule *rule)
6592 {
6593 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6594 		struct flow_match_basic match;
6595 		u16 ethtype_key, ethtype_mask;
6596 
6597 		flow_rule_match_basic(flow, &match);
6598 		ethtype_key = ntohs(match.key->n_proto);
6599 		ethtype_mask = ntohs(match.mask->n_proto);
6600 
6601 		if (ethtype_key == ETH_P_ALL) {
6602 			ethtype_key = 0;
6603 			ethtype_mask = 0;
6604 		}
6605 		rule->tuples.ether_proto = ethtype_key;
6606 		rule->tuples_mask.ether_proto = ethtype_mask;
6607 		rule->tuples.ip_proto = match.key->ip_proto;
6608 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
6609 	} else {
6610 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
6611 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6612 	}
6613 }
6614 
6615 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6616 				  struct hclge_fd_rule *rule)
6617 {
6618 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6619 		struct flow_match_eth_addrs match;
6620 
6621 		flow_rule_match_eth_addrs(flow, &match);
6622 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6623 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6624 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
6625 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6626 	} else {
6627 		rule->unused_tuple |= BIT(INNER_DST_MAC);
6628 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
6629 	}
6630 }
6631 
6632 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6633 				   struct hclge_fd_rule *rule)
6634 {
6635 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6636 		struct flow_match_vlan match;
6637 
6638 		flow_rule_match_vlan(flow, &match);
6639 		rule->tuples.vlan_tag1 = match.key->vlan_id |
6640 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
6641 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6642 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6643 	} else {
6644 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6645 	}
6646 }
6647 
6648 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6649 				 struct hclge_fd_rule *rule)
6650 {
6651 	u16 addr_type = 0;
6652 
6653 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6654 		struct flow_match_control match;
6655 
6656 		flow_rule_match_control(flow, &match);
6657 		addr_type = match.key->addr_type;
6658 	}
6659 
6660 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6661 		struct flow_match_ipv4_addrs match;
6662 
6663 		flow_rule_match_ipv4_addrs(flow, &match);
6664 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6665 		rule->tuples_mask.src_ip[IPV4_INDEX] =
6666 						be32_to_cpu(match.mask->src);
6667 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6668 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
6669 						be32_to_cpu(match.mask->dst);
6670 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6671 		struct flow_match_ipv6_addrs match;
6672 
6673 		flow_rule_match_ipv6_addrs(flow, &match);
6674 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6675 				  IPV6_SIZE);
6676 		be32_to_cpu_array(rule->tuples_mask.src_ip,
6677 				  match.mask->src.s6_addr32, IPV6_SIZE);
6678 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6679 				  IPV6_SIZE);
6680 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
6681 				  match.mask->dst.s6_addr32, IPV6_SIZE);
6682 	} else {
6683 		rule->unused_tuple |= BIT(INNER_SRC_IP);
6684 		rule->unused_tuple |= BIT(INNER_DST_IP);
6685 	}
6686 }
6687 
6688 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6689 				   struct hclge_fd_rule *rule)
6690 {
6691 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6692 		struct flow_match_ports match;
6693 
6694 		flow_rule_match_ports(flow, &match);
6695 
6696 		rule->tuples.src_port = be16_to_cpu(match.key->src);
6697 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6698 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6699 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6700 	} else {
6701 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
6702 		rule->unused_tuple |= BIT(INNER_DST_PORT);
6703 	}
6704 }
6705 
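/* Parse a tc flower match into a flow director rule, rejecting dissector
 * keys the hardware cannot match on.
 */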
6706 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6707 				  struct flow_cls_offload *cls_flower,
6708 				  struct hclge_fd_rule *rule)
6709 {
6710 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6711 	struct flow_dissector *dissector = flow->match.dissector;
6712 
6713 	if (dissector->used_keys &
6714 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6715 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
6716 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6717 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
6718 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6719 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6720 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6721 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6722 			dissector->used_keys);
6723 		return -EOPNOTSUPP;
6724 	}
6725 
6726 	hclge_get_cls_key_basic(flow, rule);
6727 	hclge_get_cls_key_mac(flow, rule);
6728 	hclge_get_cls_key_vlan(flow, rule);
6729 	hclge_get_cls_key_ip(flow, rule);
6730 	hclge_get_cls_key_port(flow, rule);
6731 
6732 	return 0;
6733 }
6734 
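/* tc flower rules use (prio - 1) as the rule location, so the prio must lie
 * within the stage 1 rule range and must not already be in use.
 */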
6735 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6736 				  struct flow_cls_offload *cls_flower, int tc)
6737 {
6738 	u32 prio = cls_flower->common.prio;
6739 
6740 	if (tc < 0 || tc > hdev->tc_max) {
6741 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6742 		return -EINVAL;
6743 	}
6744 
6745 	if (prio == 0 ||
6746 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6747 		dev_err(&hdev->pdev->dev,
6748 			"prio %u should be in range[1, %u]\n",
6749 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6750 		return -EINVAL;
6751 	}
6752 
6753 	if (test_bit(prio - 1, hdev->fd_bmap)) {
6754 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6755 		return -EINVAL;
6756 	}
6757 	return 0;
6758 }
6759 
6760 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6761 				struct flow_cls_offload *cls_flower,
6762 				int tc)
6763 {
6764 	struct hclge_vport *vport = hclge_get_vport(handle);
6765 	struct hclge_dev *hdev = vport->back;
6766 	struct hclge_fd_rule *rule;
6767 	int ret;
6768 
6769 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6770 		dev_err(&hdev->pdev->dev,
6771 			"please remove all existing fd rules via ethtool first\n");
6772 		return -EINVAL;
6773 	}
6774 
6775 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6776 	if (ret) {
6777 		dev_err(&hdev->pdev->dev,
6778 			"failed to check cls flower params, ret = %d\n", ret);
6779 		return ret;
6780 	}
6781 
6782 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6783 	if (!rule)
6784 		return -ENOMEM;
6785 
6786 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6787 	if (ret)
6788 		goto err;
6789 
6790 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
6791 	rule->cls_flower.tc = tc;
6792 	rule->location = cls_flower->common.prio - 1;
6793 	rule->vf_id = 0;
6794 	rule->cls_flower.cookie = cls_flower->cookie;
6795 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6796 
6797 	spin_lock_bh(&hdev->fd_rule_lock);
6798 	hclge_clear_arfs_rules(handle);
6799 
6800 	ret = hclge_fd_config_rule(hdev, rule);
6801 
6802 	spin_unlock_bh(&hdev->fd_rule_lock);
6803 
6804 	if (ret) {
6805 		dev_err(&hdev->pdev->dev,
6806 			"failed to add cls flower rule, ret = %d\n", ret);
6807 		goto err;
6808 	}
6809 
6810 	return 0;
6811 err:
6812 	kfree(rule);
6813 	return ret;
6814 }
6815 
6816 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
6817 						   unsigned long cookie)
6818 {
6819 	struct hclge_fd_rule *rule;
6820 	struct hlist_node *node;
6821 
6822 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6823 		if (rule->cls_flower.cookie == cookie)
6824 			return rule;
6825 	}
6826 
6827 	return NULL;
6828 }
6829 
6830 static int hclge_del_cls_flower(struct hnae3_handle *handle,
6831 				struct flow_cls_offload *cls_flower)
6832 {
6833 	struct hclge_vport *vport = hclge_get_vport(handle);
6834 	struct hclge_dev *hdev = vport->back;
6835 	struct hclge_fd_rule *rule;
6836 	int ret;
6837 
6838 	spin_lock_bh(&hdev->fd_rule_lock);
6839 
6840 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
6841 	if (!rule) {
6842 		spin_unlock_bh(&hdev->fd_rule_lock);
6843 		return -EINVAL;
6844 	}
6845 
6846 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
6847 				   NULL, false);
6848 	if (ret) {
6849 		dev_err(&hdev->pdev->dev,
6850 			"failed to delete cls flower rule %u, ret = %d\n",
6851 			rule->location, ret);
6852 		spin_unlock_bh(&hdev->fd_rule_lock);
6853 		return ret;
6854 	}
6855 
6856 	ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
6857 	if (ret) {
6858 		dev_err(&hdev->pdev->dev,
6859 			"failed to delete cls flower rule %u in list, ret = %d\n",
6860 			rule->location, ret);
6861 		spin_unlock_bh(&hdev->fd_rule_lock);
6862 		return ret;
6863 	}
6864 
6865 	spin_unlock_bh(&hdev->fd_rule_lock);
6866 
6867 	return 0;
6868 }
6869 
6870 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6871 {
6872 	struct hclge_vport *vport = hclge_get_vport(handle);
6873 	struct hclge_dev *hdev = vport->back;
6874 
6875 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6876 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6877 }
6878 
6879 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6880 {
6881 	struct hclge_vport *vport = hclge_get_vport(handle);
6882 	struct hclge_dev *hdev = vport->back;
6883 
6884 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6885 }
6886 
6887 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6888 {
6889 	struct hclge_vport *vport = hclge_get_vport(handle);
6890 	struct hclge_dev *hdev = vport->back;
6891 
6892 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6893 }
6894 
6895 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6896 {
6897 	struct hclge_vport *vport = hclge_get_vport(handle);
6898 	struct hclge_dev *hdev = vport->back;
6899 
6900 	return hdev->rst_stats.hw_reset_done_cnt;
6901 }
6902 
6903 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6904 {
6905 	struct hclge_vport *vport = hclge_get_vport(handle);
6906 	struct hclge_dev *hdev = vport->back;
6907 	bool clear;
6908 
6909 	hdev->fd_en = enable;
6910 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6911 
6912 	if (!enable) {
6913 		spin_lock_bh(&hdev->fd_rule_lock);
6914 		hclge_del_all_fd_entries(handle, clear);
6915 		spin_unlock_bh(&hdev->fd_rule_lock);
6916 	} else {
6917 		hclge_restore_fd_entries(handle);
6918 	}
6919 }
6920 
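/* Enable or disable MAC TX/RX together with padding, FCS and oversize
 * truncation handling.
 */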
6921 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6922 {
6923 	struct hclge_desc desc;
6924 	struct hclge_config_mac_mode_cmd *req =
6925 		(struct hclge_config_mac_mode_cmd *)desc.data;
6926 	u32 loop_en = 0;
6927 	int ret;
6928 
6929 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6930 
6931 	if (enable) {
6932 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6933 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6934 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6935 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6936 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6937 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6938 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6939 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6940 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6941 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6942 	}
6943 
6944 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6945 
6946 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6947 	if (ret)
6948 		dev_err(&hdev->pdev->dev,
6949 			"mac enable fail, ret = %d.\n", ret);
6950 }
6951 
6952 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6953 				     u8 switch_param, u8 param_mask)
6954 {
6955 	struct hclge_mac_vlan_switch_cmd *req;
6956 	struct hclge_desc desc;
6957 	u32 func_id;
6958 	int ret;
6959 
6960 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6961 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6962 
6963 	/* read current config parameter */
6964 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6965 				   true);
6966 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6967 	req->func_id = cpu_to_le32(func_id);
6968 
6969 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6970 	if (ret) {
6971 		dev_err(&hdev->pdev->dev,
6972 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6973 		return ret;
6974 	}
6975 
6976 	/* modify and write new config parameter */
6977 	hclge_cmd_reuse_desc(&desc, false);
6978 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6979 	req->param_mask = param_mask;
6980 
6981 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6982 	if (ret)
6983 		dev_err(&hdev->pdev->dev,
6984 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6985 	return ret;
6986 }
6987 
6988 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6989 				       int link_ret)
6990 {
6991 #define HCLGE_PHY_LINK_STATUS_NUM  200
6992 
6993 	struct phy_device *phydev = hdev->hw.mac.phydev;
6994 	int i = 0;
6995 	int ret;
6996 
6997 	do {
6998 		ret = phy_read_status(phydev);
6999 		if (ret) {
7000 			dev_err(&hdev->pdev->dev,
7001 				"phy update link status fail, ret = %d\n", ret);
7002 			return;
7003 		}
7004 
7005 		if (phydev->link == link_ret)
7006 			break;
7007 
7008 		msleep(HCLGE_LINK_STATUS_MS);
7009 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7010 }
7011 
7012 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7013 {
7014 #define HCLGE_MAC_LINK_STATUS_NUM  100
7015 
7016 	int link_status;
7017 	int i = 0;
7018 	int ret;
7019 
7020 	do {
7021 		ret = hclge_get_mac_link_status(hdev, &link_status);
7022 		if (ret)
7023 			return ret;
7024 		if (link_status == link_ret)
7025 			return 0;
7026 
7027 		msleep(HCLGE_LINK_STATUS_MS);
7028 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7029 	return -EBUSY;
7030 }
7031 
7032 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7033 					  bool is_phy)
7034 {
7035 	int link_ret;
7036 
7037 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7038 
7039 	if (is_phy)
7040 		hclge_phy_link_status_wait(hdev, link_ret);
7041 
7042 	return hclge_mac_link_status_wait(hdev, link_ret);
7043 }
7044 
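/* Enable or disable application (MAC) loopback with a read-modify-write of
 * the MAC mode configuration: only HCLGE_MAC_APP_LP_B is changed, the other
 * mode bits keep their current values.
 */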
7045 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7046 {
7047 	struct hclge_config_mac_mode_cmd *req;
7048 	struct hclge_desc desc;
7049 	u32 loop_en;
7050 	int ret;
7051 
7052 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7053 	/* 1 Read out the MAC mode config at first */
7054 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7055 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7056 	if (ret) {
7057 		dev_err(&hdev->pdev->dev,
7058 			"mac loopback get fail, ret =%d.\n", ret);
7059 		return ret;
7060 	}
7061 
7062 	/* 2 Then setup the loopback flag */
7063 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7064 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7065 
7066 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7067 
7068 	/* 3 Config MAC work mode with the loopback flag
7069 	 * and its original configuration parameters
7070 	 */
7071 	hclge_cmd_reuse_desc(&desc, false);
7072 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7073 	if (ret)
7074 		dev_err(&hdev->pdev->dev,
7075 			"mac loopback set fail, ret =%d.\n", ret);
7076 	return ret;
7077 }
7078 
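/* Configure serial or parallel serdes inner loopback. After sending the
 * request, poll the command result every HCLGE_SERDES_RETRY_MS until the
 * firmware sets HCLGE_CMD_SERDES_DONE_B; return -EBUSY on timeout and
 * -EIO if the firmware reports failure.
 */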
7079 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
7080 				     enum hnae3_loop loop_mode)
7081 {
7082 #define HCLGE_SERDES_RETRY_MS	10
7083 #define HCLGE_SERDES_RETRY_NUM	100
7084 
7085 	struct hclge_serdes_lb_cmd *req;
7086 	struct hclge_desc desc;
7087 	int ret, i = 0;
7088 	u8 loop_mode_b;
7089 
7090 	req = (struct hclge_serdes_lb_cmd *)desc.data;
7091 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7092 
7093 	switch (loop_mode) {
7094 	case HNAE3_LOOP_SERIAL_SERDES:
7095 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7096 		break;
7097 	case HNAE3_LOOP_PARALLEL_SERDES:
7098 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7099 		break;
7100 	default:
7101 		dev_err(&hdev->pdev->dev,
7102 			"unsupported serdes loopback mode %d\n", loop_mode);
7103 		return -ENOTSUPP;
7104 	}
7105 
7106 	if (en) {
7107 		req->enable = loop_mode_b;
7108 		req->mask = loop_mode_b;
7109 	} else {
7110 		req->mask = loop_mode_b;
7111 	}
7112 
7113 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7114 	if (ret) {
7115 		dev_err(&hdev->pdev->dev,
7116 			"serdes loopback set fail, ret = %d\n", ret);
7117 		return ret;
7118 	}
7119 
7120 	do {
7121 		msleep(HCLGE_SERDES_RETRY_MS);
7122 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7123 					   true);
7124 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7125 		if (ret) {
7126 			dev_err(&hdev->pdev->dev,
7127 				"serdes loopback get fail, ret = %d\n");
7128 			return ret;
7129 		}
7130 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
7131 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
7132 
7133 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7134 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7135 		return -EBUSY;
7136 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7137 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7138 		return -EIO;
7139 	}
7140 	return ret;
7141 }
7142 
7143 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7144 				     enum hnae3_loop loop_mode)
7145 {
7146 	int ret;
7147 
7148 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7149 	if (ret)
7150 		return ret;
7151 
7152 	hclge_cfg_mac_mode(hdev, en);
7153 
7154 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7155 	if (ret)
7156 		dev_err(&hdev->pdev->dev,
7157 			"serdes loopback config mac mode timeout\n");
7158 
7159 	return ret;
7160 }
7161 
7162 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7163 				     struct phy_device *phydev)
7164 {
7165 	int ret;
7166 
7167 	if (!phydev->suspended) {
7168 		ret = phy_suspend(phydev);
7169 		if (ret)
7170 			return ret;
7171 	}
7172 
7173 	ret = phy_resume(phydev);
7174 	if (ret)
7175 		return ret;
7176 
7177 	return phy_loopback(phydev, true);
7178 }
7179 
7180 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7181 				      struct phy_device *phydev)
7182 {
7183 	int ret;
7184 
7185 	ret = phy_loopback(phydev, false);
7186 	if (ret)
7187 		return ret;
7188 
7189 	return phy_suspend(phydev);
7190 }
7191 
7192 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7193 {
7194 	struct phy_device *phydev = hdev->hw.mac.phydev;
7195 	int ret;
7196 
7197 	if (!phydev)
7198 		return -ENOTSUPP;
7199 
7200 	if (en)
7201 		ret = hclge_enable_phy_loopback(hdev, phydev);
7202 	else
7203 		ret = hclge_disable_phy_loopback(hdev, phydev);
7204 	if (ret) {
7205 		dev_err(&hdev->pdev->dev,
7206 			"set phy loopback fail, ret = %d\n", ret);
7207 		return ret;
7208 	}
7209 
7210 	hclge_cfg_mac_mode(hdev, en);
7211 
7212 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7213 	if (ret)
7214 		dev_err(&hdev->pdev->dev,
7215 			"phy loopback config mac mode timeout\n");
7216 
7217 	return ret;
7218 }
7219 
7220 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7221 			    int stream_id, bool enable)
7222 {
7223 	struct hclge_desc desc;
7224 	struct hclge_cfg_com_tqp_queue_cmd *req =
7225 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7226 	int ret;
7227 
7228 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7229 	req->tqp_id = cpu_to_le16(tqp_id);
7230 	req->stream_id = cpu_to_le16(stream_id);
7231 	if (enable)
7232 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7233 
7234 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7235 	if (ret)
7236 		dev_err(&hdev->pdev->dev,
7237 			"Tqp enable fail, status =%d.\n", ret);
7238 	return ret;
7239 }
7240 
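/* Configure the requested loopback mode (app, serdes or phy) for the vport
 * and enable or disable its TQPs to match. On DEVICE_VERSION_V2 and later
 * the SSU loopback switch is adjusted first, as explained in the comment
 * below.
 */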
7241 static int hclge_set_loopback(struct hnae3_handle *handle,
7242 			      enum hnae3_loop loop_mode, bool en)
7243 {
7244 	struct hclge_vport *vport = hclge_get_vport(handle);
7245 	struct hnae3_knic_private_info *kinfo;
7246 	struct hclge_dev *hdev = vport->back;
7247 	int i, ret;
7248 
7249 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7250 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7251 	 * the same, the packets are looped back in the SSU. If SSU loopback
7252 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7253 	 */
7254 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7255 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7256 
7257 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7258 						HCLGE_SWITCH_ALW_LPBK_MASK);
7259 		if (ret)
7260 			return ret;
7261 	}
7262 
7263 	switch (loop_mode) {
7264 	case HNAE3_LOOP_APP:
7265 		ret = hclge_set_app_loopback(hdev, en);
7266 		break;
7267 	case HNAE3_LOOP_SERIAL_SERDES:
7268 	case HNAE3_LOOP_PARALLEL_SERDES:
7269 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7270 		break;
7271 	case HNAE3_LOOP_PHY:
7272 		ret = hclge_set_phy_loopback(hdev, en);
7273 		break;
7274 	default:
7275 		ret = -ENOTSUPP;
7276 		dev_err(&hdev->pdev->dev,
7277 			"loop_mode %d is not supported\n", loop_mode);
7278 		break;
7279 	}
7280 
7281 	if (ret)
7282 		return ret;
7283 
7284 	kinfo = &vport->nic.kinfo;
7285 	for (i = 0; i < kinfo->num_tqps; i++) {
7286 		ret = hclge_tqp_enable(hdev, i, 0, en);
7287 		if (ret)
7288 			return ret;
7289 	}
7290 
7291 	return 0;
7292 }
7293 
7294 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7295 {
7296 	int ret;
7297 
7298 	ret = hclge_set_app_loopback(hdev, false);
7299 	if (ret)
7300 		return ret;
7301 
7302 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7303 	if (ret)
7304 		return ret;
7305 
7306 	return hclge_cfg_serdes_loopback(hdev, false,
7307 					 HNAE3_LOOP_PARALLEL_SERDES);
7308 }
7309 
7310 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7311 {
7312 	struct hclge_vport *vport = hclge_get_vport(handle);
7313 	struct hnae3_knic_private_info *kinfo;
7314 	struct hnae3_queue *queue;
7315 	struct hclge_tqp *tqp;
7316 	int i;
7317 
7318 	kinfo = &vport->nic.kinfo;
7319 	for (i = 0; i < kinfo->num_tqps; i++) {
7320 		queue = handle->kinfo.tqp[i];
7321 		tqp = container_of(queue, struct hclge_tqp, q);
7322 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7323 	}
7324 }
7325 
7326 static void hclge_flush_link_update(struct hclge_dev *hdev)
7327 {
7328 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
7329 
7330 	unsigned long last = hdev->serv_processed_cnt;
7331 	int i = 0;
7332 
7333 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7334 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7335 	       last == hdev->serv_processed_cnt)
7336 		usleep_range(1, 1);
7337 }
7338 
7339 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7340 {
7341 	struct hclge_vport *vport = hclge_get_vport(handle);
7342 	struct hclge_dev *hdev = vport->back;
7343 
7344 	if (enable) {
7345 		hclge_task_schedule(hdev, 0);
7346 	} else {
7347 		/* Set the DOWN flag here to disable link updating */
7348 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7349 
7350 		/* flush memory to make sure DOWN is seen by service task */
7351 		smp_mb__before_atomic();
7352 		hclge_flush_link_update(hdev);
7353 	}
7354 }
7355 
7356 static int hclge_ae_start(struct hnae3_handle *handle)
7357 {
7358 	struct hclge_vport *vport = hclge_get_vport(handle);
7359 	struct hclge_dev *hdev = vport->back;
7360 
7361 	/* mac enable */
7362 	hclge_cfg_mac_mode(hdev, true);
7363 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7364 	hdev->hw.mac.link = 0;
7365 
7366 	/* reset tqp stats */
7367 	hclge_reset_tqp_stats(handle);
7368 
7369 	hclge_mac_start_phy(hdev);
7370 
7371 	return 0;
7372 }
7373 
7374 static void hclge_ae_stop(struct hnae3_handle *handle)
7375 {
7376 	struct hclge_vport *vport = hclge_get_vport(handle);
7377 	struct hclge_dev *hdev = vport->back;
7378 	int i;
7379 
7380 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7381 	spin_lock_bh(&hdev->fd_rule_lock);
7382 	hclge_clear_arfs_rules(handle);
7383 	spin_unlock_bh(&hdev->fd_rule_lock);
7384 
7385 	/* If it is not a PF reset, the firmware will disable the MAC,
7386 	 * so only the phy needs to be stopped here.
7387 	 */
7388 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7389 	    hdev->reset_type != HNAE3_FUNC_RESET) {
7390 		hclge_mac_stop_phy(hdev);
7391 		hclge_update_link_status(hdev);
7392 		return;
7393 	}
7394 
7395 	for (i = 0; i < handle->kinfo.num_tqps; i++)
7396 		hclge_reset_tqp(handle, i);
7397 
7398 	hclge_config_mac_tnl_int(hdev, false);
7399 
7400 	/* Mac disable */
7401 	hclge_cfg_mac_mode(hdev, false);
7402 
7403 	hclge_mac_stop_phy(hdev);
7404 
7405 	/* reset tqp stats */
7406 	hclge_reset_tqp_stats(handle);
7407 	hclge_update_link_status(hdev);
7408 }
7409 
7410 int hclge_vport_start(struct hclge_vport *vport)
7411 {
7412 	struct hclge_dev *hdev = vport->back;
7413 
7414 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7415 	vport->last_active_jiffies = jiffies;
7416 
7417 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7418 		if (vport->vport_id) {
7419 			hclge_restore_mac_table_common(vport);
7420 			hclge_restore_vport_vlan_table(vport);
7421 		} else {
7422 			hclge_restore_hw_table(hdev);
7423 		}
7424 	}
7425 
7426 	clear_bit(vport->vport_id, hdev->vport_config_block);
7427 
7428 	return 0;
7429 }
7430 
7431 void hclge_vport_stop(struct hclge_vport *vport)
7432 {
7433 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7434 }
7435 
7436 static int hclge_client_start(struct hnae3_handle *handle)
7437 {
7438 	struct hclge_vport *vport = hclge_get_vport(handle);
7439 
7440 	return hclge_vport_start(vport);
7441 }
7442 
7443 static void hclge_client_stop(struct hnae3_handle *handle)
7444 {
7445 	struct hclge_vport *vport = hclge_get_vport(handle);
7446 
7447 	hclge_vport_stop(vport);
7448 }
7449 
7450 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7451 					 u16 cmdq_resp, u8  resp_code,
7452 					 enum hclge_mac_vlan_tbl_opcode op)
7453 {
7454 	struct hclge_dev *hdev = vport->back;
7455 
7456 	if (cmdq_resp) {
7457 		dev_err(&hdev->pdev->dev,
7458 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7459 			cmdq_resp);
7460 		return -EIO;
7461 	}
7462 
7463 	if (op == HCLGE_MAC_VLAN_ADD) {
7464 		if (!resp_code || resp_code == 1)
7465 			return 0;
7466 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7467 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
7468 			return -ENOSPC;
7469 
7470 		dev_err(&hdev->pdev->dev,
7471 			"add mac addr failed for undefined, code=%u.\n",
7472 			resp_code);
7473 		return -EIO;
7474 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
7475 		if (!resp_code) {
7476 			return 0;
7477 		} else if (resp_code == 1) {
7478 			dev_dbg(&hdev->pdev->dev,
7479 				"remove mac addr failed for miss.\n");
7480 			return -ENOENT;
7481 		}
7482 
7483 		dev_err(&hdev->pdev->dev,
7484 			"remove mac addr failed for undefined, code=%u.\n",
7485 			resp_code);
7486 		return -EIO;
7487 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
7488 		if (!resp_code) {
7489 			return 0;
7490 		} else if (resp_code == 1) {
7491 			dev_dbg(&hdev->pdev->dev,
7492 				"lookup mac addr failed for miss.\n");
7493 			return -ENOENT;
7494 		}
7495 
7496 		dev_err(&hdev->pdev->dev,
7497 			"lookup mac addr failed for undefined, code=%u.\n",
7498 			resp_code);
7499 		return -EIO;
7500 	}
7501 
7502 	dev_err(&hdev->pdev->dev,
7503 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7504 
7505 	return -EINVAL;
7506 }
7507 
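/* Set or clear the bit for vfid in the vf bitmap of a mac vlan table entry.
 * The bitmap spans desc[1] (the first HCLGE_VF_NUM_IN_FIRST_DESC function
 * ids) and desc[2] (the remainder). Returns -EIO if vfid is out of range.
 */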
7508 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7509 {
7510 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7511 
7512 	unsigned int word_num;
7513 	unsigned int bit_num;
7514 
7515 	if (vfid > 255 || vfid < 0)
7516 		return -EIO;
7517 
7518 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7519 		word_num = vfid / 32;
7520 		bit_num  = vfid % 32;
7521 		if (clr)
7522 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7523 		else
7524 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7525 	} else {
7526 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7527 		bit_num  = vfid % 32;
7528 		if (clr)
7529 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7530 		else
7531 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7532 	}
7533 
7534 	return 0;
7535 }
7536 
7537 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7538 {
7539 #define HCLGE_DESC_NUMBER 3
7540 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7541 	int i, j;
7542 
7543 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7544 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7545 			if (desc[i].data[j])
7546 				return false;
7547 
7548 	return true;
7549 }
7550 
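/* Fill a mac vlan table entry from a raw mac address: set the entry valid
 * bit, mark the entry as multicast when is_mc is true, and pack the six
 * address bytes into the mac_addr_hi32/mac_addr_lo16 fields.
 */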
7551 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7552 				   const u8 *addr, bool is_mc)
7553 {
7554 	const unsigned char *mac_addr = addr;
7555 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7556 		       (mac_addr[0]) | (mac_addr[1] << 8);
7557 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7558 
7559 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7560 	if (is_mc) {
7561 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7562 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7563 	}
7564 
7565 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7566 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7567 }
7568 
7569 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7570 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
7571 {
7572 	struct hclge_dev *hdev = vport->back;
7573 	struct hclge_desc desc;
7574 	u8 resp_code;
7575 	u16 retval;
7576 	int ret;
7577 
7578 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7579 
7580 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7581 
7582 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7583 	if (ret) {
7584 		dev_err(&hdev->pdev->dev,
7585 			"del mac addr failed for cmd_send, ret =%d.\n",
7586 			ret);
7587 		return ret;
7588 	}
7589 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7590 	retval = le16_to_cpu(desc.retval);
7591 
7592 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7593 					     HCLGE_MAC_VLAN_REMOVE);
7594 }
7595 
7596 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7597 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7598 				     struct hclge_desc *desc,
7599 				     bool is_mc)
7600 {
7601 	struct hclge_dev *hdev = vport->back;
7602 	u8 resp_code;
7603 	u16 retval;
7604 	int ret;
7605 
7606 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7607 	if (is_mc) {
7608 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7609 		memcpy(desc[0].data,
7610 		       req,
7611 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7612 		hclge_cmd_setup_basic_desc(&desc[1],
7613 					   HCLGE_OPC_MAC_VLAN_ADD,
7614 					   true);
7615 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7616 		hclge_cmd_setup_basic_desc(&desc[2],
7617 					   HCLGE_OPC_MAC_VLAN_ADD,
7618 					   true);
7619 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7620 	} else {
7621 		memcpy(desc[0].data,
7622 		       req,
7623 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7624 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7625 	}
7626 	if (ret) {
7627 		dev_err(&hdev->pdev->dev,
7628 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7629 			ret);
7630 		return ret;
7631 	}
7632 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7633 	retval = le16_to_cpu(desc[0].retval);
7634 
7635 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7636 					     HCLGE_MAC_VLAN_LKUP);
7637 }
7638 
7639 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7640 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7641 				  struct hclge_desc *mc_desc)
7642 {
7643 	struct hclge_dev *hdev = vport->back;
7644 	int cfg_status;
7645 	u8 resp_code;
7646 	u16 retval;
7647 	int ret;
7648 
7649 	if (!mc_desc) {
7650 		struct hclge_desc desc;
7651 
7652 		hclge_cmd_setup_basic_desc(&desc,
7653 					   HCLGE_OPC_MAC_VLAN_ADD,
7654 					   false);
7655 		memcpy(desc.data, req,
7656 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7657 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7658 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7659 		retval = le16_to_cpu(desc.retval);
7660 
7661 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7662 							   resp_code,
7663 							   HCLGE_MAC_VLAN_ADD);
7664 	} else {
7665 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7666 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7667 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7668 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7669 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7670 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7671 		memcpy(mc_desc[0].data, req,
7672 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7673 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7674 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7675 		retval = le16_to_cpu(mc_desc[0].retval);
7676 
7677 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7678 							   resp_code,
7679 							   HCLGE_MAC_VLAN_ADD);
7680 	}
7681 
7682 	if (ret) {
7683 		dev_err(&hdev->pdev->dev,
7684 			"add mac addr failed for cmd_send, ret =%d.\n",
7685 			ret);
7686 		return ret;
7687 	}
7688 
7689 	return cfg_status;
7690 }
7691 
7692 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7693 			       u16 *allocated_size)
7694 {
7695 	struct hclge_umv_spc_alc_cmd *req;
7696 	struct hclge_desc desc;
7697 	int ret;
7698 
7699 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7700 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7701 
7702 	req->space_size = cpu_to_le32(space_size);
7703 
7704 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7705 	if (ret) {
7706 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7707 			ret);
7708 		return ret;
7709 	}
7710 
7711 	*allocated_size = le32_to_cpu(desc.data[1]);
7712 
7713 	return 0;
7714 }
7715 
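/* Allocate unicast mac vlan (UMV) table space from the firmware and divide
 * it by (num_alloc_vport + 1): each vport gets one share as its private
 * quota, and the last share plus the remainder forms the shared pool.
 */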
7716 static int hclge_init_umv_space(struct hclge_dev *hdev)
7717 {
7718 	u16 allocated_size = 0;
7719 	int ret;
7720 
7721 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7722 	if (ret)
7723 		return ret;
7724 
7725 	if (allocated_size < hdev->wanted_umv_size)
7726 		dev_warn(&hdev->pdev->dev,
7727 			 "failed to alloc umv space, want %u, get %u\n",
7728 			 hdev->wanted_umv_size, allocated_size);
7729 
7730 	hdev->max_umv_size = allocated_size;
7731 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7732 	hdev->share_umv_size = hdev->priv_umv_size +
7733 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7734 
7735 	return 0;
7736 }
7737 
7738 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7739 {
7740 	struct hclge_vport *vport;
7741 	int i;
7742 
7743 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7744 		vport = &hdev->vport[i];
7745 		vport->used_umv_num = 0;
7746 	}
7747 
7748 	mutex_lock(&hdev->vport_lock);
7749 	hdev->share_umv_size = hdev->priv_umv_size +
7750 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7751 	mutex_unlock(&hdev->vport_lock);
7752 }
7753 
7754 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7755 {
7756 	struct hclge_dev *hdev = vport->back;
7757 	bool is_full;
7758 
7759 	if (need_lock)
7760 		mutex_lock(&hdev->vport_lock);
7761 
7762 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7763 		   hdev->share_umv_size == 0);
7764 
7765 	if (need_lock)
7766 		mutex_unlock(&hdev->vport_lock);
7767 
7768 	return is_full;
7769 }
7770 
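/* Account for one UMV entry being used (is_free == false) or released by the
 * vport: entries beyond the vport's private quota are taken from, or returned
 * to, the shared pool.
 */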
7771 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7772 {
7773 	struct hclge_dev *hdev = vport->back;
7774 
7775 	if (is_free) {
7776 		if (vport->used_umv_num > hdev->priv_umv_size)
7777 			hdev->share_umv_size++;
7778 
7779 		if (vport->used_umv_num > 0)
7780 			vport->used_umv_num--;
7781 	} else {
7782 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7783 		    hdev->share_umv_size > 0)
7784 			hdev->share_umv_size--;
7785 		vport->used_umv_num++;
7786 	}
7787 }
7788 
7789 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7790 						  const u8 *mac_addr)
7791 {
7792 	struct hclge_mac_node *mac_node, *tmp;
7793 
7794 	list_for_each_entry_safe(mac_node, tmp, list, node)
7795 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7796 			return mac_node;
7797 
7798 	return NULL;
7799 }
7800 
7801 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7802 				  enum HCLGE_MAC_NODE_STATE state)
7803 {
7804 	switch (state) {
7805 	/* from set_rx_mode or tmp_add_list */
7806 	case HCLGE_MAC_TO_ADD:
7807 		if (mac_node->state == HCLGE_MAC_TO_DEL)
7808 			mac_node->state = HCLGE_MAC_ACTIVE;
7809 		break;
7810 	/* only from set_rx_mode */
7811 	case HCLGE_MAC_TO_DEL:
7812 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
7813 			list_del(&mac_node->node);
7814 			kfree(mac_node);
7815 		} else {
7816 			mac_node->state = HCLGE_MAC_TO_DEL;
7817 		}
7818 		break;
7819 	/* only from tmp_add_list, the mac_node->state won't be
7820 	 * ACTIVE.
7821 	 */
7822 	case HCLGE_MAC_ACTIVE:
7823 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7824 			mac_node->state = HCLGE_MAC_ACTIVE;
7825 
7826 		break;
7827 	}
7828 }
7829 
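/* Record a requested mac address change (add or delete) in the vport's
 * unicast or multicast mac list. The hardware table itself is updated later
 * by hclge_sync_mac_table() once HCLGE_VPORT_STATE_MAC_TBL_CHANGE is set.
 * Deleting an address that was never added returns -ENOENT.
 */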
7830 int hclge_update_mac_list(struct hclge_vport *vport,
7831 			  enum HCLGE_MAC_NODE_STATE state,
7832 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
7833 			  const unsigned char *addr)
7834 {
7835 	struct hclge_dev *hdev = vport->back;
7836 	struct hclge_mac_node *mac_node;
7837 	struct list_head *list;
7838 
7839 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7840 		&vport->uc_mac_list : &vport->mc_mac_list;
7841 
7842 	spin_lock_bh(&vport->mac_list_lock);
7843 
7844 	/* if the mac addr is already in the mac list, no need to add a new
7845 	 * one into it; just check the mac addr state and convert it to the
7846 	 * new state, remove it, or do nothing.
7847 	 */
7848 	mac_node = hclge_find_mac_node(list, addr);
7849 	if (mac_node) {
7850 		hclge_update_mac_node(mac_node, state);
7851 		spin_unlock_bh(&vport->mac_list_lock);
7852 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7853 		return 0;
7854 	}
7855 
7856 	/* if this address was never added, there is nothing to delete */
7857 	if (state == HCLGE_MAC_TO_DEL) {
7858 		spin_unlock_bh(&vport->mac_list_lock);
7859 		dev_err(&hdev->pdev->dev,
7860 			"failed to delete address %pM from mac list\n",
7861 			addr);
7862 		return -ENOENT;
7863 	}
7864 
7865 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7866 	if (!mac_node) {
7867 		spin_unlock_bh(&vport->mac_list_lock);
7868 		return -ENOMEM;
7869 	}
7870 
7871 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7872 
7873 	mac_node->state = state;
7874 	ether_addr_copy(mac_node->mac_addr, addr);
7875 	list_add_tail(&mac_node->node, list);
7876 
7877 	spin_unlock_bh(&vport->mac_list_lock);
7878 
7879 	return 0;
7880 }
7881 
7882 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7883 			     const unsigned char *addr)
7884 {
7885 	struct hclge_vport *vport = hclge_get_vport(handle);
7886 
7887 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7888 				     addr);
7889 }
7890 
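/* Add a unicast mac address for the vport to the hardware mac vlan table.
 * The entry is only added if it does not already exist and there is UMV
 * space left; an existing entry is treated as success, while a full table
 * returns -ENOSPC.
 */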
7891 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7892 			     const unsigned char *addr)
7893 {
7894 	struct hclge_dev *hdev = vport->back;
7895 	struct hclge_mac_vlan_tbl_entry_cmd req;
7896 	struct hclge_desc desc;
7897 	u16 egress_port = 0;
7898 	int ret;
7899 
7900 	/* mac addr check */
7901 	if (is_zero_ether_addr(addr) ||
7902 	    is_broadcast_ether_addr(addr) ||
7903 	    is_multicast_ether_addr(addr)) {
7904 		dev_err(&hdev->pdev->dev,
7905 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7906 			 addr, is_zero_ether_addr(addr),
7907 			 is_broadcast_ether_addr(addr),
7908 			 is_multicast_ether_addr(addr));
7909 		return -EINVAL;
7910 	}
7911 
7912 	memset(&req, 0, sizeof(req));
7913 
7914 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7915 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7916 
7917 	req.egress_port = cpu_to_le16(egress_port);
7918 
7919 	hclge_prepare_mac_addr(&req, addr, false);
7920 
7921 	/* Look up the mac address in the mac_vlan table, and add
7922 	 * it if the entry does not exist. Duplicate unicast entries
7923 	 * are not allowed in the mac vlan table.
7924 	 */
7925 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7926 	if (ret == -ENOENT) {
7927 		mutex_lock(&hdev->vport_lock);
7928 		if (!hclge_is_umv_space_full(vport, false)) {
7929 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7930 			if (!ret)
7931 				hclge_update_umv_space(vport, false);
7932 			mutex_unlock(&hdev->vport_lock);
7933 			return ret;
7934 		}
7935 		mutex_unlock(&hdev->vport_lock);
7936 
7937 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7938 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7939 				hdev->priv_umv_size);
7940 
7941 		return -ENOSPC;
7942 	}
7943 
7944 	/* check if we just hit the duplicate */
7945 	/* check if we just hit a duplicate entry */
7946 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7947 			 vport->vport_id, addr);
7948 		return 0;
7949 	}
7950 
7951 	dev_err(&hdev->pdev->dev,
7952 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7953 		addr);
7954 
7955 	return ret;
7956 }
7957 
7958 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7959 			    const unsigned char *addr)
7960 {
7961 	struct hclge_vport *vport = hclge_get_vport(handle);
7962 
7963 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7964 				     addr);
7965 }
7966 
7967 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7968 			    const unsigned char *addr)
7969 {
7970 	struct hclge_dev *hdev = vport->back;
7971 	struct hclge_mac_vlan_tbl_entry_cmd req;
7972 	int ret;
7973 
7974 	/* mac addr check */
7975 	if (is_zero_ether_addr(addr) ||
7976 	    is_broadcast_ether_addr(addr) ||
7977 	    is_multicast_ether_addr(addr)) {
7978 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7979 			addr);
7980 		return -EINVAL;
7981 	}
7982 
7983 	memset(&req, 0, sizeof(req));
7984 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7985 	hclge_prepare_mac_addr(&req, addr, false);
7986 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7987 	if (!ret) {
7988 		mutex_lock(&hdev->vport_lock);
7989 		hclge_update_umv_space(vport, true);
7990 		mutex_unlock(&hdev->vport_lock);
7991 	} else if (ret == -ENOENT) {
7992 		ret = 0;
7993 	}
7994 
7995 	return ret;
7996 }
7997 
7998 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7999 			     const unsigned char *addr)
8000 {
8001 	struct hclge_vport *vport = hclge_get_vport(handle);
8002 
8003 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8004 				     addr);
8005 }
8006 
8007 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8008 			     const unsigned char *addr)
8009 {
8010 	struct hclge_dev *hdev = vport->back;
8011 	struct hclge_mac_vlan_tbl_entry_cmd req;
8012 	struct hclge_desc desc[3];
8013 	int status;
8014 
8015 	/* mac addr check */
8016 	if (!is_multicast_ether_addr(addr)) {
8017 		dev_err(&hdev->pdev->dev,
8018 			"Add mc mac err! invalid mac:%pM.\n",
8019 			 addr);
8020 		return -EINVAL;
8021 	}
8022 	memset(&req, 0, sizeof(req));
8023 	hclge_prepare_mac_addr(&req, addr, true);
8024 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8025 	if (status) {
8026 		/* This mac addr does not exist, add a new entry for it */
8027 		memset(desc[0].data, 0, sizeof(desc[0].data));
8028 		memset(desc[1].data, 0, sizeof(desc[0].data));
8029 		memset(desc[2].data, 0, sizeof(desc[0].data));
8030 	}
8031 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8032 	if (status)
8033 		return status;
8034 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8035 
8036 	/* if the table has already overflowed, do not print every time */
8037 	if (status == -ENOSPC &&
8038 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8039 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8040 
8041 	return status;
8042 }
8043 
8044 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8045 			    const unsigned char *addr)
8046 {
8047 	struct hclge_vport *vport = hclge_get_vport(handle);
8048 
8049 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8050 				     addr);
8051 }
8052 
8053 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8054 			    const unsigned char *addr)
8055 {
8056 	struct hclge_dev *hdev = vport->back;
8057 	struct hclge_mac_vlan_tbl_entry_cmd req;
8058 	enum hclge_cmd_status status;
8059 	struct hclge_desc desc[3];
8060 
8061 	/* mac addr check */
8062 	if (!is_multicast_ether_addr(addr)) {
8063 		dev_dbg(&hdev->pdev->dev,
8064 			"Remove mc mac err! invalid mac:%pM.\n",
8065 			 addr);
8066 		return -EINVAL;
8067 	}
8068 
8069 	memset(&req, 0, sizeof(req));
8070 	hclge_prepare_mac_addr(&req, addr, true);
8071 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8072 	if (!status) {
8073 		/* This mac addr exists, remove this handle's VFID from it */
8074 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8075 		if (status)
8076 			return status;
8077 
8078 		if (hclge_is_all_function_id_zero(desc))
8079 			/* All the vfids are zero, so delete this entry */
8080 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8081 		else
8082 			/* Not all the vfids are zero, just update the vfid bitmap */
8083 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8084 
8085 	} else if (status == -ENOENT) {
8086 		status = 0;
8087 	}
8088 
8089 	return status;
8090 }
8091 
8092 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8093 				      struct list_head *list,
8094 				      int (*sync)(struct hclge_vport *,
8095 						  const unsigned char *))
8096 {
8097 	struct hclge_mac_node *mac_node, *tmp;
8098 	int ret;
8099 
8100 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8101 		ret = sync(vport, mac_node->mac_addr);
8102 		if (!ret) {
8103 			mac_node->state = HCLGE_MAC_ACTIVE;
8104 		} else {
8105 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8106 				&vport->state);
8107 			break;
8108 		}
8109 	}
8110 }
8111 
8112 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8113 					struct list_head *list,
8114 					int (*unsync)(struct hclge_vport *,
8115 						      const unsigned char *))
8116 {
8117 	struct hclge_mac_node *mac_node, *tmp;
8118 	int ret;
8119 
8120 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8121 		ret = unsync(vport, mac_node->mac_addr);
8122 		if (!ret || ret == -ENOENT) {
8123 			list_del(&mac_node->node);
8124 			kfree(mac_node);
8125 		} else {
8126 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8127 				&vport->state);
8128 			break;
8129 		}
8130 	}
8131 }
8132 
8133 static bool hclge_sync_from_add_list(struct list_head *add_list,
8134 				     struct list_head *mac_list)
8135 {
8136 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8137 	bool all_added = true;
8138 
8139 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8140 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8141 			all_added = false;
8142 
8143 		/* if the mac address from tmp_add_list is not in the
8144 		 * uc/mc_mac_list, a TO_DEL request was received while the
8145 		 * address was being added to the mac table. If the mac_node
8146 		 * state is ACTIVE, change it to TO_DEL so it is removed next
8147 		 * time; otherwise it must be TO_ADD, meaning the address has
8148 		 * not been added to the mac table yet, so just free the mac
8149 		 * node.
8150 		 */
8151 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8152 		if (new_node) {
8153 			hclge_update_mac_node(new_node, mac_node->state);
8154 			list_del(&mac_node->node);
8155 			kfree(mac_node);
8156 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8157 			mac_node->state = HCLGE_MAC_TO_DEL;
8158 			list_del(&mac_node->node);
8159 			list_add_tail(&mac_node->node, mac_list);
8160 		} else {
8161 			list_del(&mac_node->node);
8162 			kfree(mac_node);
8163 		}
8164 	}
8165 
8166 	return all_added;
8167 }
8168 
8169 static void hclge_sync_from_del_list(struct list_head *del_list,
8170 				     struct list_head *mac_list)
8171 {
8172 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8173 
8174 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8175 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8176 		if (new_node) {
8177 			/* If the mac addr exists in the mac list, a new TO_ADD
8178 			 * request was received while the mac address was being
8179 			 * configured. The mac node state is TO_ADD and the
8180 			 * address is already in the hardware (because the
8181 			 * delete failed), so just change the mac node state
8182 			 * to ACTIVE.
8183 			 */
8184 			new_node->state = HCLGE_MAC_ACTIVE;
8185 			list_del(&mac_node->node);
8186 			kfree(mac_node);
8187 		} else {
8188 			list_del(&mac_node->node);
8189 			list_add_tail(&mac_node->node, mac_list);
8190 		}
8191 	}
8192 }
8193 
8194 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8195 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8196 					bool is_all_added)
8197 {
8198 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8199 		if (is_all_added)
8200 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8201 		else
8202 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8203 	} else {
8204 		if (is_all_added)
8205 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8206 		else
8207 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8208 	}
8209 }
8210 
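/* Apply the pending changes of the vport's mac list to the hardware table:
 * TO_DEL and TO_ADD nodes are moved to temporary lists under the lock, the
 * deletions and additions are issued outside the lock, and entries that
 * failed are merged back into the list to be retried later.
 */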
8211 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8212 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8213 {
8214 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8215 	struct list_head tmp_add_list, tmp_del_list;
8216 	struct list_head *list;
8217 	bool all_added;
8218 
8219 	INIT_LIST_HEAD(&tmp_add_list);
8220 	INIT_LIST_HEAD(&tmp_del_list);
8221 
8222 	/* move the mac addrs to the tmp_add_list and tmp_del_list, so
8223 	 * they can be added/deleted outside the spin lock
8224 	 */
8225 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8226 		&vport->uc_mac_list : &vport->mc_mac_list;
8227 
8228 	spin_lock_bh(&vport->mac_list_lock);
8229 
8230 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8231 		switch (mac_node->state) {
8232 		case HCLGE_MAC_TO_DEL:
8233 			list_del(&mac_node->node);
8234 			list_add_tail(&mac_node->node, &tmp_del_list);
8235 			break;
8236 		case HCLGE_MAC_TO_ADD:
8237 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8238 			if (!new_node)
8239 				goto stop_traverse;
8240 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8241 			new_node->state = mac_node->state;
8242 			list_add_tail(&new_node->node, &tmp_add_list);
8243 			break;
8244 		default:
8245 			break;
8246 		}
8247 	}
8248 
8249 stop_traverse:
8250 	spin_unlock_bh(&vport->mac_list_lock);
8251 
8252 	/* delete first, in order to get max mac table space for adding */
8253 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8254 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8255 					    hclge_rm_uc_addr_common);
8256 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8257 					  hclge_add_uc_addr_common);
8258 	} else {
8259 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8260 					    hclge_rm_mc_addr_common);
8261 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8262 					  hclge_add_mc_addr_common);
8263 	}
8264 
8265 	/* if adding/deleting some mac addresses failed, move them back to
8266 	 * the mac_list and retry next time.
8267 	 */
8268 	spin_lock_bh(&vport->mac_list_lock);
8269 
8270 	hclge_sync_from_del_list(&tmp_del_list, list);
8271 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8272 
8273 	spin_unlock_bh(&vport->mac_list_lock);
8274 
8275 	hclge_update_overflow_flags(vport, mac_type, all_added);
8276 }
8277 
8278 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8279 {
8280 	struct hclge_dev *hdev = vport->back;
8281 
8282 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8283 		return false;
8284 
8285 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8286 		return true;
8287 
8288 	return false;
8289 }
8290 
8291 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8292 {
8293 	int i;
8294 
8295 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8296 		struct hclge_vport *vport = &hdev->vport[i];
8297 
8298 		if (!hclge_need_sync_mac_table(vport))
8299 			continue;
8300 
8301 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8302 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8303 	}
8304 }
8305 
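/* Remove all unicast or multicast mac addresses of the vport from the
 * hardware table. When is_del_list is false (e.g. for a VF reset), active
 * addresses are kept in the software list in TO_ADD state so they can be
 * restored afterwards.
 */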
8306 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8307 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
8308 {
8309 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8310 	struct hclge_mac_node *mac_cfg, *tmp;
8311 	struct hclge_dev *hdev = vport->back;
8312 	struct list_head tmp_del_list, *list;
8313 	int ret;
8314 
8315 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8316 		list = &vport->uc_mac_list;
8317 		unsync = hclge_rm_uc_addr_common;
8318 	} else {
8319 		list = &vport->mc_mac_list;
8320 		unsync = hclge_rm_mc_addr_common;
8321 	}
8322 
8323 	INIT_LIST_HEAD(&tmp_del_list);
8324 
8325 	if (!is_del_list)
8326 		set_bit(vport->vport_id, hdev->vport_config_block);
8327 
8328 	spin_lock_bh(&vport->mac_list_lock);
8329 
8330 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8331 		switch (mac_cfg->state) {
8332 		case HCLGE_MAC_TO_DEL:
8333 		case HCLGE_MAC_ACTIVE:
8334 			list_del(&mac_cfg->node);
8335 			list_add_tail(&mac_cfg->node, &tmp_del_list);
8336 			break;
8337 		case HCLGE_MAC_TO_ADD:
8338 			if (is_del_list) {
8339 				list_del(&mac_cfg->node);
8340 				kfree(mac_cfg);
8341 			}
8342 			break;
8343 		}
8344 	}
8345 
8346 	spin_unlock_bh(&vport->mac_list_lock);
8347 
8348 	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
8349 		ret = unsync(vport, mac_cfg->mac_addr);
8350 		if (!ret || ret == -ENOENT) {
8351 			/* clear all mac addrs from hardware, but keep them
8352 			 * in the mac list so they can be restored after the
8353 			 * vf reset finishes.
8354 			 */
8355 			if (!is_del_list &&
8356 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8357 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8358 			} else {
8359 				list_del(&mac_cfg->node);
8360 				kfree(mac_cfg);
8361 			}
8362 		} else if (is_del_list) {
8363 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8364 		}
8365 	}
8366 
8367 	spin_lock_bh(&vport->mac_list_lock);
8368 
8369 	hclge_sync_from_del_list(&tmp_del_list, list);
8370 
8371 	spin_unlock_bh(&vport->mac_list_lock);
8372 }
8373 
8374 /* remove all mac addresses when uninitializing */
8375 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8376 					enum HCLGE_MAC_ADDR_TYPE mac_type)
8377 {
8378 	struct hclge_mac_node *mac_node, *tmp;
8379 	struct hclge_dev *hdev = vport->back;
8380 	struct list_head tmp_del_list, *list;
8381 
8382 	INIT_LIST_HEAD(&tmp_del_list);
8383 
8384 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8385 		&vport->uc_mac_list : &vport->mc_mac_list;
8386 
8387 	spin_lock_bh(&vport->mac_list_lock);
8388 
8389 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8390 		switch (mac_node->state) {
8391 		case HCLGE_MAC_TO_DEL:
8392 		case HCLGE_MAC_ACTIVE:
8393 			list_del(&mac_node->node);
8394 			list_add_tail(&mac_node->node, &tmp_del_list);
8395 			break;
8396 		case HCLGE_MAC_TO_ADD:
8397 			list_del(&mac_node->node);
8398 			kfree(mac_node);
8399 			break;
8400 		}
8401 	}
8402 
8403 	spin_unlock_bh(&vport->mac_list_lock);
8404 
8405 	if (mac_type == HCLGE_MAC_ADDR_UC)
8406 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8407 					    hclge_rm_uc_addr_common);
8408 	else
8409 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8410 					    hclge_rm_mc_addr_common);
8411 
8412 	if (!list_empty(&tmp_del_list))
8413 		dev_warn(&hdev->pdev->dev,
8414 			 "failed to completely uninit %s mac list for vport %u\n",
8415 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8416 			 vport->vport_id);
8417 
8418 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8419 		list_del(&mac_node->node);
8420 		kfree(mac_node);
8421 	}
8422 }
8423 
8424 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8425 {
8426 	struct hclge_vport *vport;
8427 	int i;
8428 
8429 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8430 		vport = &hdev->vport[i];
8431 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8432 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8433 	}
8434 }
8435 
8436 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8437 					      u16 cmdq_resp, u8 resp_code)
8438 {
8439 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
8440 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
8441 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
8442 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
8443 
8444 	int return_status;
8445 
8446 	if (cmdq_resp) {
8447 		dev_err(&hdev->pdev->dev,
8448 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8449 			cmdq_resp);
8450 		return -EIO;
8451 	}
8452 
8453 	switch (resp_code) {
8454 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
8455 	case HCLGE_ETHERTYPE_ALREADY_ADD:
8456 		return_status = 0;
8457 		break;
8458 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8459 		dev_err(&hdev->pdev->dev,
8460 			"add mac ethertype failed for manager table overflow.\n");
8461 		return_status = -EIO;
8462 		break;
8463 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
8464 		dev_err(&hdev->pdev->dev,
8465 			"add mac ethertype failed for key conflict.\n");
8466 		return_status = -EIO;
8467 		break;
8468 	default:
8469 		dev_err(&hdev->pdev->dev,
8470 			"add mac ethertype failed for undefined, code=%u.\n",
8471 			resp_code);
8472 		return_status = -EIO;
8473 	}
8474 
8475 	return return_status;
8476 }
8477 
8478 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8479 				     u8 *mac_addr)
8480 {
8481 	struct hclge_mac_vlan_tbl_entry_cmd req;
8482 	struct hclge_dev *hdev = vport->back;
8483 	struct hclge_desc desc;
8484 	u16 egress_port = 0;
8485 	int i;
8486 
8487 	if (is_zero_ether_addr(mac_addr))
8488 		return false;
8489 
8490 	memset(&req, 0, sizeof(req));
8491 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8492 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8493 	req.egress_port = cpu_to_le16(egress_port);
8494 	hclge_prepare_mac_addr(&req, mac_addr, false);
8495 
8496 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8497 		return true;
8498 
8499 	vf_idx += HCLGE_VF_VPORT_START_NUM;
8500 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8501 		if (i != vf_idx &&
8502 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8503 			return true;
8504 
8505 	return false;
8506 }
8507 
8508 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8509 			    u8 *mac_addr)
8510 {
8511 	struct hclge_vport *vport = hclge_get_vport(handle);
8512 	struct hclge_dev *hdev = vport->back;
8513 
8514 	vport = hclge_get_vf_vport(hdev, vf);
8515 	if (!vport)
8516 		return -EINVAL;
8517 
8518 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8519 		dev_info(&hdev->pdev->dev,
8520 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
8521 			 mac_addr);
8522 		return 0;
8523 	}
8524 
8525 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8526 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8527 			mac_addr);
8528 		return -EEXIST;
8529 	}
8530 
8531 	ether_addr_copy(vport->vf_info.mac, mac_addr);
8532 
8533 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8534 		dev_info(&hdev->pdev->dev,
8535 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8536 			 vf, mac_addr);
8537 		return hclge_inform_reset_assert_to_vf(vport);
8538 	}
8539 
8540 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8541 		 vf, mac_addr);
8542 	return 0;
8543 }
8544 
8545 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8546 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
8547 {
8548 	struct hclge_desc desc;
8549 	u8 resp_code;
8550 	u16 retval;
8551 	int ret;
8552 
8553 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8554 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8555 
8556 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8557 	if (ret) {
8558 		dev_err(&hdev->pdev->dev,
8559 			"add mac ethertype failed for cmd_send, ret =%d.\n",
8560 			ret);
8561 		return ret;
8562 	}
8563 
8564 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8565 	retval = le16_to_cpu(desc.retval);
8566 
8567 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8568 }
8569 
8570 static int init_mgr_tbl(struct hclge_dev *hdev)
8571 {
8572 	int ret;
8573 	int i;
8574 
8575 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8576 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8577 		if (ret) {
8578 			dev_err(&hdev->pdev->dev,
8579 				"add mac ethertype failed, ret =%d.\n",
8580 				ret);
8581 			return ret;
8582 		}
8583 	}
8584 
8585 	return 0;
8586 }
8587 
8588 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8589 {
8590 	struct hclge_vport *vport = hclge_get_vport(handle);
8591 	struct hclge_dev *hdev = vport->back;
8592 
8593 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
8594 }
8595 
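/* Update the vport's unicast mac list when the device mac address changes:
 * queue new_addr for adding (and move it to the head of the list) and, if
 * old_addr differs from new_addr, queue old_addr for removal.
 */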
8596 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8597 				       const u8 *old_addr, const u8 *new_addr)
8598 {
8599 	struct list_head *list = &vport->uc_mac_list;
8600 	struct hclge_mac_node *old_node, *new_node;
8601 
8602 	new_node = hclge_find_mac_node(list, new_addr);
8603 	if (!new_node) {
8604 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8605 		if (!new_node)
8606 			return -ENOMEM;
8607 
8608 		new_node->state = HCLGE_MAC_TO_ADD;
8609 		ether_addr_copy(new_node->mac_addr, new_addr);
8610 		list_add(&new_node->node, list);
8611 	} else {
8612 		if (new_node->state == HCLGE_MAC_TO_DEL)
8613 			new_node->state = HCLGE_MAC_ACTIVE;
8614 
8615 		/* make sure the new addr is at the head of the list, so the
8616 		 * dev addr is not skipped because of the umv space limit when
8617 		 * the mac table is restored after a global/imp reset, which
8618 		 * clears the mac table in hardware.
8619 		 */
8620 		list_move(&new_node->node, list);
8621 	}
8622 
8623 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8624 		old_node = hclge_find_mac_node(list, old_addr);
8625 		if (old_node) {
8626 			if (old_node->state == HCLGE_MAC_TO_ADD) {
8627 				list_del(&old_node->node);
8628 				kfree(old_node);
8629 			} else {
8630 				old_node->state = HCLGE_MAC_TO_DEL;
8631 			}
8632 		}
8633 	}
8634 
8635 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8636 
8637 	return 0;
8638 }
8639 
8640 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8641 			      bool is_first)
8642 {
8643 	const unsigned char *new_addr = (const unsigned char *)p;
8644 	struct hclge_vport *vport = hclge_get_vport(handle);
8645 	struct hclge_dev *hdev = vport->back;
8646 	unsigned char *old_addr = NULL;
8647 	int ret;
8648 
8649 	/* mac addr check */
8650 	if (is_zero_ether_addr(new_addr) ||
8651 	    is_broadcast_ether_addr(new_addr) ||
8652 	    is_multicast_ether_addr(new_addr)) {
8653 		dev_err(&hdev->pdev->dev,
8654 			"change uc mac err! invalid mac: %pM.\n",
8655 			 new_addr);
8656 		return -EINVAL;
8657 	}
8658 
8659 	ret = hclge_pause_addr_cfg(hdev, new_addr);
8660 	if (ret) {
8661 		dev_err(&hdev->pdev->dev,
8662 			"failed to configure mac pause address, ret = %d\n",
8663 			ret);
8664 		return ret;
8665 	}
8666 
8667 	if (!is_first)
8668 		old_addr = hdev->hw.mac.mac_addr;
8669 
8670 	spin_lock_bh(&vport->mac_list_lock);
8671 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8672 	if (ret) {
8673 		dev_err(&hdev->pdev->dev,
8674 			"failed to change the mac addr:%pM, ret = %d\n",
8675 			new_addr, ret);
8676 		spin_unlock_bh(&vport->mac_list_lock);
8677 
8678 		if (!is_first)
8679 			hclge_pause_addr_cfg(hdev, old_addr);
8680 
8681 		return ret;
8682 	}
8683 	/* the dev addr must be updated under the spin lock to prevent it
8684 	 * from being removed by the set_rx_mode path.
8685 	 */
8686 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8687 	spin_unlock_bh(&vport->mac_list_lock);
8688 
8689 	hclge_task_schedule(hdev, 0);
8690 
8691 	return 0;
8692 }
8693 
8694 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8695 			  int cmd)
8696 {
8697 	struct hclge_vport *vport = hclge_get_vport(handle);
8698 	struct hclge_dev *hdev = vport->back;
8699 
8700 	if (!hdev->hw.mac.phydev)
8701 		return -EOPNOTSUPP;
8702 
8703 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8704 }
8705 
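/* Perform a read-modify-write of the vlan filter control configuration for
 * the given vlan type and vf: the fe_type bits are set when filter_en is
 * true and cleared otherwise, leaving the other filter enable bits intact.
 */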
8706 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8707 				      u8 fe_type, bool filter_en, u8 vf_id)
8708 {
8709 	struct hclge_vlan_filter_ctrl_cmd *req;
8710 	struct hclge_desc desc;
8711 	int ret;
8712 
8713 	/* read current vlan filter parameter */
8714 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8715 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8716 	req->vlan_type = vlan_type;
8717 	req->vf_id = vf_id;
8718 
8719 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8720 	if (ret) {
8721 		dev_err(&hdev->pdev->dev,
8722 			"failed to get vlan filter config, ret = %d.\n", ret);
8723 		return ret;
8724 	}
8725 
8726 	/* modify and write new config parameter */
8727 	hclge_cmd_reuse_desc(&desc, false);
8728 	req->vlan_fe = filter_en ?
8729 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8730 
8731 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8732 	if (ret)
8733 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8734 			ret);
8735 
8736 	return ret;
8737 }
8738 
8739 #define HCLGE_FILTER_TYPE_VF		0
8740 #define HCLGE_FILTER_TYPE_PORT		1
8741 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
8742 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
8743 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
8744 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
8745 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
8746 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
8747 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
8748 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
8749 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
8750 
8751 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8752 {
8753 	struct hclge_vport *vport = hclge_get_vport(handle);
8754 	struct hclge_dev *hdev = vport->back;
8755 
8756 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8757 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8758 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
8759 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8760 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
8761 	} else {
8762 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8763 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8764 					   0);
8765 	}
8766 	if (enable)
8767 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
8768 	else
8769 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8770 }
8771 
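/* Add or remove a vlan id in the per-function vlan filter table with the
 * VLAN_FILTER_VF_CFG command. A "table full" response is recorded in
 * hdev->vf_vlan_full, and "entry not found" is ignored when deleting.
 */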
8772 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8773 				    bool is_kill, u16 vlan,
8774 				    __be16 proto)
8775 {
8776 	struct hclge_vport *vport = &hdev->vport[vfid];
8777 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
8778 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
8779 	struct hclge_desc desc[2];
8780 	u8 vf_byte_val;
8781 	u8 vf_byte_off;
8782 	int ret;
8783 
8784 	/* if the vf vlan table is full, the firmware closes the vf vlan
8785 	 * filter, so it is neither possible nor necessary to add a new
8786 	 * vlan id to it. But if spoof check is enabled, new vlans must not
8787 	 * be added, since tx packets with these vlan ids would be dropped.
8788 	 */
8789 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8790 		if (vport->vf_info.spoofchk && vlan) {
8791 			dev_err(&hdev->pdev->dev,
8792 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
8793 			return -EPERM;
8794 		}
8795 		return 0;
8796 	}
8797 
8798 	hclge_cmd_setup_basic_desc(&desc[0],
8799 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8800 	hclge_cmd_setup_basic_desc(&desc[1],
8801 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8802 
8803 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8804 
8805 	vf_byte_off = vfid / 8;
8806 	vf_byte_val = 1 << (vfid % 8);
8807 
8808 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8809 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8810 
8811 	req0->vlan_id  = cpu_to_le16(vlan);
8812 	req0->vlan_cfg = is_kill;
8813 
8814 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8815 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8816 	else
8817 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8818 
8819 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
8820 	if (ret) {
8821 		dev_err(&hdev->pdev->dev,
8822 			"Send vf vlan command fail, ret =%d.\n",
8823 			ret);
8824 		return ret;
8825 	}
8826 
8827 	if (!is_kill) {
8828 #define HCLGE_VF_VLAN_NO_ENTRY	2
8829 		if (!req0->resp_code || req0->resp_code == 1)
8830 			return 0;
8831 
8832 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8833 			set_bit(vfid, hdev->vf_vlan_full);
8834 			dev_warn(&hdev->pdev->dev,
8835 				 "vf vlan table is full, vf vlan filter is disabled\n");
8836 			return 0;
8837 		}
8838 
8839 		dev_err(&hdev->pdev->dev,
8840 			"Add vf vlan filter fail, ret =%u.\n",
8841 			req0->resp_code);
8842 	} else {
8843 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
8844 		if (!req0->resp_code)
8845 			return 0;
8846 
8847 		/* the vf vlan filter is disabled when the vf vlan table is
8848 		 * full, so new vlan ids were never added to the table.
8849 		 * Just return 0 without a warning to avoid flooding the log
8850 		 * when unloading.
8851 		 */
8852 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8853 			return 0;
8854 
8855 		dev_err(&hdev->pdev->dev,
8856 			"Kill vf vlan filter fail, ret =%u.\n",
8857 			req0->resp_code);
8858 	}
8859 
8860 	return -EIO;
8861 }
8862 
8863 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8864 				      u16 vlan_id, bool is_kill)
8865 {
8866 	struct hclge_vlan_filter_pf_cfg_cmd *req;
8867 	struct hclge_desc desc;
8868 	u8 vlan_offset_byte_val;
8869 	u8 vlan_offset_byte;
8870 	u8 vlan_offset_160;
8871 	int ret;
8872 
8873 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8874 
8875 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8876 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8877 			   HCLGE_VLAN_BYTE_SIZE;
8878 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8879 
8880 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8881 	req->vlan_offset = vlan_offset_160;
8882 	req->vlan_cfg = is_kill;
8883 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8884 
8885 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8886 	if (ret)
8887 		dev_err(&hdev->pdev->dev,
8888 			"port vlan command, send fail, ret =%d.\n", ret);
8889 	return ret;
8890 }
8891 
8892 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8893 				    u16 vport_id, u16 vlan_id,
8894 				    bool is_kill)
8895 {
8896 	u16 vport_idx, vport_num = 0;
8897 	int ret;
8898 
8899 	if (is_kill && !vlan_id)
8900 		return 0;
8901 
8902 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8903 				       proto);
8904 	if (ret) {
8905 		dev_err(&hdev->pdev->dev,
8906 			"Set %u vport vlan filter config fail, ret =%d.\n",
8907 			vport_id, ret);
8908 		return ret;
8909 	}
8910 
8911 	/* vlan 0 may be added twice when 8021q module is enabled */
8912 	if (!is_kill && !vlan_id &&
8913 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
8914 		return 0;
8915 
8916 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8917 		dev_err(&hdev->pdev->dev,
8918 			"Add port vlan failed, vport %u is already in vlan %u\n",
8919 			vport_id, vlan_id);
8920 		return -EINVAL;
8921 	}
8922 
8923 	if (is_kill &&
8924 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8925 		dev_err(&hdev->pdev->dev,
8926 			"Delete port vlan failed, vport %u is not in vlan %u\n",
8927 			vport_id, vlan_id);
8928 		return -EINVAL;
8929 	}
8930 
8931 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8932 		vport_num++;
8933 
8934 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8935 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8936 						 is_kill);
8937 
8938 	return ret;
8939 }
8940 
8941 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8942 {
8943 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8944 	struct hclge_vport_vtag_tx_cfg_cmd *req;
8945 	struct hclge_dev *hdev = vport->back;
8946 	struct hclge_desc desc;
8947 	u16 bmap_index;
8948 	int status;
8949 
8950 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8951 
8952 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8953 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8954 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8955 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8956 		      vcfg->accept_tag1 ? 1 : 0);
8957 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8958 		      vcfg->accept_untag1 ? 1 : 0);
8959 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8960 		      vcfg->accept_tag2 ? 1 : 0);
8961 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8962 		      vcfg->accept_untag2 ? 1 : 0);
8963 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8964 		      vcfg->insert_tag1_en ? 1 : 0);
8965 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8966 		      vcfg->insert_tag2_en ? 1 : 0);
8967 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
8968 		      vcfg->tag_shift_mode_en ? 1 : 0);
8969 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8970 
8971 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8972 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8973 			HCLGE_VF_NUM_PER_BYTE;
8974 	req->vf_bitmap[bmap_index] =
8975 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8976 
8977 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8978 	if (status)
8979 		dev_err(&hdev->pdev->dev,
8980 			"Send port txvlan cfg command fail, ret =%d\n",
8981 			status);
8982 
8983 	return status;
8984 }
8985 
8986 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8987 {
8988 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8989 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8990 	struct hclge_dev *hdev = vport->back;
8991 	struct hclge_desc desc;
8992 	u16 bmap_index;
8993 	int status;
8994 
8995 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8996 
8997 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8998 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8999 		      vcfg->strip_tag1_en ? 1 : 0);
9000 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9001 		      vcfg->strip_tag2_en ? 1 : 0);
9002 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9003 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9004 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9005 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9006 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9007 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9008 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9009 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9010 
9011 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9012 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9013 			HCLGE_VF_NUM_PER_BYTE;
9014 	req->vf_bitmap[bmap_index] =
9015 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9016 
9017 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9018 	if (status)
9019 		dev_err(&hdev->pdev->dev,
9020 			"Send port rxvlan cfg command fail, ret =%d\n",
9021 			status);
9022 
9023 	return status;
9024 }
9025 
9026 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9027 				  u16 port_base_vlan_state,
9028 				  u16 vlan_tag)
9029 {
9030 	int ret;
9031 
9032 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9033 		vport->txvlan_cfg.accept_tag1 = true;
9034 		vport->txvlan_cfg.insert_tag1_en = false;
9035 		vport->txvlan_cfg.default_tag1 = 0;
9036 	} else {
9037 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9038 
9039 		vport->txvlan_cfg.accept_tag1 =
9040 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9041 		vport->txvlan_cfg.insert_tag1_en = true;
9042 		vport->txvlan_cfg.default_tag1 = vlan_tag;
9043 	}
9044 
9045 	vport->txvlan_cfg.accept_untag1 = true;
9046 
9047 	/* accept_tag2 and accept_untag2 are not supported on
9048 	 * pdev revision(0x20); newer revisions support them, but
9049 	 * these two fields cannot be configured by the user.
9050 	 */
9051 	vport->txvlan_cfg.accept_tag2 = true;
9052 	vport->txvlan_cfg.accept_untag2 = true;
9053 	vport->txvlan_cfg.insert_tag2_en = false;
9054 	vport->txvlan_cfg.default_tag2 = 0;
9055 	vport->txvlan_cfg.tag_shift_mode_en = true;
9056 
9057 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9058 		vport->rxvlan_cfg.strip_tag1_en = false;
9059 		vport->rxvlan_cfg.strip_tag2_en =
9060 				vport->rxvlan_cfg.rx_vlan_offload_en;
9061 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9062 	} else {
9063 		vport->rxvlan_cfg.strip_tag1_en =
9064 				vport->rxvlan_cfg.rx_vlan_offload_en;
9065 		vport->rxvlan_cfg.strip_tag2_en = true;
9066 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9067 	}
9068 
9069 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9070 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9071 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9072 
9073 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9074 	if (ret)
9075 		return ret;
9076 
9077 	return hclge_set_vlan_rx_offload_cfg(vport);
9078 }
9079 
9080 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9081 {
9082 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9083 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9084 	struct hclge_desc desc;
9085 	int status;
9086 
9087 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9088 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9089 	rx_req->ot_fst_vlan_type =
9090 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9091 	rx_req->ot_sec_vlan_type =
9092 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9093 	rx_req->in_fst_vlan_type =
9094 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9095 	rx_req->in_sec_vlan_type =
9096 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9097 
9098 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9099 	if (status) {
9100 		dev_err(&hdev->pdev->dev,
9101 			"Send rxvlan protocol type command fail, ret =%d\n",
9102 			status);
9103 		return status;
9104 	}
9105 
9106 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9107 
9108 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9109 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9110 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9111 
9112 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9113 	if (status)
9114 		dev_err(&hdev->pdev->dev,
9115 			"Send txvlan protocol type command fail, ret =%d\n",
9116 			status);
9117 
9118 	return status;
9119 }
9120 
9121 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9122 {
9123 #define HCLGE_DEF_VLAN_TYPE		0x8100
9124 
9125 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9126 	struct hclge_vport *vport;
9127 	int ret;
9128 	int i;
9129 
9130 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9131 		/* for revision 0x21, vf vlan filter is per function */
9132 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9133 			vport = &hdev->vport[i];
9134 			ret = hclge_set_vlan_filter_ctrl(hdev,
9135 							 HCLGE_FILTER_TYPE_VF,
9136 							 HCLGE_FILTER_FE_EGRESS,
9137 							 true,
9138 							 vport->vport_id);
9139 			if (ret)
9140 				return ret;
9141 		}
9142 
9143 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9144 						 HCLGE_FILTER_FE_INGRESS, true,
9145 						 0);
9146 		if (ret)
9147 			return ret;
9148 	} else {
9149 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9150 						 HCLGE_FILTER_FE_EGRESS_V1_B,
9151 						 true, 0);
9152 		if (ret)
9153 			return ret;
9154 	}
9155 
9156 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
9157 
9158 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9159 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9160 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9161 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9162 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9163 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9164 
9165 	ret = hclge_set_vlan_protocol_type(hdev);
9166 	if (ret)
9167 		return ret;
9168 
9169 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9170 		u16 vlan_tag;
9171 
9172 		vport = &hdev->vport[i];
9173 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9174 
9175 		ret = hclge_vlan_offload_cfg(vport,
9176 					     vport->port_base_vlan_cfg.state,
9177 					     vlan_tag);
9178 		if (ret)
9179 			return ret;
9180 	}
9181 
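	/* Presumably, adding vlan id 0 here ensures that priority-tagged
	 * (vlan id 0) traffic is not dropped by the vlan filter for the
	 * PF's own vport.
	 */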
9182 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9183 }
9184 
9185 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9186 				       bool writen_to_tbl)
9187 {
9188 	struct hclge_vport_vlan_cfg *vlan;
9189 
9190 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9191 	if (!vlan)
9192 		return;
9193 
9194 	vlan->hd_tbl_status = writen_to_tbl;
9195 	vlan->vlan_id = vlan_id;
9196 
9197 	list_add_tail(&vlan->node, &vport->vlan_list);
9198 }
9199 
9200 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9201 {
9202 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9203 	struct hclge_dev *hdev = vport->back;
9204 	int ret;
9205 
9206 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9207 		if (!vlan->hd_tbl_status) {
9208 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9209 						       vport->vport_id,
9210 						       vlan->vlan_id, false);
9211 			if (ret) {
9212 				dev_err(&hdev->pdev->dev,
9213 					"restore vport vlan list failed, ret=%d\n",
9214 					ret);
9215 				return ret;
9216 			}
9217 		}
9218 		vlan->hd_tbl_status = true;
9219 	}
9220 
9221 	return 0;
9222 }
9223 
9224 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9225 				      bool is_write_tbl)
9226 {
9227 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9228 	struct hclge_dev *hdev = vport->back;
9229 
9230 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9231 		if (vlan->vlan_id == vlan_id) {
9232 			if (is_write_tbl && vlan->hd_tbl_status)
9233 				hclge_set_vlan_filter_hw(hdev,
9234 							 htons(ETH_P_8021Q),
9235 							 vport->vport_id,
9236 							 vlan_id,
9237 							 true);
9238 
9239 			list_del(&vlan->node);
9240 			kfree(vlan);
9241 			break;
9242 		}
9243 	}
9244 }
9245 
9246 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9247 {
9248 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9249 	struct hclge_dev *hdev = vport->back;
9250 
9251 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9252 		if (vlan->hd_tbl_status)
9253 			hclge_set_vlan_filter_hw(hdev,
9254 						 htons(ETH_P_8021Q),
9255 						 vport->vport_id,
9256 						 vlan->vlan_id,
9257 						 true);
9258 
9259 		vlan->hd_tbl_status = false;
9260 		if (is_del_list) {
9261 			list_del(&vlan->node);
9262 			kfree(vlan);
9263 		}
9264 	}
9265 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
9266 }
9267 
9268 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9269 {
9270 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9271 	struct hclge_vport *vport;
9272 	int i;
9273 
9274 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9275 		vport = &hdev->vport[i];
9276 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9277 			list_del(&vlan->node);
9278 			kfree(vlan);
9279 		}
9280 	}
9281 }
9282 
9283 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9284 {
9285 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9286 	struct hclge_dev *hdev = vport->back;
9287 	u16 vlan_proto;
9288 	u16 vlan_id;
9289 	u16 state;
9290 	int ret;
9291 
9292 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9293 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9294 	state = vport->port_base_vlan_cfg.state;
9295 
9296 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9297 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9298 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9299 					 vport->vport_id, vlan_id,
9300 					 false);
9301 		return;
9302 	}
9303 
9304 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9305 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9306 					       vport->vport_id,
9307 					       vlan->vlan_id, false);
9308 		if (ret)
9309 			break;
9310 		vlan->hd_tbl_status = true;
9311 	}
9312 }
9313 
9314 /* For global reset and imp reset, hardware will clear the mac table,
9315  * so we change the mac address state from ACTIVE to TO_ADD so that they
9316  * can be restored in the service task after the reset completes.
9317  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need to
9318  * be restored after reset, so just remove these mac nodes from mac_list.
9319  */
9320 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9321 {
9322 	struct hclge_mac_node *mac_node, *tmp;
9323 
9324 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9325 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
9326 			mac_node->state = HCLGE_MAC_TO_ADD;
9327 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9328 			list_del(&mac_node->node);
9329 			kfree(mac_node);
9330 		}
9331 	}
9332 }
9333 
9334 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9335 {
9336 	spin_lock_bh(&vport->mac_list_lock);
9337 
9338 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9339 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9340 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9341 
9342 	spin_unlock_bh(&vport->mac_list_lock);
9343 }
9344 
9345 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9346 {
9347 	struct hclge_vport *vport = &hdev->vport[0];
9348 	struct hnae3_handle *handle = &vport->nic;
9349 
9350 	hclge_restore_mac_table_common(vport);
9351 	hclge_restore_vport_vlan_table(vport);
9352 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9353 
9354 	hclge_restore_fd_entries(handle);
9355 }
9356 
9357 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9358 {
9359 	struct hclge_vport *vport = hclge_get_vport(handle);
9360 
9361 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9362 		vport->rxvlan_cfg.strip_tag1_en = false;
9363 		vport->rxvlan_cfg.strip_tag2_en = enable;
9364 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9365 	} else {
9366 		vport->rxvlan_cfg.strip_tag1_en = enable;
9367 		vport->rxvlan_cfg.strip_tag2_en = true;
9368 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9369 	}
9370 
9371 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9372 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9373 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9374 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9375 
9376 	return hclge_set_vlan_rx_offload_cfg(vport);
9377 }
9378 
9379 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9380 					    u16 port_base_vlan_state,
9381 					    struct hclge_vlan_info *new_info,
9382 					    struct hclge_vlan_info *old_info)
9383 {
9384 	struct hclge_dev *hdev = vport->back;
9385 	int ret;
9386 
9387 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9388 		hclge_rm_vport_all_vlan_table(vport, false);
9389 		return hclge_set_vlan_filter_hw(hdev,
9390 						 htons(new_info->vlan_proto),
9391 						 vport->vport_id,
9392 						 new_info->vlan_tag,
9393 						 false);
9394 	}
9395 
9396 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9397 				       vport->vport_id, old_info->vlan_tag,
9398 				       true);
9399 	if (ret)
9400 		return ret;
9401 
9402 	return hclge_add_vport_all_vlan_table(vport);
9403 }
9404 
9405 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9406 				    struct hclge_vlan_info *vlan_info)
9407 {
9408 	struct hnae3_handle *nic = &vport->nic;
9409 	struct hclge_vlan_info *old_vlan_info;
9410 	struct hclge_dev *hdev = vport->back;
9411 	int ret;
9412 
9413 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9414 
9415 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9416 	if (ret)
9417 		return ret;
9418 
9419 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9420 		/* add new VLAN tag */
9421 		ret = hclge_set_vlan_filter_hw(hdev,
9422 					       htons(vlan_info->vlan_proto),
9423 					       vport->vport_id,
9424 					       vlan_info->vlan_tag,
9425 					       false);
9426 		if (ret)
9427 			return ret;
9428 
9429 		/* remove old VLAN tag */
9430 		ret = hclge_set_vlan_filter_hw(hdev,
9431 					       htons(old_vlan_info->vlan_proto),
9432 					       vport->vport_id,
9433 					       old_vlan_info->vlan_tag,
9434 					       true);
9435 		if (ret)
9436 			return ret;
9437 
9438 		goto update;
9439 	}
9440 
9441 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9442 					       old_vlan_info);
9443 	if (ret)
9444 		return ret;
9445 
9446 	/* update state only when disabling/enabling port based VLAN */
9447 	vport->port_base_vlan_cfg.state = state;
9448 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9449 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9450 	else
9451 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9452 
9453 update:
9454 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9455 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9456 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9457 
9458 	return 0;
9459 }
9460 
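/* Summary of the state resolution below: given the current port based vlan
 * state and the vlan requested by the user,
 *
 *   current state | requested vlan      | returned state
 *   --------------+---------------------+----------------
 *   DISABLE       | 0                   | NOCHANGE
 *   DISABLE       | non-zero            | ENABLE
 *   ENABLE        | 0                   | DISABLE
 *   ENABLE        | current vlan_tag    | NOCHANGE
 *   ENABLE        | other non-zero      | MODIFY
 */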
9461 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9462 					  enum hnae3_port_base_vlan_state state,
9463 					  u16 vlan)
9464 {
9465 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9466 		if (!vlan)
9467 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9468 		else
9469 			return HNAE3_PORT_BASE_VLAN_ENABLE;
9470 	} else {
9471 		if (!vlan)
9472 			return HNAE3_PORT_BASE_VLAN_DISABLE;
9473 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9474 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9475 		else
9476 			return HNAE3_PORT_BASE_VLAN_MODIFY;
9477 	}
9478 }
9479 
9480 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9481 				    u16 vlan, u8 qos, __be16 proto)
9482 {
9483 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9484 	struct hclge_vport *vport = hclge_get_vport(handle);
9485 	struct hclge_dev *hdev = vport->back;
9486 	struct hclge_vlan_info vlan_info;
9487 	u16 state;
9488 	int ret;
9489 
9490 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9491 		return -EOPNOTSUPP;
9492 
9493 	vport = hclge_get_vf_vport(hdev, vfid);
9494 	if (!vport)
9495 		return -EINVAL;
9496 
9497 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
9498 	if (vlan > VLAN_N_VID - 1 || qos > 7)
9499 		return -EINVAL;
9500 	if (proto != htons(ETH_P_8021Q))
9501 		return -EPROTONOSUPPORT;
9502 
9503 	state = hclge_get_port_base_vlan_state(vport,
9504 					       vport->port_base_vlan_cfg.state,
9505 					       vlan);
9506 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9507 		return 0;
9508 
9509 	vlan_info.vlan_tag = vlan;
9510 	vlan_info.qos = qos;
9511 	vlan_info.vlan_proto = ntohs(proto);
9512 
9513 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9514 	if (ret) {
9515 		dev_err(&hdev->pdev->dev,
9516 			"failed to update port base vlan for vf %d, ret = %d\n",
9517 			vfid, ret);
9518 		return ret;
9519 	}
9520 
9521 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9522 	 * VLAN state.
9523 	 */
9524 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9525 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9526 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9527 						  vport->vport_id, state,
9528 						  vlan, qos,
9529 						  ntohs(proto));
9530 
9531 	return 0;
9532 }
9533 
9534 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9535 {
9536 	struct hclge_vlan_info *vlan_info;
9537 	struct hclge_vport *vport;
9538 	int ret;
9539 	int vf;
9540 
9541 	/* clear port based vlan for all vfs */
9542 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9543 		vport = &hdev->vport[vf];
9544 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9545 
9546 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9547 					       vport->vport_id,
9548 					       vlan_info->vlan_tag, true);
9549 		if (ret)
9550 			dev_err(&hdev->pdev->dev,
9551 				"failed to clear vf vlan for vf%d, ret = %d\n",
9552 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9553 	}
9554 }
9555 
9556 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9557 			  u16 vlan_id, bool is_kill)
9558 {
9559 	struct hclge_vport *vport = hclge_get_vport(handle);
9560 	struct hclge_dev *hdev = vport->back;
9561 	bool writen_to_tbl = false;
9562 	int ret = 0;
9563 
9564 	/* When the device is resetting or reset has failed, firmware is unable
9565 	 * to handle the mailbox. Just record the vlan id, and remove it after
9566 	 * the reset has finished.
9567 	 */
9568 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9569 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9570 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9571 		return -EBUSY;
9572 	}
9573 
9574 	/* When port based vlan is enabled, the port based vlan is used as the
9575 	 * vlan filter entry. In this case, the vlan filter table is not
9576 	 * updated when the user adds or removes a vlan; only the vport vlan
9577 	 * list is updated. The vlan ids in the vlan list are written to the
9578 	 * vlan filter table once port based vlan is disabled.
9579 	 */
9580 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9581 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9582 					       vlan_id, is_kill);
9583 		writen_to_tbl = true;
9584 	}
9585 
9586 	if (!ret) {
9587 		if (is_kill)
9588 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9589 		else
9590 			hclge_add_vport_vlan_table(vport, vlan_id,
9591 						   writen_to_tbl);
9592 	} else if (is_kill) {
9593 		/* when removing the hw vlan filter failed, record the vlan id
9594 		 * and try to remove it from hw later, to stay consistent
9595 		 * with the stack
9596 		 */
9597 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9598 	}
9599 	return ret;
9600 }
9601 
9602 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9603 {
9604 #define HCLGE_MAX_SYNC_COUNT	60
9605 
9606 	int i, ret, sync_cnt = 0;
9607 	u16 vlan_id;
9608 
9609 	/* start from vport 1 for PF is always alive */
9610 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9611 		struct hclge_vport *vport = &hdev->vport[i];
9612 
9613 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9614 					 VLAN_N_VID);
9615 		while (vlan_id != VLAN_N_VID) {
9616 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9617 						       vport->vport_id, vlan_id,
9618 						       true);
9619 			if (ret && ret != -EINVAL)
9620 				return;
9621 
9622 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9623 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9624 
9625 			sync_cnt++;
9626 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9627 				return;
9628 
9629 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9630 						 VLAN_N_VID);
9631 		}
9632 	}
9633 }
9634 
9635 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9636 {
9637 	struct hclge_config_max_frm_size_cmd *req;
9638 	struct hclge_desc desc;
9639 
9640 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9641 
9642 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9643 	req->max_frm_size = cpu_to_le16(new_mps);
9644 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9645 
9646 	return hclge_cmd_send(&hdev->hw, &desc, 1);
9647 }
9648 
9649 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9650 {
9651 	struct hclge_vport *vport = hclge_get_vport(handle);
9652 
9653 	return hclge_set_vport_mtu(vport, new_mtu);
9654 }
9655 
9656 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9657 {
9658 	struct hclge_dev *hdev = vport->back;
9659 	int i, max_frm_size, ret;
9660 
9661 	/* HW supports 2 layers of vlan */
9662 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
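	/* For example, a standard mtu of 1500 gives a maximum frame size of
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526
	 * bytes, which is then checked against the supported range below.
	 */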
9663 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9664 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
9665 		return -EINVAL;
9666 
9667 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9668 	mutex_lock(&hdev->vport_lock);
9669 	/* VF's mps must fit within hdev->mps */
9670 	if (vport->vport_id && max_frm_size > hdev->mps) {
9671 		mutex_unlock(&hdev->vport_lock);
9672 		return -EINVAL;
9673 	} else if (vport->vport_id) {
9674 		vport->mps = max_frm_size;
9675 		mutex_unlock(&hdev->vport_lock);
9676 		return 0;
9677 	}
9678 
9679 	/* PF's mps must be no less than any VF's mps */
9680 	for (i = 1; i < hdev->num_alloc_vport; i++)
9681 		if (max_frm_size < hdev->vport[i].mps) {
9682 			mutex_unlock(&hdev->vport_lock);
9683 			return -EINVAL;
9684 		}
9685 
9686 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9687 
9688 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
9689 	if (ret) {
9690 		dev_err(&hdev->pdev->dev,
9691 			"Change mtu fail, ret =%d\n", ret);
9692 		goto out;
9693 	}
9694 
9695 	hdev->mps = max_frm_size;
9696 	vport->mps = max_frm_size;
9697 
9698 	ret = hclge_buffer_alloc(hdev);
9699 	if (ret)
9700 		dev_err(&hdev->pdev->dev,
9701 			"Allocate buffer fail, ret =%d\n", ret);
9702 
9703 out:
9704 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9705 	mutex_unlock(&hdev->vport_lock);
9706 	return ret;
9707 }
9708 
9709 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9710 				    bool enable)
9711 {
9712 	struct hclge_reset_tqp_queue_cmd *req;
9713 	struct hclge_desc desc;
9714 	int ret;
9715 
9716 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9717 
9718 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9719 	req->tqp_id = cpu_to_le16(queue_id);
9720 	if (enable)
9721 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9722 
9723 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9724 	if (ret) {
9725 		dev_err(&hdev->pdev->dev,
9726 			"Send tqp reset cmd error, status =%d\n", ret);
9727 		return ret;
9728 	}
9729 
9730 	return 0;
9731 }
9732 
9733 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9734 {
9735 	struct hclge_reset_tqp_queue_cmd *req;
9736 	struct hclge_desc desc;
9737 	int ret;
9738 
9739 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9740 
9741 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9742 	req->tqp_id = cpu_to_le16(queue_id);
9743 
9744 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9745 	if (ret) {
9746 		dev_err(&hdev->pdev->dev,
9747 			"Get reset status error, status =%d\n", ret);
9748 		return ret;
9749 	}
9750 
9751 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9752 }
9753 
9754 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9755 {
9756 	struct hnae3_queue *queue;
9757 	struct hclge_tqp *tqp;
9758 
9759 	queue = handle->kinfo.tqp[queue_id];
9760 	tqp = container_of(queue, struct hclge_tqp, q);
9761 
9762 	return tqp->index;
9763 }
9764 
9765 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9766 {
9767 	struct hclge_vport *vport = hclge_get_vport(handle);
9768 	struct hclge_dev *hdev = vport->back;
9769 	int reset_try_times = 0;
9770 	int reset_status;
9771 	u16 queue_gid;
9772 	int ret;
9773 
9774 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9775 
9776 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9777 	if (ret) {
9778 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9779 		return ret;
9780 	}
9781 
9782 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9783 	if (ret) {
9784 		dev_err(&hdev->pdev->dev,
9785 			"Send reset tqp cmd fail, ret = %d\n", ret);
9786 		return ret;
9787 	}
9788 
9789 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9790 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9791 		if (reset_status)
9792 			break;
9793 
9794 		/* Wait for tqp hw reset */
9795 		usleep_range(1000, 1200);
9796 	}
9797 
9798 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9799 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9800 		return ret;
9801 	}
9802 
9803 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9804 	if (ret)
9805 		dev_err(&hdev->pdev->dev,
9806 			"Deassert the soft reset fail, ret = %d\n", ret);
9807 
9808 	return ret;
9809 }
9810 
9811 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9812 {
9813 	struct hclge_dev *hdev = vport->back;
9814 	int reset_try_times = 0;
9815 	int reset_status;
9816 	u16 queue_gid;
9817 	int ret;
9818 
9819 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9820 
9821 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9822 	if (ret) {
9823 		dev_warn(&hdev->pdev->dev,
9824 			 "Send reset tqp cmd fail, ret = %d\n", ret);
9825 		return;
9826 	}
9827 
9828 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9829 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9830 		if (reset_status)
9831 			break;
9832 
9833 		/* Wait for tqp hw reset */
9834 		usleep_range(1000, 1200);
9835 	}
9836 
9837 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9838 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9839 		return;
9840 	}
9841 
9842 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9843 	if (ret)
9844 		dev_warn(&hdev->pdev->dev,
9845 			 "Deassert the soft reset fail, ret = %d\n", ret);
9846 }
9847 
9848 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9849 {
9850 	struct hclge_vport *vport = hclge_get_vport(handle);
9851 	struct hclge_dev *hdev = vport->back;
9852 
9853 	return hdev->fw_version;
9854 }
9855 
9856 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9857 {
9858 	struct phy_device *phydev = hdev->hw.mac.phydev;
9859 
9860 	if (!phydev)
9861 		return;
9862 
9863 	phy_set_asym_pause(phydev, rx_en, tx_en);
9864 }
9865 
9866 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9867 {
9868 	int ret;
9869 
9870 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9871 		return 0;
9872 
9873 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9874 	if (ret)
9875 		dev_err(&hdev->pdev->dev,
9876 			"configure pauseparam error, ret = %d.\n", ret);
9877 
9878 	return ret;
9879 }
9880 
9881 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9882 {
9883 	struct phy_device *phydev = hdev->hw.mac.phydev;
9884 	u16 remote_advertising = 0;
9885 	u16 local_advertising;
9886 	u32 rx_pause, tx_pause;
9887 	u8 flowctl;
9888 
9889 	if (!phydev->link || !phydev->autoneg)
9890 		return 0;
9891 
9892 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9893 
9894 	if (phydev->pause)
9895 		remote_advertising = LPA_PAUSE_CAP;
9896 
9897 	if (phydev->asym_pause)
9898 		remote_advertising |= LPA_PAUSE_ASYM;
9899 
9900 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
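	/* mii_resolve_flowctrl_fdx() applies the standard 802.3 pause
	 * resolution; for example, when both link partners advertise
	 * symmetric pause it returns FLOW_CTRL_TX | FLOW_CTRL_RX.
	 */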
9901 					   remote_advertising);
9902 	tx_pause = flowctl & FLOW_CTRL_TX;
9903 	rx_pause = flowctl & FLOW_CTRL_RX;
9904 
9905 	if (phydev->duplex == HCLGE_MAC_HALF) {
9906 		tx_pause = 0;
9907 		rx_pause = 0;
9908 	}
9909 
9910 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9911 }
9912 
9913 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9914 				 u32 *rx_en, u32 *tx_en)
9915 {
9916 	struct hclge_vport *vport = hclge_get_vport(handle);
9917 	struct hclge_dev *hdev = vport->back;
9918 	struct phy_device *phydev = hdev->hw.mac.phydev;
9919 
9920 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9921 
9922 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9923 		*rx_en = 0;
9924 		*tx_en = 0;
9925 		return;
9926 	}
9927 
9928 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9929 		*rx_en = 1;
9930 		*tx_en = 0;
9931 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9932 		*tx_en = 1;
9933 		*rx_en = 0;
9934 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9935 		*rx_en = 1;
9936 		*tx_en = 1;
9937 	} else {
9938 		*rx_en = 0;
9939 		*tx_en = 0;
9940 	}
9941 }
9942 
9943 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9944 					 u32 rx_en, u32 tx_en)
9945 {
9946 	if (rx_en && tx_en)
9947 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
9948 	else if (rx_en && !tx_en)
9949 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9950 	else if (!rx_en && tx_en)
9951 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9952 	else
9953 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
9954 
9955 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9956 }
9957 
9958 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9959 				u32 rx_en, u32 tx_en)
9960 {
9961 	struct hclge_vport *vport = hclge_get_vport(handle);
9962 	struct hclge_dev *hdev = vport->back;
9963 	struct phy_device *phydev = hdev->hw.mac.phydev;
9964 	u32 fc_autoneg;
9965 
9966 	if (phydev) {
9967 		fc_autoneg = hclge_get_autoneg(handle);
9968 		if (auto_neg != fc_autoneg) {
9969 			dev_info(&hdev->pdev->dev,
9970 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9971 			return -EOPNOTSUPP;
9972 		}
9973 	}
9974 
9975 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9976 		dev_info(&hdev->pdev->dev,
9977 			 "Priority flow control enabled. Cannot set link flow control.\n");
9978 		return -EOPNOTSUPP;
9979 	}
9980 
9981 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9982 
9983 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9984 
9985 	if (!auto_neg)
9986 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9987 
9988 	if (phydev)
9989 		return phy_start_aneg(phydev);
9990 
9991 	return -EOPNOTSUPP;
9992 }
9993 
9994 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9995 					  u8 *auto_neg, u32 *speed, u8 *duplex)
9996 {
9997 	struct hclge_vport *vport = hclge_get_vport(handle);
9998 	struct hclge_dev *hdev = vport->back;
9999 
10000 	if (speed)
10001 		*speed = hdev->hw.mac.speed;
10002 	if (duplex)
10003 		*duplex = hdev->hw.mac.duplex;
10004 	if (auto_neg)
10005 		*auto_neg = hdev->hw.mac.autoneg;
10006 }
10007 
10008 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10009 				 u8 *module_type)
10010 {
10011 	struct hclge_vport *vport = hclge_get_vport(handle);
10012 	struct hclge_dev *hdev = vport->back;
10013 
10014 	/* When the nic is down, the service task is not running and does not
10015 	 * update the port information every second. Query the port information
10016 	 * before returning the media type to ensure it is up to date.
10017 	 */
10018 	hclge_update_port_info(hdev);
10019 
10020 	if (media_type)
10021 		*media_type = hdev->hw.mac.media_type;
10022 
10023 	if (module_type)
10024 		*module_type = hdev->hw.mac.module_type;
10025 }
10026 
10027 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10028 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10029 {
10030 	struct hclge_vport *vport = hclge_get_vport(handle);
10031 	struct hclge_dev *hdev = vport->back;
10032 	struct phy_device *phydev = hdev->hw.mac.phydev;
10033 	int mdix_ctrl, mdix, is_resolved;
10034 	unsigned int retval;
10035 
10036 	if (!phydev) {
10037 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10038 		*tp_mdix = ETH_TP_MDI_INVALID;
10039 		return;
10040 	}
10041 
10042 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10043 
10044 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10045 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10046 				    HCLGE_PHY_MDIX_CTRL_S);
10047 
10048 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10049 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10050 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10051 
10052 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10053 
10054 	switch (mdix_ctrl) {
10055 	case 0x0:
10056 		*tp_mdix_ctrl = ETH_TP_MDI;
10057 		break;
10058 	case 0x1:
10059 		*tp_mdix_ctrl = ETH_TP_MDI_X;
10060 		break;
10061 	case 0x3:
10062 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10063 		break;
10064 	default:
10065 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10066 		break;
10067 	}
10068 
10069 	if (!is_resolved)
10070 		*tp_mdix = ETH_TP_MDI_INVALID;
10071 	else if (mdix)
10072 		*tp_mdix = ETH_TP_MDI_X;
10073 	else
10074 		*tp_mdix = ETH_TP_MDI;
10075 }
10076 
10077 static void hclge_info_show(struct hclge_dev *hdev)
10078 {
10079 	struct device *dev = &hdev->pdev->dev;
10080 
10081 	dev_info(dev, "PF info begin:\n");
10082 
10083 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10084 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10085 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10086 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10087 	dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10088 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10089 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10090 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10091 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10092 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10093 	dev_info(dev, "This is %s PF\n",
10094 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10095 	dev_info(dev, "DCB %s\n",
10096 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10097 	dev_info(dev, "MQPRIO %s\n",
10098 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10099 
10100 	dev_info(dev, "PF info end.\n");
10101 }
10102 
10103 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10104 					  struct hclge_vport *vport)
10105 {
10106 	struct hnae3_client *client = vport->nic.client;
10107 	struct hclge_dev *hdev = ae_dev->priv;
10108 	int rst_cnt = hdev->rst_stats.reset_cnt;
10109 	int ret;
10110 
10111 	ret = client->ops->init_instance(&vport->nic);
10112 	if (ret)
10113 		return ret;
10114 
10115 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10116 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10117 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10118 		ret = -EBUSY;
10119 		goto init_nic_err;
10120 	}
10121 
10122 	/* Enable nic hw error interrupts */
10123 	ret = hclge_config_nic_hw_error(hdev, true);
10124 	if (ret) {
10125 		dev_err(&ae_dev->pdev->dev,
10126 			"fail(%d) to enable hw error interrupts\n", ret);
10127 		goto init_nic_err;
10128 	}
10129 
10130 	hnae3_set_client_init_flag(client, ae_dev, 1);
10131 
10132 	if (netif_msg_drv(&hdev->vport->nic))
10133 		hclge_info_show(hdev);
10134 
10135 	return ret;
10136 
10137 init_nic_err:
10138 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10139 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10140 		msleep(HCLGE_WAIT_RESET_DONE);
10141 
10142 	client->ops->uninit_instance(&vport->nic, 0);
10143 
10144 	return ret;
10145 }
10146 
10147 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10148 					   struct hclge_vport *vport)
10149 {
10150 	struct hclge_dev *hdev = ae_dev->priv;
10151 	struct hnae3_client *client;
10152 	int rst_cnt;
10153 	int ret;
10154 
10155 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10156 	    !hdev->nic_client)
10157 		return 0;
10158 
10159 	client = hdev->roce_client;
10160 	ret = hclge_init_roce_base_info(vport);
10161 	if (ret)
10162 		return ret;
10163 
10164 	rst_cnt = hdev->rst_stats.reset_cnt;
10165 	ret = client->ops->init_instance(&vport->roce);
10166 	if (ret)
10167 		return ret;
10168 
10169 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10170 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10171 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10172 		ret = -EBUSY;
10173 		goto init_roce_err;
10174 	}
10175 
10176 	/* Enable roce ras interrupts */
10177 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
10178 	if (ret) {
10179 		dev_err(&ae_dev->pdev->dev,
10180 			"fail(%d) to enable roce ras interrupts\n", ret);
10181 		goto init_roce_err;
10182 	}
10183 
10184 	hnae3_set_client_init_flag(client, ae_dev, 1);
10185 
10186 	return 0;
10187 
10188 init_roce_err:
10189 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10190 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10191 		msleep(HCLGE_WAIT_RESET_DONE);
10192 
10193 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10194 
10195 	return ret;
10196 }
10197 
10198 static int hclge_init_client_instance(struct hnae3_client *client,
10199 				      struct hnae3_ae_dev *ae_dev)
10200 {
10201 	struct hclge_dev *hdev = ae_dev->priv;
10202 	struct hclge_vport *vport;
10203 	int i, ret;
10204 
10205 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
10206 		vport = &hdev->vport[i];
10207 
10208 		switch (client->type) {
10209 		case HNAE3_CLIENT_KNIC:
10210 			hdev->nic_client = client;
10211 			vport->nic.client = client;
10212 			ret = hclge_init_nic_client_instance(ae_dev, vport);
10213 			if (ret)
10214 				goto clear_nic;
10215 
10216 			ret = hclge_init_roce_client_instance(ae_dev, vport);
10217 			if (ret)
10218 				goto clear_roce;
10219 
10220 			break;
10221 		case HNAE3_CLIENT_ROCE:
10222 			if (hnae3_dev_roce_supported(hdev)) {
10223 				hdev->roce_client = client;
10224 				vport->roce.client = client;
10225 			}
10226 
10227 			ret = hclge_init_roce_client_instance(ae_dev, vport);
10228 			if (ret)
10229 				goto clear_roce;
10230 
10231 			break;
10232 		default:
10233 			return -EINVAL;
10234 		}
10235 	}
10236 
10237 	return 0;
10238 
10239 clear_nic:
10240 	hdev->nic_client = NULL;
10241 	vport->nic.client = NULL;
10242 	return ret;
10243 clear_roce:
10244 	hdev->roce_client = NULL;
10245 	vport->roce.client = NULL;
10246 	return ret;
10247 }
10248 
10249 static void hclge_uninit_client_instance(struct hnae3_client *client,
10250 					 struct hnae3_ae_dev *ae_dev)
10251 {
10252 	struct hclge_dev *hdev = ae_dev->priv;
10253 	struct hclge_vport *vport;
10254 	int i;
10255 
10256 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10257 		vport = &hdev->vport[i];
10258 		if (hdev->roce_client) {
10259 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10260 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10261 				msleep(HCLGE_WAIT_RESET_DONE);
10262 
10263 			hdev->roce_client->ops->uninit_instance(&vport->roce,
10264 								0);
10265 			hdev->roce_client = NULL;
10266 			vport->roce.client = NULL;
10267 		}
10268 		if (client->type == HNAE3_CLIENT_ROCE)
10269 			return;
10270 		if (hdev->nic_client && client->ops->uninit_instance) {
10271 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10272 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10273 				msleep(HCLGE_WAIT_RESET_DONE);
10274 
10275 			client->ops->uninit_instance(&vport->nic, 0);
10276 			hdev->nic_client = NULL;
10277 			vport->nic.client = NULL;
10278 		}
10279 	}
10280 }
10281 
10282 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10283 {
10284 #define HCLGE_MEM_BAR		4
10285 
10286 	struct pci_dev *pdev = hdev->pdev;
10287 	struct hclge_hw *hw = &hdev->hw;
10288 
10289 	/* if the device does not have device memory, return directly */
10290 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10291 		return 0;
10292 
10293 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
10294 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
10295 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
10296 	if (!hw->mem_base) {
10297 		dev_err(&pdev->dev, "failed to map device memory\n");
10298 		return -EFAULT;
10299 	}
10300 
10301 	return 0;
10302 }
10303 
10304 static int hclge_pci_init(struct hclge_dev *hdev)
10305 {
10306 	struct pci_dev *pdev = hdev->pdev;
10307 	struct hclge_hw *hw;
10308 	int ret;
10309 
10310 	ret = pci_enable_device(pdev);
10311 	if (ret) {
10312 		dev_err(&pdev->dev, "failed to enable PCI device\n");
10313 		return ret;
10314 	}
10315 
10316 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10317 	if (ret) {
10318 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10319 		if (ret) {
10320 			dev_err(&pdev->dev,
10321 				"can't set consistent PCI DMA");
10322 			goto err_disable_device;
10323 		}
10324 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10325 	}
10326 
10327 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10328 	if (ret) {
10329 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10330 		goto err_disable_device;
10331 	}
10332 
10333 	pci_set_master(pdev);
10334 	hw = &hdev->hw;
10335 	hw->io_base = pcim_iomap(pdev, 2, 0);
10336 	if (!hw->io_base) {
10337 		dev_err(&pdev->dev, "Can't map configuration register space\n");
10338 		ret = -ENOMEM;
10339 		goto err_clr_master;
10340 	}
10341 
10342 	ret = hclge_dev_mem_map(hdev);
10343 	if (ret)
10344 		goto err_unmap_io_base;
10345 
10346 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10347 
10348 	return 0;
10349 
10350 err_unmap_io_base:
10351 	pcim_iounmap(pdev, hdev->hw.io_base);
10352 err_clr_master:
10353 	pci_clear_master(pdev);
10354 	pci_release_regions(pdev);
10355 err_disable_device:
10356 	pci_disable_device(pdev);
10357 
10358 	return ret;
10359 }
10360 
10361 static void hclge_pci_uninit(struct hclge_dev *hdev)
10362 {
10363 	struct pci_dev *pdev = hdev->pdev;
10364 
10365 	if (hdev->hw.mem_base)
10366 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10367 
10368 	pcim_iounmap(pdev, hdev->hw.io_base);
10369 	pci_free_irq_vectors(pdev);
10370 	pci_clear_master(pdev);
10371 	pci_release_mem_regions(pdev);
10372 	pci_disable_device(pdev);
10373 }
10374 
10375 static void hclge_state_init(struct hclge_dev *hdev)
10376 {
10377 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10378 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10379 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10380 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10381 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10382 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10383 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10384 }
10385 
10386 static void hclge_state_uninit(struct hclge_dev *hdev)
10387 {
10388 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10389 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10390 
10391 	if (hdev->reset_timer.function)
10392 		del_timer_sync(&hdev->reset_timer);
10393 	if (hdev->service_task.work.func)
10394 		cancel_delayed_work_sync(&hdev->service_task);
10395 }
10396 
10397 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10398 {
10399 #define HCLGE_FLR_RETRY_WAIT_MS	500
10400 #define HCLGE_FLR_RETRY_CNT	5
10401 
10402 	struct hclge_dev *hdev = ae_dev->priv;
10403 	int retry_cnt = 0;
10404 	int ret;
10405 
10406 retry:
10407 	down(&hdev->reset_sem);
10408 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10409 	hdev->reset_type = HNAE3_FLR_RESET;
10410 	ret = hclge_reset_prepare(hdev);
10411 	if (ret || hdev->reset_pending) {
10412 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10413 			ret);
10414 		if (hdev->reset_pending ||
10415 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10416 			dev_err(&hdev->pdev->dev,
10417 				"reset_pending:0x%lx, retry_cnt:%d\n",
10418 				hdev->reset_pending, retry_cnt);
10419 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10420 			up(&hdev->reset_sem);
10421 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
10422 			goto retry;
10423 		}
10424 	}
10425 
10426 	/* disable misc vector before FLR is done */
10427 	hclge_enable_vector(&hdev->misc_vector, false);
10428 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10429 	hdev->rst_stats.flr_rst_cnt++;
10430 }
10431 
10432 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10433 {
10434 	struct hclge_dev *hdev = ae_dev->priv;
10435 	int ret;
10436 
10437 	hclge_enable_vector(&hdev->misc_vector, true);
10438 
10439 	ret = hclge_reset_rebuild(hdev);
10440 	if (ret)
10441 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10442 
10443 	hdev->reset_type = HNAE3_NONE_RESET;
10444 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10445 	up(&hdev->reset_sem);
10446 }
10447 
10448 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10449 {
10450 	u16 i;
10451 
10452 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10453 		struct hclge_vport *vport = &hdev->vport[i];
10454 		int ret;
10455 
10456 		/* Send cmd to clear VF's FUNC_RST_ING */
10457 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10458 		if (ret)
10459 			dev_warn(&hdev->pdev->dev,
10460 				 "clear vf(%u) rst failed %d!\n",
10461 				 vport->vport_id, ret);
10462 	}
10463 }
10464 
10465 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10466 {
10467 	struct pci_dev *pdev = ae_dev->pdev;
10468 	struct hclge_dev *hdev;
10469 	int ret;
10470 
10471 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10472 	if (!hdev)
10473 		return -ENOMEM;
10474 
10475 	hdev->pdev = pdev;
10476 	hdev->ae_dev = ae_dev;
10477 	hdev->reset_type = HNAE3_NONE_RESET;
10478 	hdev->reset_level = HNAE3_FUNC_RESET;
10479 	ae_dev->priv = hdev;
10480 
10481 	/* HW supports 2 layers of vlan */
10482 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10483 
10484 	mutex_init(&hdev->vport_lock);
10485 	spin_lock_init(&hdev->fd_rule_lock);
10486 	sema_init(&hdev->reset_sem, 1);
10487 
10488 	ret = hclge_pci_init(hdev);
10489 	if (ret)
10490 		goto out;
10491 
10492 	/* Firmware command queue initialization */
10493 	ret = hclge_cmd_queue_init(hdev);
10494 	if (ret)
10495 		goto err_pci_uninit;
10496 
10497 	/* Firmware command initialization */
10498 	ret = hclge_cmd_init(hdev);
10499 	if (ret)
10500 		goto err_cmd_uninit;
10501 
10502 	ret = hclge_get_cap(hdev);
10503 	if (ret)
10504 		goto err_cmd_uninit;
10505 
10506 	ret = hclge_query_dev_specs(hdev);
10507 	if (ret) {
10508 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10509 			ret);
10510 		goto err_cmd_uninit;
10511 	}
10512 
10513 	ret = hclge_configure(hdev);
10514 	if (ret) {
10515 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10516 		goto err_cmd_uninit;
10517 	}
10518 
10519 	ret = hclge_init_msi(hdev);
10520 	if (ret) {
10521 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10522 		goto err_cmd_uninit;
10523 	}
10524 
10525 	ret = hclge_misc_irq_init(hdev);
10526 	if (ret)
10527 		goto err_msi_uninit;
10528 
10529 	ret = hclge_alloc_tqps(hdev);
10530 	if (ret) {
10531 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10532 		goto err_msi_irq_uninit;
10533 	}
10534 
10535 	ret = hclge_alloc_vport(hdev);
10536 	if (ret)
10537 		goto err_msi_irq_uninit;
10538 
10539 	ret = hclge_map_tqp(hdev);
10540 	if (ret)
10541 		goto err_msi_irq_uninit;
10542 
10543 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10544 		ret = hclge_mac_mdio_config(hdev);
10545 		if (ret)
10546 			goto err_msi_irq_uninit;
10547 	}
10548 
10549 	ret = hclge_init_umv_space(hdev);
10550 	if (ret)
10551 		goto err_mdiobus_unreg;
10552 
10553 	ret = hclge_mac_init(hdev);
10554 	if (ret) {
10555 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10556 		goto err_mdiobus_unreg;
10557 	}
10558 
10559 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10560 	if (ret) {
10561 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10562 		goto err_mdiobus_unreg;
10563 	}
10564 
10565 	ret = hclge_config_gro(hdev, true);
10566 	if (ret)
10567 		goto err_mdiobus_unreg;
10568 
10569 	ret = hclge_init_vlan_config(hdev);
10570 	if (ret) {
10571 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10572 		goto err_mdiobus_unreg;
10573 	}
10574 
10575 	ret = hclge_tm_schd_init(hdev);
10576 	if (ret) {
10577 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10578 		goto err_mdiobus_unreg;
10579 	}
10580 
10581 	hclge_rss_init_cfg(hdev);
10582 	ret = hclge_rss_init_hw(hdev);
10583 	if (ret) {
10584 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10585 		goto err_mdiobus_unreg;
10586 	}
10587 
10588 	ret = init_mgr_tbl(hdev);
10589 	if (ret) {
10590 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10591 		goto err_mdiobus_unreg;
10592 	}
10593 
10594 	ret = hclge_init_fd_config(hdev);
10595 	if (ret) {
10596 		dev_err(&pdev->dev,
10597 			"fd table init fail, ret=%d\n", ret);
10598 		goto err_mdiobus_unreg;
10599 	}
10600 
10601 	INIT_KFIFO(hdev->mac_tnl_log);
10602 
10603 	hclge_dcb_ops_set(hdev);
10604 
10605 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10606 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10607 
10608 	/* Set up affinity after the service timer is set up, because
10609 	 * add_timer_on is called in the affinity notify callback.
10610 	 */
10611 	hclge_misc_affinity_setup(hdev);
10612 
10613 	hclge_clear_all_event_cause(hdev);
10614 	hclge_clear_resetting_state(hdev);
10615 
10616 	/* Log and clear the hw errors that have already occurred */
10617 	hclge_handle_all_hns_hw_errors(ae_dev);
10618 
10619 	/* request a delayed reset for error recovery, because an immediate
10620 	 * global reset on this PF may affect pending initialization of other PFs
10621 	 */
10622 	if (ae_dev->hw_err_reset_req) {
10623 		enum hnae3_reset_type reset_level;
10624 
10625 		reset_level = hclge_get_reset_level(ae_dev,
10626 						    &ae_dev->hw_err_reset_req);
10627 		hclge_set_def_reset_request(ae_dev, reset_level);
10628 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10629 	}
10630 
10631 	/* Enable MISC vector(vector0) */
10632 	hclge_enable_vector(&hdev->misc_vector, true);
10633 
10634 	hclge_state_init(hdev);
10635 	hdev->last_reset_time = jiffies;
10636 
10637 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10638 		 HCLGE_DRIVER_NAME);
10639 
10640 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10641 
10642 	return 0;
10643 
10644 err_mdiobus_unreg:
10645 	if (hdev->hw.mac.phydev)
10646 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
10647 err_msi_irq_uninit:
10648 	hclge_misc_irq_uninit(hdev);
10649 err_msi_uninit:
10650 	pci_free_irq_vectors(pdev);
10651 err_cmd_uninit:
10652 	hclge_cmd_uninit(hdev);
10653 err_pci_uninit:
10654 	pcim_iounmap(pdev, hdev->hw.io_base);
10655 	pci_clear_master(pdev);
10656 	pci_release_regions(pdev);
10657 	pci_disable_device(pdev);
10658 out:
10659 	mutex_destroy(&hdev->vport_lock);
10660 	return ret;
10661 }
10662 
10663 static void hclge_stats_clear(struct hclge_dev *hdev)
10664 {
10665 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10666 }
10667 
10668 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10669 {
10670 	return hclge_config_switch_param(hdev, vf, enable,
10671 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10672 }
10673 
10674 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10675 {
10676 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10677 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
10678 					  enable, vf);
10679 }
10680 
10681 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10682 {
10683 	int ret;
10684 
10685 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10686 	if (ret) {
10687 		dev_err(&hdev->pdev->dev,
10688 			"Set vf %d mac spoof check %s failed, ret=%d\n",
10689 			vf, enable ? "on" : "off", ret);
10690 		return ret;
10691 	}
10692 
10693 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10694 	if (ret)
10695 		dev_err(&hdev->pdev->dev,
10696 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
10697 			vf, enable ? "on" : "off", ret);
10698 
10699 	return ret;
10700 }
10701 
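/* .set_vf_spoofchk handler: only supported on device version V2 and later.
 * Warn if the VF's VLAN or MAC table is already full, since enabling the
 * spoof check may then cause the VF's packets to be dropped.
 */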
10702 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10703 				 bool enable)
10704 {
10705 	struct hclge_vport *vport = hclge_get_vport(handle);
10706 	struct hclge_dev *hdev = vport->back;
10707 	u32 new_spoofchk = enable ? 1 : 0;
10708 	int ret;
10709 
10710 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10711 		return -EOPNOTSUPP;
10712 
10713 	vport = hclge_get_vf_vport(hdev, vf);
10714 	if (!vport)
10715 		return -EINVAL;
10716 
10717 	if (vport->vf_info.spoofchk == new_spoofchk)
10718 		return 0;
10719 
10720 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10721 		dev_warn(&hdev->pdev->dev,
10722 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10723 			 vf);
10724 	else if (enable && hclge_is_umv_space_full(vport, true))
10725 		dev_warn(&hdev->pdev->dev,
10726 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10727 			 vf);
10728 
10729 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10730 	if (ret)
10731 		return ret;
10732 
10733 	vport->vf_info.spoofchk = new_spoofchk;
10734 	return 0;
10735 }
10736 
10737 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10738 {
10739 	struct hclge_vport *vport = hdev->vport;
10740 	int ret;
10741 	int i;
10742 
10743 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10744 		return 0;
10745 
10746 	/* resume the vf spoof check state after reset */
10747 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10748 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10749 					       vport->vf_info.spoofchk);
10750 		if (ret)
10751 			return ret;
10752 
10753 		vport++;
10754 	}
10755 
10756 	return 0;
10757 }
10758 
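/* .set_vf_trust handler: when a VF is no longer trusted, its promiscuous
 * mode is forcibly disabled and the VF is informed of the change.
 */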
10759 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10760 {
10761 	struct hclge_vport *vport = hclge_get_vport(handle);
10762 	struct hclge_dev *hdev = vport->back;
10763 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10764 	u32 new_trusted = enable ? 1 : 0;
10765 	bool en_bc_pmc;
10766 	int ret;
10767 
10768 	vport = hclge_get_vf_vport(hdev, vf);
10769 	if (!vport)
10770 		return -EINVAL;
10771 
10772 	if (vport->vf_info.trusted == new_trusted)
10773 		return 0;
10774 
10775 	/* Disable promisc mode for VF if it is not trusted any more. */
10776 	if (!enable && vport->vf_info.promisc_enable) {
10777 		en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10778 		ret = hclge_set_vport_promisc_mode(vport, false, false,
10779 						   en_bc_pmc);
10780 		if (ret)
10781 			return ret;
10782 		vport->vf_info.promisc_enable = 0;
10783 		hclge_inform_vf_promisc_info(vport);
10784 	}
10785 
10786 	vport->vf_info.trusted = new_trusted;
10787 
10788 	return 0;
10789 }
10790 
10791 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10792 {
10793 	int ret;
10794 	int vf;
10795 
10796 	/* reset vf rate to default value */
10797 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10798 		struct hclge_vport *vport = &hdev->vport[vf];
10799 
10800 		vport->vf_info.max_tx_rate = 0;
10801 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10802 		if (ret)
10803 			dev_err(&hdev->pdev->dev,
10804 				"vf%d failed to reset to default, ret=%d\n",
10805 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10806 	}
10807 }
10808 
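/* Only max_tx_rate is configurable: min_tx_rate must be 0 and max_tx_rate
 * must lie within [0, mac.max_speed].
 */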
10809 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10810 				     int min_tx_rate, int max_tx_rate)
10811 {
10812 	if (min_tx_rate != 0 ||
10813 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10814 		dev_err(&hdev->pdev->dev,
10815 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10816 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10817 		return -EINVAL;
10818 	}
10819 
10820 	return 0;
10821 }
10822 
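/* .set_vf_rate handler: validate the requested rates, program the VF's
 * qset shaper via hclge_tm_qs_shaper_cfg() and cache max_tx_rate so it
 * can be restored after a reset (see hclge_resume_vf_rate()).
 */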
10823 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10824 			     int min_tx_rate, int max_tx_rate, bool force)
10825 {
10826 	struct hclge_vport *vport = hclge_get_vport(handle);
10827 	struct hclge_dev *hdev = vport->back;
10828 	int ret;
10829 
10830 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10831 	if (ret)
10832 		return ret;
10833 
10834 	vport = hclge_get_vf_vport(hdev, vf);
10835 	if (!vport)
10836 		return -EINVAL;
10837 
10838 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10839 		return 0;
10840 
10841 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10842 	if (ret)
10843 		return ret;
10844 
10845 	vport->vf_info.max_tx_rate = max_tx_rate;
10846 
10847 	return 0;
10848 }
10849 
10850 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10851 {
10852 	struct hnae3_handle *handle = &hdev->vport->nic;
10853 	struct hclge_vport *vport;
10854 	int ret;
10855 	int vf;
10856 
10857 	/* resume the vf max_tx_rate after reset */
10858 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10859 		vport = hclge_get_vf_vport(hdev, vf);
10860 		if (!vport)
10861 			return -EINVAL;
10862 
10863 		/* Zero means the maximum rate; after reset the firmware has
10864 		 * already restored it to the maximum, so just continue.
10865 		 */
10866 		if (!vport->vf_info.max_tx_rate)
10867 			continue;
10868 
10869 		ret = hclge_set_vf_rate(handle, vf, 0,
10870 					vport->vf_info.max_tx_rate, true);
10871 		if (ret) {
10872 			dev_err(&hdev->pdev->dev,
10873 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
10874 				vf, vport->vf_info.max_tx_rate, ret);
10875 			return ret;
10876 		}
10877 	}
10878 
10879 	return 0;
10880 }
10881 
10882 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10883 {
10884 	struct hclge_vport *vport = hdev->vport;
10885 	int i;
10886 
10887 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10888 		hclge_vport_stop(vport);
10889 		vport++;
10890 	}
10891 }
10892 
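/* Re-initialize the hardware after a reset: bring up the command queue, TQP
 * mapping, MAC, TSO/GRO, VLAN, TM, RSS, manager and flow director tables
 * again, then re-enable the hardware error interrupts and restore the
 * per-VF spoof-check and rate settings.
 */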
10893 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10894 {
10895 	struct hclge_dev *hdev = ae_dev->priv;
10896 	struct pci_dev *pdev = ae_dev->pdev;
10897 	int ret;
10898 
10899 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10900 
10901 	hclge_stats_clear(hdev);
10902 	/* NOTE: a PF reset does not need to clear or restore the PF and VF
10903 	 * table entries, so the tables in memory are not cleaned here.
10904 	 */
10905 	if (hdev->reset_type == HNAE3_IMP_RESET ||
10906 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
10907 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10908 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10909 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10910 		hclge_reset_umv_space(hdev);
10911 	}
10912 
10913 	ret = hclge_cmd_init(hdev);
10914 	if (ret) {
10915 		dev_err(&pdev->dev, "Cmd queue init failed\n");
10916 		return ret;
10917 	}
10918 
10919 	ret = hclge_map_tqp(hdev);
10920 	if (ret) {
10921 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10922 		return ret;
10923 	}
10924 
10925 	ret = hclge_mac_init(hdev);
10926 	if (ret) {
10927 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10928 		return ret;
10929 	}
10930 
10931 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10932 	if (ret) {
10933 		dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
10934 		return ret;
10935 	}
10936 
10937 	ret = hclge_config_gro(hdev, true);
10938 	if (ret)
10939 		return ret;
10940 
10941 	ret = hclge_init_vlan_config(hdev);
10942 	if (ret) {
10943 		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
10944 		return ret;
10945 	}
10946 
10947 	ret = hclge_tm_init_hw(hdev, true);
10948 	if (ret) {
10949 		dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
10950 		return ret;
10951 	}
10952 
10953 	ret = hclge_rss_init_hw(hdev);
10954 	if (ret) {
10955 		dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
10956 		return ret;
10957 	}
10958 
10959 	ret = init_mgr_tbl(hdev);
10960 	if (ret) {
10961 		dev_err(&pdev->dev,
10962 			"failed to reinit manager table, ret = %d\n", ret);
10963 		return ret;
10964 	}
10965 
10966 	ret = hclge_init_fd_config(hdev);
10967 	if (ret) {
10968 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10969 		return ret;
10970 	}
10971 
10972 	/* Log and clear the hw errors that have already occurred */
10973 	hclge_handle_all_hns_hw_errors(ae_dev);
10974 
10975 	/* Re-enable the hw error interrupts because
10976 	 * the interrupts get disabled on global reset.
10977 	 */
10978 	ret = hclge_config_nic_hw_error(hdev, true);
10979 	if (ret) {
10980 		dev_err(&pdev->dev,
10981 			"fail(%d) to re-enable NIC hw error interrupts\n",
10982 			ret);
10983 		return ret;
10984 	}
10985 
10986 	if (hdev->roce_client) {
10987 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
10988 		if (ret) {
10989 			dev_err(&pdev->dev,
10990 				"fail(%d) to re-enable roce ras interrupts\n",
10991 				ret);
10992 			return ret;
10993 		}
10994 	}
10995 
10996 	hclge_reset_vport_state(hdev);
10997 	ret = hclge_reset_vport_spoofchk(hdev);
10998 	if (ret)
10999 		return ret;
11000 
11001 	ret = hclge_resume_vf_rate(hdev);
11002 	if (ret)
11003 		return ret;
11004 
11005 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11006 		 HCLGE_DRIVER_NAME);
11007 
11008 	return 0;
11009 }
11010 
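/* Tear down the PF: restore the VF rates and VLANs, stop the service task,
 * unregister the MDIO bus, disable the misc vector and all hardware error
 * interrupts, then release the command queue, IRQ and PCI resources.
 */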
11011 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11012 {
11013 	struct hclge_dev *hdev = ae_dev->priv;
11014 	struct hclge_mac *mac = &hdev->hw.mac;
11015 
11016 	hclge_reset_vf_rate(hdev);
11017 	hclge_clear_vf_vlan(hdev);
11018 	hclge_misc_affinity_teardown(hdev);
11019 	hclge_state_uninit(hdev);
11020 	hclge_uninit_mac_table(hdev);
11021 
11022 	if (mac->phydev)
11023 		mdiobus_unregister(mac->mdio_bus);
11024 
11025 	/* Disable MISC vector(vector0) */
11026 	hclge_enable_vector(&hdev->misc_vector, false);
11027 	synchronize_irq(hdev->misc_vector.vector_irq);
11028 
11029 	/* Disable all hw interrupts */
11030 	hclge_config_mac_tnl_int(hdev, false);
11031 	hclge_config_nic_hw_error(hdev, false);
11032 	hclge_config_rocee_ras_interrupt(hdev, false);
11033 
11034 	hclge_cmd_uninit(hdev);
11035 	hclge_misc_irq_uninit(hdev);
11036 	hclge_pci_uninit(hdev);
11037 	mutex_destroy(&hdev->vport_lock);
11038 	hclge_uninit_vport_vlan_table(hdev);
11039 	ae_dev->priv = NULL;
11040 }
11041 
11042 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11043 {
11044 	struct hclge_vport *vport = hclge_get_vport(handle);
11045 	struct hclge_dev *hdev = vport->back;
11046 
11047 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11048 }
11049 
11050 static void hclge_get_channels(struct hnae3_handle *handle,
11051 			       struct ethtool_channels *ch)
11052 {
11053 	ch->max_combined = hclge_get_max_channels(handle);
11054 	ch->other_count = 1;
11055 	ch->max_other = 1;
11056 	ch->combined_count = handle->kinfo.rss_size;
11057 }
11058 
11059 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11060 					u16 *alloc_tqps, u16 *max_rss_size)
11061 {
11062 	struct hclge_vport *vport = hclge_get_vport(handle);
11063 	struct hclge_dev *hdev = vport->back;
11064 
11065 	*alloc_tqps = vport->alloc_tqps;
11066 	*max_rss_size = hdev->pf_rss_size_max;
11067 }
11068 
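/* .set_channels handler: update the requested RSS size, remap the vport's
 * TQPs, recompute the RSS TC mode and, unless the user has configured the
 * RSS indirection table, spread the new rss_size evenly across it.
 */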
11069 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11070 			      bool rxfh_configured)
11071 {
11072 	struct hclge_vport *vport = hclge_get_vport(handle);
11073 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11074 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11075 	struct hclge_dev *hdev = vport->back;
11076 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11077 	u16 cur_rss_size = kinfo->rss_size;
11078 	u16 cur_tqps = kinfo->num_tqps;
11079 	u16 tc_valid[HCLGE_MAX_TC_NUM];
11080 	u16 roundup_size;
11081 	u32 *rss_indir;
11082 	unsigned int i;
11083 	int ret;
11084 
11085 	kinfo->req_rss_size = new_tqps_num;
11086 
11087 	ret = hclge_tm_vport_map_update(hdev);
11088 	if (ret) {
11089 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
11090 		return ret;
11091 	}
11092 
11093 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
11094 	roundup_size = ilog2(roundup_size);
11095 	/* Set the RSS TC mode according to the new RSS size */
11096 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11097 		tc_valid[i] = 0;
11098 
11099 		if (!(hdev->hw_tc_map & BIT(i)))
11100 			continue;
11101 
11102 		tc_valid[i] = 1;
11103 		tc_size[i] = roundup_size;
11104 		tc_offset[i] = kinfo->rss_size * i;
11105 	}
11106 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11107 	if (ret)
11108 		return ret;
11109 
11110 	/* RSS indirection table has already been configured by the user */
11111 	if (rxfh_configured)
11112 		goto out;
11113 
11114 	/* Reinitialize the RSS indirection table according to the new RSS size */
11115 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
11116 	if (!rss_indir)
11117 		return -ENOMEM;
11118 
11119 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
11120 		rss_indir[i] = i % kinfo->rss_size;
11121 
11122 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11123 	if (ret)
11124 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11125 			ret);
11126 
11127 	kfree(rss_indir);
11128 
11129 out:
11130 	if (!ret)
11131 		dev_info(&hdev->pdev->dev,
11132 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11133 			 cur_rss_size, kinfo->rss_size,
11134 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11135 
11136 	return ret;
11137 }
11138 
11139 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11140 			      u32 *regs_num_64_bit)
11141 {
11142 	struct hclge_desc desc;
11143 	u32 total_num;
11144 	int ret;
11145 
11146 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11147 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11148 	if (ret) {
11149 		dev_err(&hdev->pdev->dev,
11150 			"Query register number cmd failed, ret = %d.\n", ret);
11151 		return ret;
11152 	}
11153 
11154 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
11155 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
11156 
11157 	total_num = *regs_num_32_bit + *regs_num_64_bit;
11158 	if (!total_num)
11159 		return -EINVAL;
11160 
11161 	return 0;
11162 }
11163 
11164 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11165 				 void *data)
11166 {
11167 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11168 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11169 
11170 	struct hclge_desc *desc;
11171 	u32 *reg_val = data;
11172 	__le32 *desc_data;
11173 	int nodata_num;
11174 	int cmd_num;
11175 	int i, k, n;
11176 	int ret;
11177 
11178 	if (regs_num == 0)
11179 		return 0;
11180 
11181 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11182 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11183 			       HCLGE_32_BIT_REG_RTN_DATANUM);
11184 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11185 	if (!desc)
11186 		return -ENOMEM;
11187 
11188 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11189 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11190 	if (ret) {
11191 		dev_err(&hdev->pdev->dev,
11192 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
11193 		kfree(desc);
11194 		return ret;
11195 	}
11196 
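	/* The first descriptor contributes only its data[] area (DATANUM minus
	 * the 2-word command header), while the following descriptors are
	 * consumed in full, the space of their headers included.
	 */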
11197 	for (i = 0; i < cmd_num; i++) {
11198 		if (i == 0) {
11199 			desc_data = (__le32 *)(&desc[i].data[0]);
11200 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11201 		} else {
11202 			desc_data = (__le32 *)(&desc[i]);
11203 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
11204 		}
11205 		for (k = 0; k < n; k++) {
11206 			*reg_val++ = le32_to_cpu(*desc_data++);
11207 
11208 			regs_num--;
11209 			if (!regs_num)
11210 				break;
11211 		}
11212 	}
11213 
11214 	kfree(desc);
11215 	return 0;
11216 }
11217 
11218 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11219 				 void *data)
11220 {
11221 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11222 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11223 
11224 	struct hclge_desc *desc;
11225 	u64 *reg_val = data;
11226 	__le64 *desc_data;
11227 	int nodata_len;
11228 	int cmd_num;
11229 	int i, k, n;
11230 	int ret;
11231 
11232 	if (regs_num == 0)
11233 		return 0;
11234 
11235 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11236 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11237 			       HCLGE_64_BIT_REG_RTN_DATANUM);
11238 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11239 	if (!desc)
11240 		return -ENOMEM;
11241 
11242 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11243 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11244 	if (ret) {
11245 		dev_err(&hdev->pdev->dev,
11246 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
11247 		kfree(desc);
11248 		return ret;
11249 	}
11250 
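	/* Same layout as the 32-bit query: the first descriptor contributes
	 * only its data[] area, later descriptors are consumed in full.
	 */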
11251 	for (i = 0; i < cmd_num; i++) {
11252 		if (i == 0) {
11253 			desc_data = (__le64 *)(&desc[i].data[0]);
11254 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11255 		} else {
11256 			desc_data = (__le64 *)(&desc[i]);
11257 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
11258 		}
11259 		for (k = 0; k < n; k++) {
11260 			*reg_val++ = le64_to_cpu(*desc_data++);
11261 
11262 			regs_num--;
11263 			if (!regs_num)
11264 				break;
11265 		}
11266 	}
11267 
11268 	kfree(desc);
11269 	return 0;
11270 }
11271 
11272 #define MAX_SEPARATE_NUM	4
11273 #define SEPARATOR_VALUE		0xFDFCFBFA
11274 #define REG_NUM_PER_LINE	4
11275 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
11276 #define REG_SEPARATOR_LINE	1
11277 #define REG_NUM_REMAIN_MASK	3
11278 #define BD_LIST_MAX_NUM		30
11279 
11280 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11281 {
11282 	int i;
11283 
11284 	/* initialize all command BDs except the last one */
11285 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11286 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11287 					   true);
11288 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11289 	}
11290 
11291 	/* initialize the last command BD */
11292 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11293 
11294 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11295 }
11296 
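/* Query how many command BDs the firmware needs for each DFX register type;
 * hclge_dfx_bd_offset_list[] gives each type's offset into the returned data.
 */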
11297 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11298 				    int *bd_num_list,
11299 				    u32 type_num)
11300 {
11301 	u32 entries_per_desc, desc_index, index, offset, i;
11302 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11303 	int ret;
11304 
11305 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
11306 	if (ret) {
11307 		dev_err(&hdev->pdev->dev,
11308 			"Get dfx bd num fail, status is %d.\n", ret);
11309 		return ret;
11310 	}
11311 
11312 	entries_per_desc = ARRAY_SIZE(desc[0].data);
11313 	for (i = 0; i < type_num; i++) {
11314 		offset = hclge_dfx_bd_offset_list[i];
11315 		index = offset % entries_per_desc;
11316 		desc_index = offset / entries_per_desc;
11317 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11318 	}
11319 
11320 	return ret;
11321 }
11322 
11323 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11324 				  struct hclge_desc *desc_src, int bd_num,
11325 				  enum hclge_opcode_type cmd)
11326 {
11327 	struct hclge_desc *desc = desc_src;
11328 	int i, ret;
11329 
11330 	hclge_cmd_setup_basic_desc(desc, cmd, true);
11331 	for (i = 0; i < bd_num - 1; i++) {
11332 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11333 		desc++;
11334 		hclge_cmd_setup_basic_desc(desc, cmd, true);
11335 	}
11336 
11337 	desc = desc_src;
11338 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11339 	if (ret)
11340 		dev_err(&hdev->pdev->dev,
11341 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11342 			cmd, ret);
11343 
11344 	return ret;
11345 }
11346 
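/* Copy the register values out of the descriptors, then append one to four
 * SEPARATOR_VALUE marker words so the block ends aligned to REG_LEN_PER_LINE.
 * Returns the number of u32 words written.
 */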
11347 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11348 				    void *data)
11349 {
11350 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11351 	struct hclge_desc *desc = desc_src;
11352 	u32 *reg = data;
11353 
11354 	entries_per_desc = ARRAY_SIZE(desc->data);
11355 	reg_num = entries_per_desc * bd_num;
11356 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11357 	for (i = 0; i < reg_num; i++) {
11358 		index = i % entries_per_desc;
11359 		desc_index = i / entries_per_desc;
11360 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
11361 	}
11362 	for (i = 0; i < separator_num; i++)
11363 		*reg++ = SEPARATOR_VALUE;
11364 
11365 	return reg_num + separator_num;
11366 }
11367 
11368 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11369 {
11370 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11371 	int data_len_per_desc, bd_num, i;
11372 	int bd_num_list[BD_LIST_MAX_NUM];
11373 	u32 data_len;
11374 	int ret;
11375 
11376 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11377 	if (ret) {
11378 		dev_err(&hdev->pdev->dev,
11379 			"Get dfx reg bd num fail, status is %d.\n", ret);
11380 		return ret;
11381 	}
11382 
11383 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
11384 	*len = 0;
11385 	for (i = 0; i < dfx_reg_type_num; i++) {
11386 		bd_num = bd_num_list[i];
11387 		data_len = data_len_per_desc * bd_num;
11388 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11389 	}
11390 
11391 	return ret;
11392 }
11393 
11394 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11395 {
11396 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11397 	int bd_num, bd_num_max, buf_len, i;
11398 	int bd_num_list[BD_LIST_MAX_NUM];
11399 	struct hclge_desc *desc_src;
11400 	u32 *reg = data;
11401 	int ret;
11402 
11403 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11404 	if (ret) {
11405 		dev_err(&hdev->pdev->dev,
11406 			"Get dfx reg bd num fail, status is %d.\n", ret);
11407 		return ret;
11408 	}
11409 
11410 	bd_num_max = bd_num_list[0];
11411 	for (i = 1; i < dfx_reg_type_num; i++)
11412 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11413 
11414 	buf_len = sizeof(*desc_src) * bd_num_max;
11415 	desc_src = kzalloc(buf_len, GFP_KERNEL);
11416 	if (!desc_src)
11417 		return -ENOMEM;
11418 
11419 	for (i = 0; i < dfx_reg_type_num; i++) {
11420 		bd_num = bd_num_list[i];
11421 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11422 					     hclge_dfx_reg_opcode_list[i]);
11423 		if (ret) {
11424 			dev_err(&hdev->pdev->dev,
11425 				"Get dfx reg fail, status is %d.\n", ret);
11426 			break;
11427 		}
11428 
11429 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11430 	}
11431 
11432 	kfree(desc_src);
11433 	return ret;
11434 }
11435 
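/* Dump the cmdq, common, per-ring and per-TQP-vector registers straight from
 * the PF register space; each group is padded with SEPARATOR_VALUE markers.
 * Returns the number of u32 words written.
 */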
11436 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11437 			      struct hnae3_knic_private_info *kinfo)
11438 {
11439 #define HCLGE_RING_REG_OFFSET		0x200
11440 #define HCLGE_RING_INT_REG_OFFSET	0x4
11441 
11442 	int i, j, reg_num, separator_num;
11443 	int data_num_sum;
11444 	u32 *reg = data;
11445 
11446 	/* fetch the per-PF register values from the PF PCIe register space */
11447 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11448 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11449 	for (i = 0; i < reg_num; i++)
11450 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11451 	for (i = 0; i < separator_num; i++)
11452 		*reg++ = SEPARATOR_VALUE;
11453 	data_num_sum = reg_num + separator_num;
11454 
11455 	reg_num = ARRAY_SIZE(common_reg_addr_list);
11456 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11457 	for (i = 0; i < reg_num; i++)
11458 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11459 	for (i = 0; i < separator_num; i++)
11460 		*reg++ = SEPARATOR_VALUE;
11461 	data_num_sum += reg_num + separator_num;
11462 
11463 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
11464 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11465 	for (j = 0; j < kinfo->num_tqps; j++) {
11466 		for (i = 0; i < reg_num; i++)
11467 			*reg++ = hclge_read_dev(&hdev->hw,
11468 						ring_reg_addr_list[i] +
11469 						HCLGE_RING_REG_OFFSET * j);
11470 		for (i = 0; i < separator_num; i++)
11471 			*reg++ = SEPARATOR_VALUE;
11472 	}
11473 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11474 
11475 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11476 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11477 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
11478 		for (i = 0; i < reg_num; i++)
11479 			*reg++ = hclge_read_dev(&hdev->hw,
11480 						tqp_intr_reg_addr_list[i] +
11481 						HCLGE_RING_INT_REG_OFFSET * j);
11482 		for (i = 0; i < separator_num; i++)
11483 			*reg++ = SEPARATOR_VALUE;
11484 	}
11485 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11486 
11487 	return data_num_sum;
11488 }
11489 
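/* Report the register dump length in bytes: the directly readable register
 * groups plus the 32-bit, 64-bit and DFX firmware register sets, each group
 * rounded up to include a separator line.
 */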
11490 static int hclge_get_regs_len(struct hnae3_handle *handle)
11491 {
11492 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11493 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11494 	struct hclge_vport *vport = hclge_get_vport(handle);
11495 	struct hclge_dev *hdev = vport->back;
11496 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11497 	int regs_lines_32_bit, regs_lines_64_bit;
11498 	int ret;
11499 
11500 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11501 	if (ret) {
11502 		dev_err(&hdev->pdev->dev,
11503 			"Get register number failed, ret = %d.\n", ret);
11504 		return ret;
11505 	}
11506 
11507 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11508 	if (ret) {
11509 		dev_err(&hdev->pdev->dev,
11510 			"Get dfx reg len failed, ret = %d.\n", ret);
11511 		return ret;
11512 	}
11513 
11514 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11515 		REG_SEPARATOR_LINE;
11516 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11517 		REG_SEPARATOR_LINE;
11518 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11519 		REG_SEPARATOR_LINE;
11520 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11521 		REG_SEPARATOR_LINE;
11522 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11523 		REG_SEPARATOR_LINE;
11524 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11525 		REG_SEPARATOR_LINE;
11526 
11527 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11528 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11529 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11530 }
11531 
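/* ethtool get_regs handler: dump the direct PF registers, then the 32-bit
 * and 64-bit firmware register sets (each followed by separator markers)
 * and finally the DFX registers.
 */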
11532 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11533 			   void *data)
11534 {
11535 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11536 	struct hclge_vport *vport = hclge_get_vport(handle);
11537 	struct hclge_dev *hdev = vport->back;
11538 	u32 regs_num_32_bit, regs_num_64_bit;
11539 	int i, reg_num, separator_num, ret;
11540 	u32 *reg = data;
11541 
11542 	*version = hdev->fw_version;
11543 
11544 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11545 	if (ret) {
11546 		dev_err(&hdev->pdev->dev,
11547 			"Get register number failed, ret = %d.\n", ret);
11548 		return;
11549 	}
11550 
11551 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11552 
11553 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11554 	if (ret) {
11555 		dev_err(&hdev->pdev->dev,
11556 			"Get 32 bit register failed, ret = %d.\n", ret);
11557 		return;
11558 	}
11559 	reg_num = regs_num_32_bit;
11560 	reg += reg_num;
11561 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11562 	for (i = 0; i < separator_num; i++)
11563 		*reg++ = SEPARATOR_VALUE;
11564 
11565 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11566 	if (ret) {
11567 		dev_err(&hdev->pdev->dev,
11568 			"Get 64 bit register failed, ret = %d.\n", ret);
11569 		return;
11570 	}
11571 	reg_num = regs_num_64_bit * 2;
11572 	reg += reg_num;
11573 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11574 	for (i = 0; i < separator_num; i++)
11575 		*reg++ = SEPARATOR_VALUE;
11576 
11577 	ret = hclge_get_dfx_reg(hdev, reg);
11578 	if (ret)
11579 		dev_err(&hdev->pdev->dev,
11580 			"Get dfx register failed, ret = %d.\n", ret);
11581 }
11582 
11583 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11584 {
11585 	struct hclge_set_led_state_cmd *req;
11586 	struct hclge_desc desc;
11587 	int ret;
11588 
11589 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11590 
11591 	req = (struct hclge_set_led_state_cmd *)desc.data;
11592 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11593 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11594 
11595 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11596 	if (ret)
11597 		dev_err(&hdev->pdev->dev,
11598 			"Send set led state cmd error, ret = %d\n", ret);
11599 
11600 	return ret;
11601 }
11602 
11603 enum hclge_led_status {
11604 	HCLGE_LED_OFF,
11605 	HCLGE_LED_ON,
11606 	HCLGE_LED_NO_CHANGE = 0xFF,
11607 };
11608 
11609 static int hclge_set_led_id(struct hnae3_handle *handle,
11610 			    enum ethtool_phys_id_state status)
11611 {
11612 	struct hclge_vport *vport = hclge_get_vport(handle);
11613 	struct hclge_dev *hdev = vport->back;
11614 
11615 	switch (status) {
11616 	case ETHTOOL_ID_ACTIVE:
11617 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
11618 	case ETHTOOL_ID_INACTIVE:
11619 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11620 	default:
11621 		return -EINVAL;
11622 	}
11623 }
11624 
11625 static void hclge_get_link_mode(struct hnae3_handle *handle,
11626 				unsigned long *supported,
11627 				unsigned long *advertising)
11628 {
11629 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11630 	struct hclge_vport *vport = hclge_get_vport(handle);
11631 	struct hclge_dev *hdev = vport->back;
11632 	unsigned int idx = 0;
11633 
11634 	for (; idx < size; idx++) {
11635 		supported[idx] = hdev->hw.mac.supported[idx];
11636 		advertising[idx] = hdev->hw.mac.advertising[idx];
11637 	}
11638 }
11639 
11640 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11641 {
11642 	struct hclge_vport *vport = hclge_get_vport(handle);
11643 	struct hclge_dev *hdev = vport->back;
11644 
11645 	return hclge_config_gro(hdev, enable);
11646 }
11647 
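/* Called from the periodic service task: when the overflow promisc flags
 * change, or a promisc update is pending, re-apply the promiscuous mode and
 * VLAN filter state of the PF vport.
 */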
11648 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11649 {
11650 	struct hclge_vport *vport = &hdev->vport[0];
11651 	struct hnae3_handle *handle = &vport->nic;
11652 	u8 tmp_flags;
11653 	int ret;
11654 
11655 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11656 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11657 		vport->last_promisc_flags = vport->overflow_promisc_flags;
11658 	}
11659 
11660 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11661 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11662 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11663 					     tmp_flags & HNAE3_MPE);
11664 		if (!ret) {
11665 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11666 			hclge_enable_vlan_filter(handle,
11667 						 tmp_flags & HNAE3_VLAN_FLTR);
11668 		}
11669 	}
11670 }
11671 
11672 static bool hclge_module_existed(struct hclge_dev *hdev)
11673 {
11674 	struct hclge_desc desc;
11675 	u32 existed;
11676 	int ret;
11677 
11678 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11679 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11680 	if (ret) {
11681 		dev_err(&hdev->pdev->dev,
11682 			"failed to get SFP exist state, ret = %d\n", ret);
11683 		return false;
11684 	}
11685 
11686 	existed = le32_to_cpu(desc.data[0]);
11687 
11688 	return existed != 0;
11689 }
11690 
11691 /* One read needs 6 BDs (140 bytes in total).
11692  * Return the number of bytes actually read; 0 means the read failed.
11693  */
11694 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11695 				     u32 len, u8 *data)
11696 {
11697 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11698 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11699 	u16 read_len;
11700 	u16 copy_len;
11701 	int ret;
11702 	int i;
11703 
11704 	/* setup all 6 bds to read module eeprom info. */
11705 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11706 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11707 					   true);
11708 
11709 		/* bd0~bd4 need next flag */
11710 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11711 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11712 	}
11713 
11714 	/* setup bd0, this bd contains offset and read length. */
11715 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11716 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11717 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11718 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
11719 
11720 	ret = hclge_cmd_send(&hdev->hw, desc, i);
11721 	if (ret) {
11722 		dev_err(&hdev->pdev->dev,
11723 			"failed to get SFP eeprom info, ret = %d\n", ret);
11724 		return 0;
11725 	}
11726 
11727 	/* copy sfp info from bd0 to out buffer. */
11728 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11729 	memcpy(data, sfp_info_bd0->data, copy_len);
11730 	read_len = copy_len;
11731 
11732 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
11733 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11734 		if (read_len >= len)
11735 			return read_len;
11736 
11737 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11738 		memcpy(data + read_len, desc[i].data, copy_len);
11739 		read_len += copy_len;
11740 	}
11741 
11742 	return read_len;
11743 }
11744 
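/* Read 'len' bytes of the module EEPROM starting at 'offset', in chunks of
 * at most HCLGE_SFP_INFO_MAX_LEN bytes per firmware query; only fiber media
 * is supported.
 */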
11745 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11746 				   u32 len, u8 *data)
11747 {
11748 	struct hclge_vport *vport = hclge_get_vport(handle);
11749 	struct hclge_dev *hdev = vport->back;
11750 	u32 read_len = 0;
11751 	u16 data_len;
11752 
11753 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11754 		return -EOPNOTSUPP;
11755 
11756 	if (!hclge_module_existed(hdev))
11757 		return -ENXIO;
11758 
11759 	while (read_len < len) {
11760 		data_len = hclge_get_sfp_eeprom_info(hdev,
11761 						     offset + read_len,
11762 						     len - read_len,
11763 						     data + read_len);
11764 		if (!data_len)
11765 			return -EIO;
11766 
11767 		read_len += data_len;
11768 	}
11769 
11770 	return 0;
11771 }
11772 
11773 static const struct hnae3_ae_ops hclge_ops = {
11774 	.init_ae_dev = hclge_init_ae_dev,
11775 	.uninit_ae_dev = hclge_uninit_ae_dev,
11776 	.flr_prepare = hclge_flr_prepare,
11777 	.flr_done = hclge_flr_done,
11778 	.init_client_instance = hclge_init_client_instance,
11779 	.uninit_client_instance = hclge_uninit_client_instance,
11780 	.map_ring_to_vector = hclge_map_ring_to_vector,
11781 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11782 	.get_vector = hclge_get_vector,
11783 	.put_vector = hclge_put_vector,
11784 	.set_promisc_mode = hclge_set_promisc_mode,
11785 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
11786 	.set_loopback = hclge_set_loopback,
11787 	.start = hclge_ae_start,
11788 	.stop = hclge_ae_stop,
11789 	.client_start = hclge_client_start,
11790 	.client_stop = hclge_client_stop,
11791 	.get_status = hclge_get_status,
11792 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
11793 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11794 	.get_media_type = hclge_get_media_type,
11795 	.check_port_speed = hclge_check_port_speed,
11796 	.get_fec = hclge_get_fec,
11797 	.set_fec = hclge_set_fec,
11798 	.get_rss_key_size = hclge_get_rss_key_size,
11799 	.get_rss_indir_size = hclge_get_rss_indir_size,
11800 	.get_rss = hclge_get_rss,
11801 	.set_rss = hclge_set_rss,
11802 	.set_rss_tuple = hclge_set_rss_tuple,
11803 	.get_rss_tuple = hclge_get_rss_tuple,
11804 	.get_tc_size = hclge_get_tc_size,
11805 	.get_mac_addr = hclge_get_mac_addr,
11806 	.set_mac_addr = hclge_set_mac_addr,
11807 	.do_ioctl = hclge_do_ioctl,
11808 	.add_uc_addr = hclge_add_uc_addr,
11809 	.rm_uc_addr = hclge_rm_uc_addr,
11810 	.add_mc_addr = hclge_add_mc_addr,
11811 	.rm_mc_addr = hclge_rm_mc_addr,
11812 	.set_autoneg = hclge_set_autoneg,
11813 	.get_autoneg = hclge_get_autoneg,
11814 	.restart_autoneg = hclge_restart_autoneg,
11815 	.halt_autoneg = hclge_halt_autoneg,
11816 	.get_pauseparam = hclge_get_pauseparam,
11817 	.set_pauseparam = hclge_set_pauseparam,
11818 	.set_mtu = hclge_set_mtu,
11819 	.reset_queue = hclge_reset_tqp,
11820 	.get_stats = hclge_get_stats,
11821 	.get_mac_stats = hclge_get_mac_stat,
11822 	.update_stats = hclge_update_stats,
11823 	.get_strings = hclge_get_strings,
11824 	.get_sset_count = hclge_get_sset_count,
11825 	.get_fw_version = hclge_get_fw_version,
11826 	.get_mdix_mode = hclge_get_mdix_mode,
11827 	.enable_vlan_filter = hclge_enable_vlan_filter,
11828 	.set_vlan_filter = hclge_set_vlan_filter,
11829 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11830 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11831 	.reset_event = hclge_reset_event,
11832 	.get_reset_level = hclge_get_reset_level,
11833 	.set_default_reset_request = hclge_set_def_reset_request,
11834 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11835 	.set_channels = hclge_set_channels,
11836 	.get_channels = hclge_get_channels,
11837 	.get_regs_len = hclge_get_regs_len,
11838 	.get_regs = hclge_get_regs,
11839 	.set_led_id = hclge_set_led_id,
11840 	.get_link_mode = hclge_get_link_mode,
11841 	.add_fd_entry = hclge_add_fd_entry,
11842 	.del_fd_entry = hclge_del_fd_entry,
11843 	.del_all_fd_entries = hclge_del_all_fd_entries,
11844 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11845 	.get_fd_rule_info = hclge_get_fd_rule_info,
11846 	.get_fd_all_rules = hclge_get_all_rules,
11847 	.enable_fd = hclge_enable_fd,
11848 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
11849 	.dbg_run_cmd = hclge_dbg_run_cmd,
11850 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
11851 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
11852 	.ae_dev_resetting = hclge_ae_dev_resetting,
11853 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11854 	.set_gro_en = hclge_gro_en,
11855 	.get_global_queue_id = hclge_covert_handle_qid_global,
11856 	.set_timer_task = hclge_set_timer_task,
11857 	.mac_connect_phy = hclge_mac_connect_phy,
11858 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
11859 	.get_vf_config = hclge_get_vf_config,
11860 	.set_vf_link_state = hclge_set_vf_link_state,
11861 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
11862 	.set_vf_trust = hclge_set_vf_trust,
11863 	.set_vf_rate = hclge_set_vf_rate,
11864 	.set_vf_mac = hclge_set_vf_mac,
11865 	.get_module_eeprom = hclge_get_module_eeprom,
11866 	.get_cmdq_stat = hclge_get_cmdq_stat,
11867 	.add_cls_flower = hclge_add_cls_flower,
11868 	.del_cls_flower = hclge_del_cls_flower,
11869 	.cls_flower_active = hclge_is_cls_flower_active,
11870 };
11871 
11872 static struct hnae3_ae_algo ae_algo = {
11873 	.ops = &hclge_ops,
11874 	.pdev_id_table = ae_algo_pci_tbl,
11875 };
11876 
11877 static int hclge_init(void)
11878 {
11879 	pr_info("%s is initializing\n", HCLGE_NAME);
11880 
11881 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11882 	if (!hclge_wq) {
11883 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11884 		return -ENOMEM;
11885 	}
11886 
11887 	hnae3_register_ae_algo(&ae_algo);
11888 
11889 	return 0;
11890 }
11891 
11892 static void hclge_exit(void)
11893 {
11894 	hnae3_unregister_ae_algo(&ae_algo);
11895 	destroy_workqueue(hclge_wq);
11896 }
11897 module_init(hclge_init);
11898 module_exit(hclge_exit);
11899 
11900 MODULE_LICENSE("GPL");
11901 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11902 MODULE_DESCRIPTION("HCLGE Driver");
11903 MODULE_VERSION(HCLGE_MOD_VERSION);
11904