xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision a4989fa91110508b64eea7ccde63d062113988ff)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
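/* read a u64 statistic located at byte @offset inside the stats structure @p */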
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
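/* each entry below is { key field, field width in bits } */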
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48},
389 	{ OUTER_SRC_MAC, 48},
390 	{ OUTER_VLAN_TAG_FST, 16},
391 	{ OUTER_VLAN_TAG_SEC, 16},
392 	{ OUTER_ETH_TYPE, 16},
393 	{ OUTER_L2_RSV, 16},
394 	{ OUTER_IP_TOS, 8},
395 	{ OUTER_IP_PROTO, 8},
396 	{ OUTER_SRC_IP, 32},
397 	{ OUTER_DST_IP, 32},
398 	{ OUTER_L3_RSV, 16},
399 	{ OUTER_SRC_PORT, 16},
400 	{ OUTER_DST_PORT, 16},
401 	{ OUTER_L4_RSV, 32},
402 	{ OUTER_TUN_VNI, 24},
403 	{ OUTER_TUN_FLOW_ID, 8},
404 	{ INNER_DST_MAC, 48},
405 	{ INNER_SRC_MAC, 48},
406 	{ INNER_VLAN_TAG_FST, 16},
407 	{ INNER_VLAN_TAG_SEC, 16},
408 	{ INNER_ETH_TYPE, 16},
409 	{ INNER_L2_RSV, 16},
410 	{ INNER_IP_TOS, 8},
411 	{ INNER_IP_PROTO, 8},
412 	{ INNER_SRC_IP, 32},
413 	{ INNER_DST_IP, 32},
414 	{ INNER_L3_RSV, 16},
415 	{ INNER_SRC_PORT, 16},
416 	{ INNER_DST_PORT, 16},
417 	{ INNER_L4_RSV, 32},
418 };
419 
420 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
421 {
422 #define HCLGE_MAC_CMD_NUM 21
423 
424 	u64 *data = (u64 *)(&hdev->mac_stats);
425 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
426 	__le64 *desc_data;
427 	int i, k, n;
428 	int ret;
429 
430 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
432 	if (ret) {
433 		dev_err(&hdev->pdev->dev,
434 			"Get MAC pkt stats fail, status = %d.\n", ret);
435 
436 		return ret;
437 	}
438 
439 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440 		/* for special opcode 0032, only the first desc has the head */
441 		if (unlikely(i == 0)) {
442 			desc_data = (__le64 *)(&desc[i].data[0]);
443 			n = HCLGE_RD_FIRST_STATS_NUM;
444 		} else {
445 			desc_data = (__le64 *)(&desc[i]);
446 			n = HCLGE_RD_OTHER_STATS_NUM;
447 		}
448 
449 		for (k = 0; k < n; k++) {
450 			*data += le64_to_cpu(*desc_data);
451 			data++;
452 			desc_data++;
453 		}
454 	}
455 
456 	return 0;
457 }
458 
459 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460 {
461 	u64 *data = (u64 *)(&hdev->mac_stats);
462 	struct hclge_desc *desc;
463 	__le64 *desc_data;
464 	u16 i, k, n;
465 	int ret;
466 
467 	/* This may be called inside atomic sections,
468 	 * so GFP_ATOMIC is more suitable here
469 	 */
470 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471 	if (!desc)
472 		return -ENOMEM;
473 
474 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476 	if (ret) {
477 		kfree(desc);
478 		return ret;
479 	}
480 
481 	for (i = 0; i < desc_num; i++) {
482 		/* for special opcode 0034, only the first desc has the head */
483 		if (i == 0) {
484 			desc_data = (__le64 *)(&desc[i].data[0]);
485 			n = HCLGE_RD_FIRST_STATS_NUM;
486 		} else {
487 			desc_data = (__le64 *)(&desc[i]);
488 			n = HCLGE_RD_OTHER_STATS_NUM;
489 		}
490 
491 		for (k = 0; k < n; k++) {
492 			*data += le64_to_cpu(*desc_data);
493 			data++;
494 			desc_data++;
495 		}
496 	}
497 
498 	kfree(desc);
499 
500 	return 0;
501 }
502 
503 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504 {
505 	struct hclge_desc desc;
506 	__le32 *desc_data;
507 	u32 reg_num;
508 	int ret;
509 
510 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512 	if (ret)
513 		return ret;
514 
515 	desc_data = (__le32 *)(&desc.data[0]);
516 	reg_num = le32_to_cpu(*desc_data);
517 
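	/* convert the register count into a descriptor count: the first desc
	 * covers 3 registers and each subsequent desc covers 4, rounded up
	 */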
518 	*desc_num = 1 + ((reg_num - 3) >> 2) +
519 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520 
521 	return 0;
522 }
523 
524 static int hclge_mac_update_stats(struct hclge_dev *hdev)
525 {
526 	u32 desc_num;
527 	int ret;
528 
529 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
530 
531 	/* The firmware supports the new statistics acquisition method */
532 	if (!ret)
533 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
534 	else if (ret == -EOPNOTSUPP)
535 		ret = hclge_mac_update_stats_defective(hdev);
536 	else
537 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538 
539 	return ret;
540 }
541 
542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
543 {
544 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545 	struct hclge_vport *vport = hclge_get_vport(handle);
546 	struct hclge_dev *hdev = vport->back;
547 	struct hnae3_queue *queue;
548 	struct hclge_desc desc[1];
549 	struct hclge_tqp *tqp;
550 	int ret, i;
551 
552 	for (i = 0; i < kinfo->num_tqps; i++) {
553 		queue = handle->kinfo.tqp[i];
554 		tqp = container_of(queue, struct hclge_tqp, q);
555 		/* command : HCLGE_OPC_QUERY_RX_STATS */
556 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
557 					   true);
558 
559 		desc[0].data[0] = cpu_to_le32(tqp->index);
560 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
561 		if (ret) {
562 			dev_err(&hdev->pdev->dev,
563 				"Query tqp stat fail, status = %d, queue = %d\n",
564 				ret, i);
565 			return ret;
566 		}
567 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
568 			le32_to_cpu(desc[0].data[1]);
569 	}
570 
571 	for (i = 0; i < kinfo->num_tqps; i++) {
572 		queue = handle->kinfo.tqp[i];
573 		tqp = container_of(queue, struct hclge_tqp, q);
574 		/* command : HCLGE_OPC_QUERY_TX_STATS */
575 		hclge_cmd_setup_basic_desc(&desc[0],
576 					   HCLGE_OPC_QUERY_TX_STATS,
577 					   true);
578 
579 		desc[0].data[0] = cpu_to_le32(tqp->index);
580 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
581 		if (ret) {
582 			dev_err(&hdev->pdev->dev,
583 				"Query tqp stat fail, status = %d, queue = %d\n",
584 				ret, i);
585 			return ret;
586 		}
587 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
588 			le32_to_cpu(desc[0].data[1]);
589 	}
590 
591 	return 0;
592 }
593 
594 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
595 {
596 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597 	struct hclge_tqp *tqp;
598 	u64 *buff = data;
599 	int i;
600 
601 	for (i = 0; i < kinfo->num_tqps; i++) {
602 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
604 	}
605 
606 	for (i = 0; i < kinfo->num_tqps; i++) {
607 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
608 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
609 	}
610 
611 	return buff;
612 }
613 
614 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615 {
616 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617 
618 	/* each tqp has a TX queue and an RX queue */
619 	return kinfo->num_tqps * (2);
620 }
621 
622 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
623 {
624 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625 	u8 *buff = data;
626 	int i;
627 
628 	for (i = 0; i < kinfo->num_tqps; i++) {
629 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630 			struct hclge_tqp, q);
631 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
632 			 tqp->index);
633 		buff = buff + ETH_GSTRING_LEN;
634 	}
635 
636 	for (i = 0; i < kinfo->num_tqps; i++) {
637 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638 			struct hclge_tqp, q);
639 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
640 			 tqp->index);
641 		buff = buff + ETH_GSTRING_LEN;
642 	}
643 
644 	return buff;
645 }
646 
647 static u64 *hclge_comm_get_stats(const void *comm_stats,
648 				 const struct hclge_comm_stats_str strs[],
649 				 int size, u64 *data)
650 {
651 	u64 *buf = data;
652 	u32 i;
653 
654 	for (i = 0; i < size; i++)
655 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
656 
657 	return buf + size;
658 }
659 
660 static u8 *hclge_comm_get_strings(u32 stringset,
661 				  const struct hclge_comm_stats_str strs[],
662 				  int size, u8 *data)
663 {
664 	char *buff = (char *)data;
665 	u32 i;
666 
667 	if (stringset != ETH_SS_STATS)
668 		return buff;
669 
670 	for (i = 0; i < size; i++) {
671 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
672 		buff = buff + ETH_GSTRING_LEN;
673 	}
674 
675 	return (u8 *)buff;
676 }
677 
678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
679 {
680 	struct hnae3_handle *handle;
681 	int status;
682 
683 	handle = &hdev->vport[0].nic;
684 	if (handle->client) {
685 		status = hclge_tqps_update_stats(handle);
686 		if (status) {
687 			dev_err(&hdev->pdev->dev,
688 				"Update TQPS stats fail, status = %d.\n",
689 				status);
690 		}
691 	}
692 
693 	status = hclge_mac_update_stats(hdev);
694 	if (status)
695 		dev_err(&hdev->pdev->dev,
696 			"Update MAC stats fail, status = %d.\n", status);
697 }
698 
699 static void hclge_update_stats(struct hnae3_handle *handle,
700 			       struct net_device_stats *net_stats)
701 {
702 	struct hclge_vport *vport = hclge_get_vport(handle);
703 	struct hclge_dev *hdev = vport->back;
704 	int status;
705 
706 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
707 		return;
708 
709 	status = hclge_mac_update_stats(hdev);
710 	if (status)
711 		dev_err(&hdev->pdev->dev,
712 			"Update MAC stats fail, status = %d.\n",
713 			status);
714 
715 	status = hclge_tqps_update_stats(handle);
716 	if (status)
717 		dev_err(&hdev->pdev->dev,
718 			"Update TQPS stats fail, status = %d.\n",
719 			status);
720 
721 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
722 }
723 
724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
725 {
726 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727 		HNAE3_SUPPORT_PHY_LOOPBACK |\
728 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
730 
731 	struct hclge_vport *vport = hclge_get_vport(handle);
732 	struct hclge_dev *hdev = vport->back;
733 	int count = 0;
734 
735 	/* Loopback test support rules:
736 	 * mac: supported only in GE mode
737 	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
738 	 * phy: supported only when a phy device exists on the board
739 	 */
740 	if (stringset == ETH_SS_TEST) {
741 		/* clear the loopback bit flags first */
742 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
743 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747 			count += 1;
748 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
749 		}
750 
751 		count += 2;
752 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
754 
755 		if (hdev->hw.mac.phydev) {
756 			count += 1;
757 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
758 		}
759 
760 	} else if (stringset == ETH_SS_STATS) {
761 		count = ARRAY_SIZE(g_mac_stats_string) +
762 			hclge_tqps_get_sset_count(handle, stringset);
763 	}
764 
765 	return count;
766 }
767 
768 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
769 			      u8 *data)
770 {
771 	u8 *p = (char *)data;
772 	int size;
773 
774 	if (stringset == ETH_SS_STATS) {
775 		size = ARRAY_SIZE(g_mac_stats_string);
776 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
777 					   size, p);
778 		p = hclge_tqps_get_strings(handle, p);
779 	} else if (stringset == ETH_SS_TEST) {
780 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
781 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
782 			       ETH_GSTRING_LEN);
783 			p += ETH_GSTRING_LEN;
784 		}
785 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
786 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
787 			       ETH_GSTRING_LEN);
788 			p += ETH_GSTRING_LEN;
789 		}
790 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
791 			memcpy(p,
792 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
793 			       ETH_GSTRING_LEN);
794 			p += ETH_GSTRING_LEN;
795 		}
796 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
797 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
798 			       ETH_GSTRING_LEN);
799 			p += ETH_GSTRING_LEN;
800 		}
801 	}
802 }
803 
804 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
805 {
806 	struct hclge_vport *vport = hclge_get_vport(handle);
807 	struct hclge_dev *hdev = vport->back;
808 	u64 *p;
809 
810 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
811 				 ARRAY_SIZE(g_mac_stats_string), data);
812 	p = hclge_tqps_get_stats(handle, p);
813 }
814 
815 static void hclge_get_mac_stat(struct hnae3_handle *handle,
816 			       struct hns3_mac_stats *mac_stats)
817 {
818 	struct hclge_vport *vport = hclge_get_vport(handle);
819 	struct hclge_dev *hdev = vport->back;
820 
821 	hclge_update_stats(handle, NULL);
822 
823 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
824 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
825 }
826 
827 static int hclge_parse_func_status(struct hclge_dev *hdev,
828 				   struct hclge_func_status_cmd *status)
829 {
830 #define HCLGE_MAC_ID_MASK	0xF
831 
832 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
833 		return -EINVAL;
834 
835 	/* Record whether this PF is the main PF */
836 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
837 		hdev->flag |= HCLGE_FLAG_MAIN;
838 	else
839 		hdev->flag &= ~HCLGE_FLAG_MAIN;
840 
841 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
842 	return 0;
843 }
844 
845 static int hclge_query_function_status(struct hclge_dev *hdev)
846 {
847 #define HCLGE_QUERY_MAX_CNT	5
848 
849 	struct hclge_func_status_cmd *req;
850 	struct hclge_desc desc;
851 	int timeout = 0;
852 	int ret;
853 
854 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
855 	req = (struct hclge_func_status_cmd *)desc.data;
856 
857 	do {
858 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
859 		if (ret) {
860 			dev_err(&hdev->pdev->dev,
861 				"query function status failed %d.\n", ret);
862 			return ret;
863 		}
864 
865 		/* Check if the PF reset is done */
866 		if (req->pf_state)
867 			break;
868 		usleep_range(1000, 2000);
869 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
870 
871 	return hclge_parse_func_status(hdev, req);
872 }
873 
874 static int hclge_query_pf_resource(struct hclge_dev *hdev)
875 {
876 	struct hclge_pf_res_cmd *req;
877 	struct hclge_desc desc;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
881 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
882 	if (ret) {
883 		dev_err(&hdev->pdev->dev,
884 			"query pf resource failed %d.\n", ret);
885 		return ret;
886 	}
887 
888 	req = (struct hclge_pf_res_cmd *)desc.data;
889 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
890 			 le16_to_cpu(req->ext_tqp_num);
891 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892 
893 	if (req->tx_buf_size)
894 		hdev->tx_buf_size =
895 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896 	else
897 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898 
899 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900 
901 	if (req->dv_buf_size)
902 		hdev->dv_buf_size =
903 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904 	else
905 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906 
907 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908 
909 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
910 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
911 		dev_err(&hdev->pdev->dev,
912 			"only %u msi resources available, not enough for pf(min:2).\n",
913 			hdev->num_nic_msi);
914 		return -EINVAL;
915 	}
916 
917 	if (hnae3_dev_roce_supported(hdev)) {
918 		hdev->num_roce_msi =
919 			le16_to_cpu(req->pf_intr_vector_number_roce);
920 
921 		/* The PF should have both NIC and RoCE vectors;
922 		 * NIC vectors are queued before RoCE vectors.
923 		 */
924 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
925 	} else {
926 		hdev->num_msi = hdev->num_nic_msi;
927 	}
928 
929 	return 0;
930 }
931 
932 static int hclge_parse_speed(int speed_cmd, int *speed)
933 {
934 	switch (speed_cmd) {
935 	case 6:
936 		*speed = HCLGE_MAC_SPEED_10M;
937 		break;
938 	case 7:
939 		*speed = HCLGE_MAC_SPEED_100M;
940 		break;
941 	case 0:
942 		*speed = HCLGE_MAC_SPEED_1G;
943 		break;
944 	case 1:
945 		*speed = HCLGE_MAC_SPEED_10G;
946 		break;
947 	case 2:
948 		*speed = HCLGE_MAC_SPEED_25G;
949 		break;
950 	case 3:
951 		*speed = HCLGE_MAC_SPEED_40G;
952 		break;
953 	case 4:
954 		*speed = HCLGE_MAC_SPEED_50G;
955 		break;
956 	case 5:
957 		*speed = HCLGE_MAC_SPEED_100G;
958 		break;
959 	case 8:
960 		*speed = HCLGE_MAC_SPEED_200G;
961 		break;
962 	default:
963 		return -EINVAL;
964 	}
965 
966 	return 0;
967 }
968 
969 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
970 {
971 	struct hclge_vport *vport = hclge_get_vport(handle);
972 	struct hclge_dev *hdev = vport->back;
973 	u32 speed_ability = hdev->hw.mac.speed_ability;
974 	u32 speed_bit = 0;
975 
976 	switch (speed) {
977 	case HCLGE_MAC_SPEED_10M:
978 		speed_bit = HCLGE_SUPPORT_10M_BIT;
979 		break;
980 	case HCLGE_MAC_SPEED_100M:
981 		speed_bit = HCLGE_SUPPORT_100M_BIT;
982 		break;
983 	case HCLGE_MAC_SPEED_1G:
984 		speed_bit = HCLGE_SUPPORT_1G_BIT;
985 		break;
986 	case HCLGE_MAC_SPEED_10G:
987 		speed_bit = HCLGE_SUPPORT_10G_BIT;
988 		break;
989 	case HCLGE_MAC_SPEED_25G:
990 		speed_bit = HCLGE_SUPPORT_25G_BIT;
991 		break;
992 	case HCLGE_MAC_SPEED_40G:
993 		speed_bit = HCLGE_SUPPORT_40G_BIT;
994 		break;
995 	case HCLGE_MAC_SPEED_50G:
996 		speed_bit = HCLGE_SUPPORT_50G_BIT;
997 		break;
998 	case HCLGE_MAC_SPEED_100G:
999 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1000 		break;
1001 	case HCLGE_MAC_SPEED_200G:
1002 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1003 		break;
1004 	default:
1005 		return -EINVAL;
1006 	}
1007 
1008 	if (speed_bit & speed_ability)
1009 		return 0;
1010 
1011 	return -EINVAL;
1012 }
1013 
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1015 {
1016 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1018 				 mac->supported);
1019 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1021 				 mac->supported);
1022 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1024 				 mac->supported);
1025 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1027 				 mac->supported);
1028 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1030 				 mac->supported);
1031 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1032 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1033 				 mac->supported);
1034 }
1035 
1036 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1037 {
1038 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046 				 mac->supported);
1047 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049 				 mac->supported);
1050 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052 				 mac->supported);
1053 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1054 		linkmode_set_bit(
1055 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1056 			mac->supported);
1057 }
1058 
1059 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1060 {
1061 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1062 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1063 				 mac->supported);
1064 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1078 				 mac->supported);
1079 }
1080 
1081 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1082 {
1083 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1084 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1085 				 mac->supported);
1086 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1087 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1088 				 mac->supported);
1089 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1090 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1091 				 mac->supported);
1092 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1093 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1094 				 mac->supported);
1095 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1096 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1097 				 mac->supported);
1098 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1099 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1100 				 mac->supported);
1101 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1102 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1103 				 mac->supported);
1104 }
1105 
1106 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1107 {
1108 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1109 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1110 
1111 	switch (mac->speed) {
1112 	case HCLGE_MAC_SPEED_10G:
1113 	case HCLGE_MAC_SPEED_40G:
1114 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1115 				 mac->supported);
1116 		mac->fec_ability =
1117 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1118 		break;
1119 	case HCLGE_MAC_SPEED_25G:
1120 	case HCLGE_MAC_SPEED_50G:
1121 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1122 				 mac->supported);
1123 		mac->fec_ability =
1124 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1125 			BIT(HNAE3_FEC_AUTO);
1126 		break;
1127 	case HCLGE_MAC_SPEED_100G:
1128 	case HCLGE_MAC_SPEED_200G:
1129 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1130 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1131 		break;
1132 	default:
1133 		mac->fec_ability = 0;
1134 		break;
1135 	}
1136 }
1137 
1138 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1139 					u16 speed_ability)
1140 {
1141 	struct hclge_mac *mac = &hdev->hw.mac;
1142 
1143 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1144 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1145 				 mac->supported);
1146 
1147 	hclge_convert_setting_sr(mac, speed_ability);
1148 	hclge_convert_setting_lr(mac, speed_ability);
1149 	hclge_convert_setting_cr(mac, speed_ability);
1150 	if (hnae3_dev_fec_supported(hdev))
1151 		hclge_convert_setting_fec(mac);
1152 
1153 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1154 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1155 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1156 }
1157 
1158 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1159 					    u16 speed_ability)
1160 {
1161 	struct hclge_mac *mac = &hdev->hw.mac;
1162 
1163 	hclge_convert_setting_kr(mac, speed_ability);
1164 	if (hnae3_dev_fec_supported(hdev))
1165 		hclge_convert_setting_fec(mac);
1166 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1167 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1168 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1169 }
1170 
1171 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1172 					 u16 speed_ability)
1173 {
1174 	unsigned long *supported = hdev->hw.mac.supported;
1175 
1176 	/* default to supporting all speeds for a GE port */
1177 	if (!speed_ability)
1178 		speed_ability = HCLGE_SUPPORT_GE;
1179 
1180 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1181 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1182 				 supported);
1183 
1184 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1185 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1186 				 supported);
1187 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1188 				 supported);
1189 	}
1190 
1191 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1192 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1193 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1194 	}
1195 
1196 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1197 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1200 }
1201 
1202 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1203 {
1204 	u8 media_type = hdev->hw.mac.media_type;
1205 
1206 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1207 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1208 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1209 		hclge_parse_copper_link_mode(hdev, speed_ability);
1210 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1211 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1212 }
1213 
1214 static u32 hclge_get_max_speed(u16 speed_ability)
1215 {
1216 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1217 		return HCLGE_MAC_SPEED_200G;
1218 
1219 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1220 		return HCLGE_MAC_SPEED_100G;
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1223 		return HCLGE_MAC_SPEED_50G;
1224 
1225 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1226 		return HCLGE_MAC_SPEED_40G;
1227 
1228 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1229 		return HCLGE_MAC_SPEED_25G;
1230 
1231 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1232 		return HCLGE_MAC_SPEED_10G;
1233 
1234 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1235 		return HCLGE_MAC_SPEED_1G;
1236 
1237 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1238 		return HCLGE_MAC_SPEED_100M;
1239 
1240 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1241 		return HCLGE_MAC_SPEED_10M;
1242 
1243 	return HCLGE_MAC_SPEED_1G;
1244 }
1245 
1246 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1247 {
1248 #define SPEED_ABILITY_EXT_SHIFT			8
1249 
1250 	struct hclge_cfg_param_cmd *req;
1251 	u64 mac_addr_tmp_high;
1252 	u16 speed_ability_ext;
1253 	u64 mac_addr_tmp;
1254 	unsigned int i;
1255 
1256 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1257 
1258 	/* get the configuration */
1259 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1260 					      HCLGE_CFG_VMDQ_M,
1261 					      HCLGE_CFG_VMDQ_S);
1262 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1263 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1264 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1265 					    HCLGE_CFG_TQP_DESC_N_M,
1266 					    HCLGE_CFG_TQP_DESC_N_S);
1267 
1268 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1269 					HCLGE_CFG_PHY_ADDR_M,
1270 					HCLGE_CFG_PHY_ADDR_S);
1271 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1272 					  HCLGE_CFG_MEDIA_TP_M,
1273 					  HCLGE_CFG_MEDIA_TP_S);
1274 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1275 					  HCLGE_CFG_RX_BUF_LEN_M,
1276 					  HCLGE_CFG_RX_BUF_LEN_S);
1277 	/* get mac_address */
1278 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1279 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1280 					    HCLGE_CFG_MAC_ADDR_H_M,
1281 					    HCLGE_CFG_MAC_ADDR_H_S);
1282 
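	/* merge the high-order MAC address bits (above bit 31) into
	 * mac_addr_tmp; the shift is split so neither shift count reaches 32
	 */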
1283 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1284 
1285 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1286 					     HCLGE_CFG_DEFAULT_SPEED_M,
1287 					     HCLGE_CFG_DEFAULT_SPEED_S);
1288 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1289 					    HCLGE_CFG_RSS_SIZE_M,
1290 					    HCLGE_CFG_RSS_SIZE_S);
1291 
1292 	for (i = 0; i < ETH_ALEN; i++)
1293 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1294 
1295 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1296 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1297 
1298 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1299 					     HCLGE_CFG_SPEED_ABILITY_M,
1300 					     HCLGE_CFG_SPEED_ABILITY_S);
1301 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1302 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1303 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1304 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1305 
1306 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1308 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1309 	if (!cfg->umv_space)
1310 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1311 }
1312 
1313 /* hclge_get_cfg: query the static parameters from flash
1314  * @hdev: pointer to struct hclge_dev
1315  * @hcfg: the config structure to be filled
1316  */
1317 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1318 {
1319 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1320 	struct hclge_cfg_param_cmd *req;
1321 	unsigned int i;
1322 	int ret;
1323 
1324 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1325 		u32 offset = 0;
1326 
1327 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1328 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1329 					   true);
1330 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1331 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1332 		/* The length is in units of 4 bytes when sent to the hardware */
1333 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1334 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1335 		req->offset = cpu_to_le32(offset);
1336 	}
1337 
1338 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1339 	if (ret) {
1340 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1341 		return ret;
1342 	}
1343 
1344 	hclge_parse_cfg(hcfg, desc);
1345 
1346 	return 0;
1347 }
1348 
1349 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1350 {
1351 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1352 
1353 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1354 
1355 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1356 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1357 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1358 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1359 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1360 }
1361 
1362 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1363 				  struct hclge_desc *desc)
1364 {
1365 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1366 	struct hclge_dev_specs_0_cmd *req0;
1367 	struct hclge_dev_specs_1_cmd *req1;
1368 
1369 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1370 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1371 
1372 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1373 	ae_dev->dev_specs.rss_ind_tbl_size =
1374 		le16_to_cpu(req0->rss_ind_tbl_size);
1375 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1376 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1377 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1378 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1379 }
1380 
1381 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1382 {
1383 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1384 
1385 	if (!dev_specs->max_non_tso_bd_num)
1386 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1387 	if (!dev_specs->rss_ind_tbl_size)
1388 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1389 	if (!dev_specs->rss_key_size)
1390 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1391 	if (!dev_specs->max_tm_rate)
1392 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1393 	if (!dev_specs->max_int_gl)
1394 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1395 }
1396 
1397 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1398 {
1399 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1400 	int ret;
1401 	int i;
1402 
1403 	/* set default specifications, as devices with a version lower than V3
1404 	 * do not support querying specifications from firmware.
1405 	 */
1406 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1407 		hclge_set_default_dev_specs(hdev);
1408 		return 0;
1409 	}
1410 
1411 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1412 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1413 					   true);
1414 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1415 	}
1416 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1417 
1418 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1419 	if (ret)
1420 		return ret;
1421 
1422 	hclge_parse_dev_specs(hdev, desc);
1423 	hclge_check_dev_specs(hdev);
1424 
1425 	return 0;
1426 }
1427 
1428 static int hclge_get_cap(struct hclge_dev *hdev)
1429 {
1430 	int ret;
1431 
1432 	ret = hclge_query_function_status(hdev);
1433 	if (ret) {
1434 		dev_err(&hdev->pdev->dev,
1435 			"query function status error %d.\n", ret);
1436 		return ret;
1437 	}
1438 
1439 	/* get pf resource */
1440 	return hclge_query_pf_resource(hdev);
1441 }
1442 
1443 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1444 {
1445 #define HCLGE_MIN_TX_DESC	64
1446 #define HCLGE_MIN_RX_DESC	64
1447 
1448 	if (!is_kdump_kernel())
1449 		return;
1450 
1451 	dev_info(&hdev->pdev->dev,
1452 		 "Running kdump kernel. Using minimal resources\n");
1453 
1454 	/* the minimum number of queue pairs equals the number of vports */
1455 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1456 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1457 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1458 }
1459 
1460 static int hclge_configure(struct hclge_dev *hdev)
1461 {
1462 	struct hclge_cfg cfg;
1463 	unsigned int i;
1464 	int ret;
1465 
1466 	ret = hclge_get_cfg(hdev, &cfg);
1467 	if (ret)
1468 		return ret;
1469 
1470 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1471 	hdev->base_tqp_pid = 0;
1472 	hdev->rss_size_max = cfg.rss_size_max;
1473 	hdev->rx_buf_len = cfg.rx_buf_len;
1474 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1475 	hdev->hw.mac.media_type = cfg.media_type;
1476 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1477 	hdev->num_tx_desc = cfg.tqp_desc_num;
1478 	hdev->num_rx_desc = cfg.tqp_desc_num;
1479 	hdev->tm_info.num_pg = 1;
1480 	hdev->tc_max = cfg.tc_num;
1481 	hdev->tm_info.hw_pfc_map = 0;
1482 	hdev->wanted_umv_size = cfg.umv_space;
1483 
1484 	if (hnae3_dev_fd_supported(hdev)) {
1485 		hdev->fd_en = true;
1486 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1487 	}
1488 
1489 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1490 	if (ret) {
1491 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1492 			cfg.default_speed, ret);
1493 		return ret;
1494 	}
1495 
1496 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1497 
1498 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1499 
1500 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1501 	    (hdev->tc_max < 1)) {
1502 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1503 			 hdev->tc_max);
1504 		hdev->tc_max = 1;
1505 	}
1506 
1507 	/* Dev does not support DCB */
1508 	if (!hnae3_dev_dcb_supported(hdev)) {
1509 		hdev->tc_max = 1;
1510 		hdev->pfc_max = 0;
1511 	} else {
1512 		hdev->pfc_max = hdev->tc_max;
1513 	}
1514 
1515 	hdev->tm_info.num_tc = 1;
1516 
1517 	/* Non-contiguous TCs are currently not supported */
1518 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1519 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1520 
1521 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1522 
1523 	hclge_init_kdump_kernel_config(hdev);
1524 
1525 	/* Set the initial affinity based on the PCI function number */
1526 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1527 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1528 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1529 			&hdev->affinity_mask);
1530 
1531 	return ret;
1532 }
1533 
1534 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1535 			    u16 tso_mss_max)
1536 {
1537 	struct hclge_cfg_tso_status_cmd *req;
1538 	struct hclge_desc desc;
1539 
1540 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1541 
1542 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1543 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1544 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1545 
1546 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1547 }
1548 
1549 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1550 {
1551 	struct hclge_cfg_gro_status_cmd *req;
1552 	struct hclge_desc desc;
1553 	int ret;
1554 
1555 	if (!hnae3_dev_gro_supported(hdev))
1556 		return 0;
1557 
1558 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1559 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1560 
1561 	req->gro_en = en ? 1 : 0;
1562 
1563 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1564 	if (ret)
1565 		dev_err(&hdev->pdev->dev,
1566 			"GRO hardware config cmd failed, ret = %d\n", ret);
1567 
1568 	return ret;
1569 }
1570 
1571 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1572 {
1573 	struct hclge_tqp *tqp;
1574 	int i;
1575 
1576 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1577 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1578 	if (!hdev->htqp)
1579 		return -ENOMEM;
1580 
1581 	tqp = hdev->htqp;
1582 
1583 	for (i = 0; i < hdev->num_tqps; i++) {
1584 		tqp->dev = &hdev->pdev->dev;
1585 		tqp->index = i;
1586 
1587 		tqp->q.ae_algo = &ae_algo;
1588 		tqp->q.buf_size = hdev->rx_buf_len;
1589 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1590 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1591 
1592 		/* need an extended offset to configure queues >=
1593 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1594 		 */
1595 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1596 			tqp->q.io_base = hdev->hw.io_base +
1597 					 HCLGE_TQP_REG_OFFSET +
1598 					 i * HCLGE_TQP_REG_SIZE;
1599 		else
1600 			tqp->q.io_base = hdev->hw.io_base +
1601 					 HCLGE_TQP_REG_OFFSET +
1602 					 HCLGE_TQP_EXT_REG_OFFSET +
1603 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1604 					 HCLGE_TQP_REG_SIZE;
1605 
1606 		tqp++;
1607 	}
1608 
1609 	return 0;
1610 }
1611 
1612 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1613 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1614 {
1615 	struct hclge_tqp_map_cmd *req;
1616 	struct hclge_desc desc;
1617 	int ret;
1618 
1619 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1620 
1621 	req = (struct hclge_tqp_map_cmd *)desc.data;
1622 	req->tqp_id = cpu_to_le16(tqp_pid);
1623 	req->tqp_vf = func_id;
1624 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1625 	if (!is_pf)
1626 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1627 	req->tqp_vid = cpu_to_le16(tqp_vid);
1628 
1629 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1630 	if (ret)
1631 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1632 
1633 	return ret;
1634 }
1635 
1636 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1637 {
1638 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1639 	struct hclge_dev *hdev = vport->back;
1640 	int i, alloced;
1641 
1642 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1643 	     alloced < num_tqps; i++) {
1644 		if (!hdev->htqp[i].alloced) {
1645 			hdev->htqp[i].q.handle = &vport->nic;
1646 			hdev->htqp[i].q.tqp_index = alloced;
1647 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1648 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1649 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1650 			hdev->htqp[i].alloced = true;
1651 			alloced++;
1652 		}
1653 	}
1654 	vport->alloc_tqps = alloced;
1655 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1656 				vport->alloc_tqps / hdev->tm_info.num_tc);
1657 
1658 	/* ensure a one-to-one mapping between irq and queue by default */
1659 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1660 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1661 
1662 	return 0;
1663 }
1664 
1665 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1666 			    u16 num_tx_desc, u16 num_rx_desc)
1667 
1668 {
1669 	struct hnae3_handle *nic = &vport->nic;
1670 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1671 	struct hclge_dev *hdev = vport->back;
1672 	int ret;
1673 
1674 	kinfo->num_tx_desc = num_tx_desc;
1675 	kinfo->num_rx_desc = num_rx_desc;
1676 
1677 	kinfo->rx_buf_len = hdev->rx_buf_len;
1678 
1679 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1680 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1681 	if (!kinfo->tqp)
1682 		return -ENOMEM;
1683 
1684 	ret = hclge_assign_tqp(vport, num_tqps);
1685 	if (ret)
1686 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1687 
1688 	return ret;
1689 }
1690 
1691 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1692 				  struct hclge_vport *vport)
1693 {
1694 	struct hnae3_handle *nic = &vport->nic;
1695 	struct hnae3_knic_private_info *kinfo;
1696 	u16 i;
1697 
1698 	kinfo = &nic->kinfo;
1699 	for (i = 0; i < vport->alloc_tqps; i++) {
1700 		struct hclge_tqp *q =
1701 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1702 		bool is_pf;
1703 		int ret;
1704 
1705 		is_pf = !(vport->vport_id);
1706 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1707 					     i, is_pf);
1708 		if (ret)
1709 			return ret;
1710 	}
1711 
1712 	return 0;
1713 }
1714 
1715 static int hclge_map_tqp(struct hclge_dev *hdev)
1716 {
1717 	struct hclge_vport *vport = hdev->vport;
1718 	u16 i, num_vport;
1719 
1720 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1721 	for (i = 0; i < num_vport; i++)	{
1722 		int ret;
1723 
1724 		ret = hclge_map_tqp_to_vport(hdev, vport);
1725 		if (ret)
1726 			return ret;
1727 
1728 		vport++;
1729 	}
1730 
1731 	return 0;
1732 }
1733 
1734 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1735 {
1736 	struct hnae3_handle *nic = &vport->nic;
1737 	struct hclge_dev *hdev = vport->back;
1738 	int ret;
1739 
1740 	nic->pdev = hdev->pdev;
1741 	nic->ae_algo = &ae_algo;
1742 	nic->numa_node_mask = hdev->numa_node_mask;
1743 
1744 	ret = hclge_knic_setup(vport, num_tqps,
1745 			       hdev->num_tx_desc, hdev->num_rx_desc);
1746 	if (ret)
1747 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1748 
1749 	return ret;
1750 }
1751 
1752 static int hclge_alloc_vport(struct hclge_dev *hdev)
1753 {
1754 	struct pci_dev *pdev = hdev->pdev;
1755 	struct hclge_vport *vport;
1756 	u32 tqp_main_vport;
1757 	u32 tqp_per_vport;
1758 	int num_vport, i;
1759 	int ret;
1760 
1761 	/* We need to alloc a vport for the main NIC of the PF */
1762 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1763 
1764 	if (hdev->num_tqps < num_vport) {
1765 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1766 			hdev->num_tqps, num_vport);
1767 		return -EINVAL;
1768 	}
1769 
1770 	/* Alloc the same number of TQPs for every vport */
1771 	tqp_per_vport = hdev->num_tqps / num_vport;
1772 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1773 
1774 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1775 			     GFP_KERNEL);
1776 	if (!vport)
1777 		return -ENOMEM;
1778 
1779 	hdev->vport = vport;
1780 	hdev->num_alloc_vport = num_vport;
1781 
1782 	if (IS_ENABLED(CONFIG_PCI_IOV))
1783 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1784 
1785 	for (i = 0; i < num_vport; i++) {
1786 		vport->back = hdev;
1787 		vport->vport_id = i;
1788 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1789 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1790 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1791 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1792 		INIT_LIST_HEAD(&vport->vlan_list);
1793 		INIT_LIST_HEAD(&vport->uc_mac_list);
1794 		INIT_LIST_HEAD(&vport->mc_mac_list);
1795 		spin_lock_init(&vport->mac_list_lock);
1796 
1797 		if (i == 0)
1798 			ret = hclge_vport_setup(vport, tqp_main_vport);
1799 		else
1800 			ret = hclge_vport_setup(vport, tqp_per_vport);
1801 		if (ret) {
1802 			dev_err(&pdev->dev,
1803 				"vport setup failed for vport %d, %d\n",
1804 				i, ret);
1805 			return ret;
1806 		}
1807 
1808 		vport++;
1809 	}
1810 
1811 	return 0;
1812 }
1813 
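/* Convert each TC's TX buffer size into 128-byte units, set the update
 * enable bit and program the allocation with one TX_BUFF_ALLOC command.
 */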
1814 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1815 				    struct hclge_pkt_buf_alloc *buf_alloc)
1816 {
1817 /* TX buffer size is in units of 128 bytes */
1818 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1819 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1820 	struct hclge_tx_buff_alloc_cmd *req;
1821 	struct hclge_desc desc;
1822 	int ret;
1823 	u8 i;
1824 
1825 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1826 
1827 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1828 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1829 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1830 
1831 		req->tx_pkt_buff[i] =
1832 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1833 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1834 	}
1835 
1836 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1837 	if (ret)
1838 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1839 			ret);
1840 
1841 	return ret;
1842 }
1843 
1844 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1845 				 struct hclge_pkt_buf_alloc *buf_alloc)
1846 {
1847 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1848 
1849 	if (ret)
1850 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1851 
1852 	return ret;
1853 }
1854 
1855 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1856 {
1857 	unsigned int i;
1858 	u32 cnt = 0;
1859 
1860 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1861 		if (hdev->hw_tc_map & BIT(i))
1862 			cnt++;
1863 	return cnt;
1864 }
1865 
1866 /* Get the number of PFC-enabled TCs, which have a private buffer */
1867 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1868 				  struct hclge_pkt_buf_alloc *buf_alloc)
1869 {
1870 	struct hclge_priv_buf *priv;
1871 	unsigned int i;
1872 	int cnt = 0;
1873 
1874 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1875 		priv = &buf_alloc->priv_buf[i];
1876 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1877 		    priv->enable)
1878 			cnt++;
1879 	}
1880 
1881 	return cnt;
1882 }
1883 
1884 /* Get the number of PFC-disabled TCs, which have a private buffer */
1885 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1886 				     struct hclge_pkt_buf_alloc *buf_alloc)
1887 {
1888 	struct hclge_priv_buf *priv;
1889 	unsigned int i;
1890 	int cnt = 0;
1891 
1892 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1893 		priv = &buf_alloc->priv_buf[i];
1894 		if (hdev->hw_tc_map & BIT(i) &&
1895 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1896 		    priv->enable)
1897 			cnt++;
1898 	}
1899 
1900 	return cnt;
1901 }
1902 
1903 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1904 {
1905 	struct hclge_priv_buf *priv;
1906 	u32 rx_priv = 0;
1907 	int i;
1908 
1909 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1910 		priv = &buf_alloc->priv_buf[i];
1911 		if (priv->enable)
1912 			rx_priv += priv->buf_size;
1913 	}
1914 	return rx_priv;
1915 }
1916 
1917 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1918 {
1919 	u32 i, total_tx_size = 0;
1920 
1921 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1922 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1923 
1924 	return total_tx_size;
1925 }
1926 
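/* Check whether rx_all can hold the private buffers of all enabled TCs plus
 * a shared buffer of at least the required size; if it can, fill in the
 * shared buffer size, its self watermarks and the per-TC thresholds.
 */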
1927 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1928 				struct hclge_pkt_buf_alloc *buf_alloc,
1929 				u32 rx_all)
1930 {
1931 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1932 	u32 tc_num = hclge_get_tc_num(hdev);
1933 	u32 shared_buf, aligned_mps;
1934 	u32 rx_priv;
1935 	int i;
1936 
1937 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1938 
1939 	if (hnae3_dev_dcb_supported(hdev))
1940 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1941 					hdev->dv_buf_size;
1942 	else
1943 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1944 					+ hdev->dv_buf_size;
1945 
1946 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1947 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1948 			     HCLGE_BUF_SIZE_UNIT);
1949 
1950 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1951 	if (rx_all < rx_priv + shared_std)
1952 		return false;
1953 
1954 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1955 	buf_alloc->s_buf.buf_size = shared_buf;
1956 	if (hnae3_dev_dcb_supported(hdev)) {
1957 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1958 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1959 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1960 				  HCLGE_BUF_SIZE_UNIT);
1961 	} else {
1962 		buf_alloc->s_buf.self.high = aligned_mps +
1963 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1964 		buf_alloc->s_buf.self.low = aligned_mps;
1965 	}
1966 
1967 	if (hnae3_dev_dcb_supported(hdev)) {
1968 		hi_thrd = shared_buf - hdev->dv_buf_size;
1969 
1970 		if (tc_num <= NEED_RESERVE_TC_NUM)
1971 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1972 					/ BUF_MAX_PERCENT;
1973 
1974 		if (tc_num)
1975 			hi_thrd = hi_thrd / tc_num;
1976 
1977 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1978 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1979 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1980 	} else {
1981 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1982 		lo_thrd = aligned_mps;
1983 	}
1984 
1985 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1986 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1987 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1988 	}
1989 
1990 	return true;
1991 }
1992 
1993 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1994 				struct hclge_pkt_buf_alloc *buf_alloc)
1995 {
1996 	u32 i, total_size;
1997 
1998 	total_size = hdev->pkt_buf_size;
1999 
2000 	/* alloc tx buffer for all enabled tc */
2001 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2002 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2003 
2004 		if (hdev->hw_tc_map & BIT(i)) {
2005 			if (total_size < hdev->tx_buf_size)
2006 				return -ENOMEM;
2007 
2008 			priv->tx_buf_size = hdev->tx_buf_size;
2009 		} else {
2010 			priv->tx_buf_size = 0;
2011 		}
2012 
2013 		total_size -= priv->tx_buf_size;
2014 	}
2015 
2016 	return 0;
2017 }
2018 
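/* Calculate the private buffer size and watermarks for every enabled TC,
 * using larger watermarks when @max is true, then check whether the result
 * still fits into the total rx packet buffer.
 */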
2019 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2020 				  struct hclge_pkt_buf_alloc *buf_alloc)
2021 {
2022 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2023 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2024 	unsigned int i;
2025 
2026 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2027 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2028 
2029 		priv->enable = 0;
2030 		priv->wl.low = 0;
2031 		priv->wl.high = 0;
2032 		priv->buf_size = 0;
2033 
2034 		if (!(hdev->hw_tc_map & BIT(i)))
2035 			continue;
2036 
2037 		priv->enable = 1;
2038 
2039 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2040 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2041 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2042 						HCLGE_BUF_SIZE_UNIT);
2043 		} else {
2044 			priv->wl.low = 0;
2045 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2046 					aligned_mps;
2047 		}
2048 
2049 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2050 	}
2051 
2052 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2053 }
2054 
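/* Strip the private buffers of non-PFC TCs, starting from the highest TC,
 * until the remaining layout fits into the rx packet buffer.
 */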
2055 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2056 					  struct hclge_pkt_buf_alloc *buf_alloc)
2057 {
2058 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2059 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2060 	int i;
2061 
2062 	/* clear private buffers starting from the last TC */
2063 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2064 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2065 		unsigned int mask = BIT((unsigned int)i);
2066 
2067 		if (hdev->hw_tc_map & mask &&
2068 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2069 			/* Clear the private buffer of this non-PFC TC */
2070 			priv->wl.low = 0;
2071 			priv->wl.high = 0;
2072 			priv->buf_size = 0;
2073 			priv->enable = 0;
2074 			no_pfc_priv_num--;
2075 		}
2076 
2077 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2078 		    no_pfc_priv_num == 0)
2079 			break;
2080 	}
2081 
2082 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2083 }
2084 
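/* As above, but strip the private buffers of PFC-enabled TCs until the
 * layout fits.
 */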
2085 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2086 					struct hclge_pkt_buf_alloc *buf_alloc)
2087 {
2088 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2089 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2090 	int i;
2091 
2092 	/* clear private buffers starting from the last TC */
2093 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2094 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2095 		unsigned int mask = BIT((unsigned int)i);
2096 
2097 		if (hdev->hw_tc_map & mask &&
2098 		    hdev->tm_info.hw_pfc_map & mask) {
2099 			/* Reduce the number of PFC TCs with a private buffer */
2100 			priv->wl.low = 0;
2101 			priv->enable = 0;
2102 			priv->wl.high = 0;
2103 			priv->buf_size = 0;
2104 			pfc_priv_num--;
2105 		}
2106 
2107 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2108 		    pfc_priv_num == 0)
2109 			break;
2110 	}
2111 
2112 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2113 }
2114 
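/* Try to split the whole rx packet buffer into equal private buffers for
 * the enabled TCs with no shared buffer at all; give up if the per-TC share
 * would drop below the minimum required size.
 */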
2115 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2116 				      struct hclge_pkt_buf_alloc *buf_alloc)
2117 {
2118 #define COMPENSATE_BUFFER	0x3C00
2119 #define COMPENSATE_HALF_MPS_NUM	5
2120 #define PRIV_WL_GAP		0x1800
2121 
2122 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2123 	u32 tc_num = hclge_get_tc_num(hdev);
2124 	u32 half_mps = hdev->mps >> 1;
2125 	u32 min_rx_priv;
2126 	unsigned int i;
2127 
2128 	if (tc_num)
2129 		rx_priv = rx_priv / tc_num;
2130 
2131 	if (tc_num <= NEED_RESERVE_TC_NUM)
2132 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2133 
2134 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2135 			COMPENSATE_HALF_MPS_NUM * half_mps;
2136 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2137 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2138 
2139 	if (rx_priv < min_rx_priv)
2140 		return false;
2141 
2142 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2143 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2144 
2145 		priv->enable = 0;
2146 		priv->wl.low = 0;
2147 		priv->wl.high = 0;
2148 		priv->buf_size = 0;
2149 
2150 		if (!(hdev->hw_tc_map & BIT(i)))
2151 			continue;
2152 
2153 		priv->enable = 1;
2154 		priv->buf_size = rx_priv;
2155 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2156 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2157 	}
2158 
2159 	buf_alloc->s_buf.buf_size = 0;
2160 
2161 	return true;
2162 }
2163 
2164 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2165  * @hdev: pointer to struct hclge_dev
2166  * @buf_alloc: pointer to buffer calculation data
2167  * @return: 0: calculation successful, negative: fail
2168  */
2169 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2170 				struct hclge_pkt_buf_alloc *buf_alloc)
2171 {
2172 	/* When DCB is not supported, rx private buffer is not allocated. */
2173 	if (!hnae3_dev_dcb_supported(hdev)) {
2174 		u32 rx_all = hdev->pkt_buf_size;
2175 
2176 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2177 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2178 			return -ENOMEM;
2179 
2180 		return 0;
2181 	}
2182 
2183 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2184 		return 0;
2185 
2186 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2187 		return 0;
2188 
2189 	/* try to decrease the buffer size */
2190 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2191 		return 0;
2192 
2193 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2194 		return 0;
2195 
2196 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2197 		return 0;
2198 
2199 	return -ENOMEM;
2200 }
2201 
2202 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2203 				   struct hclge_pkt_buf_alloc *buf_alloc)
2204 {
2205 	struct hclge_rx_priv_buff_cmd *req;
2206 	struct hclge_desc desc;
2207 	int ret;
2208 	int i;
2209 
2210 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2211 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2212 
2213 	/* Alloc private buffer for each TC */
2214 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2215 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2216 
2217 		req->buf_num[i] =
2218 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2219 		req->buf_num[i] |=
2220 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2221 	}
2222 
2223 	req->shared_buf =
2224 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2225 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2226 
2227 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2228 	if (ret)
2229 		dev_err(&hdev->pdev->dev,
2230 			"rx private buffer alloc cmd failed %d\n", ret);
2231 
2232 	return ret;
2233 }
2234 
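/* The watermarks of all HCLGE_MAX_TC_NUM TCs are carried by two descriptors,
 * each holding HCLGE_TC_NUM_ONE_DESC TCs, chained with the NEXT flag and
 * sent as a single command.
 */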
2235 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2236 				   struct hclge_pkt_buf_alloc *buf_alloc)
2237 {
2238 	struct hclge_rx_priv_wl_buf *req;
2239 	struct hclge_priv_buf *priv;
2240 	struct hclge_desc desc[2];
2241 	int i, j;
2242 	int ret;
2243 
2244 	for (i = 0; i < 2; i++) {
2245 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2246 					   false);
2247 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2248 
2249 		/* The first descriptor sets the NEXT bit to 1 */
2250 		if (i == 0)
2251 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2252 		else
2253 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2254 
2255 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2256 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2257 
2258 			priv = &buf_alloc->priv_buf[idx];
2259 			req->tc_wl[j].high =
2260 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2261 			req->tc_wl[j].high |=
2262 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2263 			req->tc_wl[j].low =
2264 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2265 			req->tc_wl[j].low |=
2266 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2267 		}
2268 	}
2269 
2270 	/* Send 2 descriptors at one time */
2271 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2272 	if (ret)
2273 		dev_err(&hdev->pdev->dev,
2274 			"rx private waterline config cmd failed %d\n",
2275 			ret);
2276 	return ret;
2277 }
2278 
2279 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2280 				    struct hclge_pkt_buf_alloc *buf_alloc)
2281 {
2282 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2283 	struct hclge_rx_com_thrd *req;
2284 	struct hclge_desc desc[2];
2285 	struct hclge_tc_thrd *tc;
2286 	int i, j;
2287 	int ret;
2288 
2289 	for (i = 0; i < 2; i++) {
2290 		hclge_cmd_setup_basic_desc(&desc[i],
2291 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2292 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2293 
2294 		/* The first descriptor sets the NEXT bit to 1 */
2295 		if (i == 0)
2296 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2297 		else
2298 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2299 
2300 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2301 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2302 
2303 			req->com_thrd[j].high =
2304 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2305 			req->com_thrd[j].high |=
2306 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2307 			req->com_thrd[j].low =
2308 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2309 			req->com_thrd[j].low |=
2310 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2311 		}
2312 	}
2313 
2314 	/* Send 2 descriptors at one time */
2315 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2316 	if (ret)
2317 		dev_err(&hdev->pdev->dev,
2318 			"common threshold config cmd failed %d\n", ret);
2319 	return ret;
2320 }
2321 
2322 static int hclge_common_wl_config(struct hclge_dev *hdev,
2323 				  struct hclge_pkt_buf_alloc *buf_alloc)
2324 {
2325 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2326 	struct hclge_rx_com_wl *req;
2327 	struct hclge_desc desc;
2328 	int ret;
2329 
2330 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2331 
2332 	req = (struct hclge_rx_com_wl *)desc.data;
2333 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2334 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2335 
2336 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2337 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2338 
2339 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2340 	if (ret)
2341 		dev_err(&hdev->pdev->dev,
2342 			"common waterline config cmd failed %d\n", ret);
2343 
2344 	return ret;
2345 }
2346 
2347 int hclge_buffer_alloc(struct hclge_dev *hdev)
2348 {
2349 	struct hclge_pkt_buf_alloc *pkt_buf;
2350 	int ret;
2351 
2352 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2353 	if (!pkt_buf)
2354 		return -ENOMEM;
2355 
2356 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2357 	if (ret) {
2358 		dev_err(&hdev->pdev->dev,
2359 			"could not calc tx buffer size for all TCs %d\n", ret);
2360 		goto out;
2361 	}
2362 
2363 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2364 	if (ret) {
2365 		dev_err(&hdev->pdev->dev,
2366 			"could not alloc tx buffers %d\n", ret);
2367 		goto out;
2368 	}
2369 
2370 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2371 	if (ret) {
2372 		dev_err(&hdev->pdev->dev,
2373 			"could not calc rx priv buffer size for all TCs %d\n",
2374 			ret);
2375 		goto out;
2376 	}
2377 
2378 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2379 	if (ret) {
2380 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2381 			ret);
2382 		goto out;
2383 	}
2384 
2385 	if (hnae3_dev_dcb_supported(hdev)) {
2386 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2387 		if (ret) {
2388 			dev_err(&hdev->pdev->dev,
2389 				"could not configure rx private waterline %d\n",
2390 				ret);
2391 			goto out;
2392 		}
2393 
2394 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2395 		if (ret) {
2396 			dev_err(&hdev->pdev->dev,
2397 				"could not configure common threshold %d\n",
2398 				ret);
2399 			goto out;
2400 		}
2401 	}
2402 
2403 	ret = hclge_common_wl_config(hdev, pkt_buf);
2404 	if (ret)
2405 		dev_err(&hdev->pdev->dev,
2406 			"could not configure common waterline %d\n", ret);
2407 
2408 out:
2409 	kfree(pkt_buf);
2410 	return ret;
2411 }
2412 
2413 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2414 {
2415 	struct hnae3_handle *roce = &vport->roce;
2416 	struct hnae3_handle *nic = &vport->nic;
2417 	struct hclge_dev *hdev = vport->back;
2418 
2419 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2420 
2421 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2422 		return -EINVAL;
2423 
2424 	roce->rinfo.base_vector = hdev->roce_base_vector;
2425 
2426 	roce->rinfo.netdev = nic->kinfo.netdev;
2427 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2428 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2429 
2430 	roce->pdev = nic->pdev;
2431 	roce->ae_algo = nic->ae_algo;
2432 	roce->numa_node_mask = nic->numa_node_mask;
2433 
2434 	return 0;
2435 }
2436 
2437 static int hclge_init_msi(struct hclge_dev *hdev)
2438 {
2439 	struct pci_dev *pdev = hdev->pdev;
2440 	int vectors;
2441 	int i;
2442 
2443 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2444 					hdev->num_msi,
2445 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2446 	if (vectors < 0) {
2447 		dev_err(&pdev->dev,
2448 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2449 			vectors);
2450 		return vectors;
2451 	}
2452 	if (vectors < hdev->num_msi)
2453 		dev_warn(&hdev->pdev->dev,
2454 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2455 			 hdev->num_msi, vectors);
2456 
2457 	hdev->num_msi = vectors;
2458 	hdev->num_msi_left = vectors;
2459 
2460 	hdev->base_msi_vector = pdev->irq;
2461 	hdev->roce_base_vector = hdev->base_msi_vector +
2462 				hdev->num_nic_msi;
2463 
2464 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2465 					   sizeof(u16), GFP_KERNEL);
2466 	if (!hdev->vector_status) {
2467 		pci_free_irq_vectors(pdev);
2468 		return -ENOMEM;
2469 	}
2470 
2471 	for (i = 0; i < hdev->num_msi; i++)
2472 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2473 
2474 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2475 					sizeof(int), GFP_KERNEL);
2476 	if (!hdev->vector_irq) {
2477 		pci_free_irq_vectors(pdev);
2478 		return -ENOMEM;
2479 	}
2480 
2481 	return 0;
2482 }
2483 
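/* Only 10M and 100M links may run at half duplex; force full duplex for
 * all other speeds.
 */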
2484 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2485 {
2486 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2487 		duplex = HCLGE_MAC_FULL;
2488 
2489 	return duplex;
2490 }
2491 
2492 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2493 				      u8 duplex)
2494 {
2495 	struct hclge_config_mac_speed_dup_cmd *req;
2496 	struct hclge_desc desc;
2497 	int ret;
2498 
2499 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2500 
2501 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2502 
2503 	if (duplex)
2504 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2505 
2506 	switch (speed) {
2507 	case HCLGE_MAC_SPEED_10M:
2508 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2509 				HCLGE_CFG_SPEED_S, 6);
2510 		break;
2511 	case HCLGE_MAC_SPEED_100M:
2512 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2513 				HCLGE_CFG_SPEED_S, 7);
2514 		break;
2515 	case HCLGE_MAC_SPEED_1G:
2516 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2517 				HCLGE_CFG_SPEED_S, 0);
2518 		break;
2519 	case HCLGE_MAC_SPEED_10G:
2520 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2521 				HCLGE_CFG_SPEED_S, 1);
2522 		break;
2523 	case HCLGE_MAC_SPEED_25G:
2524 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2525 				HCLGE_CFG_SPEED_S, 2);
2526 		break;
2527 	case HCLGE_MAC_SPEED_40G:
2528 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2529 				HCLGE_CFG_SPEED_S, 3);
2530 		break;
2531 	case HCLGE_MAC_SPEED_50G:
2532 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2533 				HCLGE_CFG_SPEED_S, 4);
2534 		break;
2535 	case HCLGE_MAC_SPEED_100G:
2536 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2537 				HCLGE_CFG_SPEED_S, 5);
2538 		break;
2539 	case HCLGE_MAC_SPEED_200G:
2540 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541 				HCLGE_CFG_SPEED_S, 8);
2542 		break;
2543 	default:
2544 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2545 		return -EINVAL;
2546 	}
2547 
2548 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2549 		      1);
2550 
2551 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2552 	if (ret) {
2553 		dev_err(&hdev->pdev->dev,
2554 			"mac speed/duplex config cmd failed %d.\n", ret);
2555 		return ret;
2556 	}
2557 
2558 	return 0;
2559 }
2560 
2561 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2562 {
2563 	struct hclge_mac *mac = &hdev->hw.mac;
2564 	int ret;
2565 
2566 	duplex = hclge_check_speed_dup(duplex, speed);
2567 	if (!mac->support_autoneg && mac->speed == speed &&
2568 	    mac->duplex == duplex)
2569 		return 0;
2570 
2571 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2572 	if (ret)
2573 		return ret;
2574 
2575 	hdev->hw.mac.speed = speed;
2576 	hdev->hw.mac.duplex = duplex;
2577 
2578 	return 0;
2579 }
2580 
2581 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2582 				     u8 duplex)
2583 {
2584 	struct hclge_vport *vport = hclge_get_vport(handle);
2585 	struct hclge_dev *hdev = vport->back;
2586 
2587 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2588 }
2589 
2590 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2591 {
2592 	struct hclge_config_auto_neg_cmd *req;
2593 	struct hclge_desc desc;
2594 	u32 flag = 0;
2595 	int ret;
2596 
2597 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2598 
2599 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2600 	if (enable)
2601 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2602 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2603 
2604 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2605 	if (ret)
2606 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2607 			ret);
2608 
2609 	return ret;
2610 }
2611 
2612 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2613 {
2614 	struct hclge_vport *vport = hclge_get_vport(handle);
2615 	struct hclge_dev *hdev = vport->back;
2616 
2617 	if (!hdev->hw.mac.support_autoneg) {
2618 		if (enable) {
2619 			dev_err(&hdev->pdev->dev,
2620 				"autoneg is not supported by current port\n");
2621 			return -EOPNOTSUPP;
2622 		} else {
2623 			return 0;
2624 		}
2625 	}
2626 
2627 	return hclge_set_autoneg_en(hdev, enable);
2628 }
2629 
2630 static int hclge_get_autoneg(struct hnae3_handle *handle)
2631 {
2632 	struct hclge_vport *vport = hclge_get_vport(handle);
2633 	struct hclge_dev *hdev = vport->back;
2634 	struct phy_device *phydev = hdev->hw.mac.phydev;
2635 
2636 	if (phydev)
2637 		return phydev->autoneg;
2638 
2639 	return hdev->hw.mac.autoneg;
2640 }
2641 
2642 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2643 {
2644 	struct hclge_vport *vport = hclge_get_vport(handle);
2645 	struct hclge_dev *hdev = vport->back;
2646 	int ret;
2647 
2648 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2649 
2650 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2651 	if (ret)
2652 		return ret;
2653 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2654 }
2655 
2656 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2657 {
2658 	struct hclge_vport *vport = hclge_get_vport(handle);
2659 	struct hclge_dev *hdev = vport->back;
2660 
2661 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2662 		return hclge_set_autoneg_en(hdev, !halt);
2663 
2664 	return 0;
2665 }
2666 
2667 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2668 {
2669 	struct hclge_config_fec_cmd *req;
2670 	struct hclge_desc desc;
2671 	int ret;
2672 
2673 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2674 
2675 	req = (struct hclge_config_fec_cmd *)desc.data;
2676 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2677 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2678 	if (fec_mode & BIT(HNAE3_FEC_RS))
2679 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2680 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2681 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2682 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2683 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2684 
2685 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2686 	if (ret)
2687 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2688 
2689 	return ret;
2690 }
2691 
2692 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2693 {
2694 	struct hclge_vport *vport = hclge_get_vport(handle);
2695 	struct hclge_dev *hdev = vport->back;
2696 	struct hclge_mac *mac = &hdev->hw.mac;
2697 	int ret;
2698 
2699 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2700 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2701 		return -EINVAL;
2702 	}
2703 
2704 	ret = hclge_set_fec_hw(hdev, fec_mode);
2705 	if (ret)
2706 		return ret;
2707 
2708 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2709 	return 0;
2710 }
2711 
2712 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2713 			  u8 *fec_mode)
2714 {
2715 	struct hclge_vport *vport = hclge_get_vport(handle);
2716 	struct hclge_dev *hdev = vport->back;
2717 	struct hclge_mac *mac = &hdev->hw.mac;
2718 
2719 	if (fec_ability)
2720 		*fec_ability = mac->fec_ability;
2721 	if (fec_mode)
2722 		*fec_mode = mac->fec_mode;
2723 }
2724 
2725 static int hclge_mac_init(struct hclge_dev *hdev)
2726 {
2727 	struct hclge_mac *mac = &hdev->hw.mac;
2728 	int ret;
2729 
2730 	hdev->support_sfp_query = true;
2731 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2732 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2733 					 hdev->hw.mac.duplex);
2734 	if (ret)
2735 		return ret;
2736 
2737 	if (hdev->hw.mac.support_autoneg) {
2738 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2739 		if (ret)
2740 			return ret;
2741 	}
2742 
2743 	mac->link = 0;
2744 
2745 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2746 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2747 		if (ret)
2748 			return ret;
2749 	}
2750 
2751 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2752 	if (ret) {
2753 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2754 		return ret;
2755 	}
2756 
2757 	ret = hclge_set_default_loopback(hdev);
2758 	if (ret)
2759 		return ret;
2760 
2761 	ret = hclge_buffer_alloc(hdev);
2762 	if (ret)
2763 		dev_err(&hdev->pdev->dev,
2764 			"allocate buffer fail, ret=%d\n", ret);
2765 
2766 	return ret;
2767 }
2768 
2769 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2770 {
2771 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2772 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2773 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2774 				    hclge_wq, &hdev->service_task, 0);
2775 }
2776 
2777 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2778 {
2779 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2780 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2781 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2782 				    hclge_wq, &hdev->service_task, 0);
2783 }
2784 
2785 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2786 {
2787 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2788 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2789 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2790 				    hclge_wq, &hdev->service_task,
2791 				    delay_time);
2792 }
2793 
2794 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2795 {
2796 	struct hclge_link_status_cmd *req;
2797 	struct hclge_desc desc;
2798 	int ret;
2799 
2800 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2801 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2802 	if (ret) {
2803 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2804 			ret);
2805 		return ret;
2806 	}
2807 
2808 	req = (struct hclge_link_status_cmd *)desc.data;
2809 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2810 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2811 
2812 	return 0;
2813 }
2814 
2815 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2816 {
2817 	struct phy_device *phydev = hdev->hw.mac.phydev;
2818 
2819 	*link_status = HCLGE_LINK_STATUS_DOWN;
2820 
2821 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2822 		return 0;
2823 
2824 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2825 		return 0;
2826 
2827 	return hclge_get_mac_link_status(hdev, link_status);
2828 }
2829 
2830 static void hclge_update_link_status(struct hclge_dev *hdev)
2831 {
2832 	struct hnae3_client *rclient = hdev->roce_client;
2833 	struct hnae3_client *client = hdev->nic_client;
2834 	struct hnae3_handle *rhandle;
2835 	struct hnae3_handle *handle;
2836 	int state;
2837 	int ret;
2838 	int i;
2839 
2840 	if (!client)
2841 		return;
2842 
2843 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2844 		return;
2845 
2846 	ret = hclge_get_mac_phy_link(hdev, &state);
2847 	if (ret) {
2848 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2849 		return;
2850 	}
2851 
2852 	if (state != hdev->hw.mac.link) {
2853 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2854 			handle = &hdev->vport[i].nic;
2855 			client->ops->link_status_change(handle, state);
2856 			hclge_config_mac_tnl_int(hdev, state);
2857 			rhandle = &hdev->vport[i].roce;
2858 			if (rclient && rclient->ops->link_status_change)
2859 				rclient->ops->link_status_change(rhandle,
2860 								 state);
2861 		}
2862 		hdev->hw.mac.link = state;
2863 	}
2864 
2865 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2866 }
2867 
2868 static void hclge_update_port_capability(struct hclge_mac *mac)
2869 {
2870 	/* update fec ability by speed */
2871 	hclge_convert_setting_fec(mac);
2872 
2873 	/* firmware cannot identify the backplane type; the media type
2874 	 * read from the configuration helps to handle it
2875 	 */
2876 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2877 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2878 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2879 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2880 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2881 
2882 	if (mac->support_autoneg) {
2883 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2884 		linkmode_copy(mac->advertising, mac->supported);
2885 	} else {
2886 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2887 				   mac->supported);
2888 		linkmode_zero(mac->advertising);
2889 	}
2890 }
2891 
2892 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2893 {
2894 	struct hclge_sfp_info_cmd *resp;
2895 	struct hclge_desc desc;
2896 	int ret;
2897 
2898 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2899 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2900 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2901 	if (ret == -EOPNOTSUPP) {
2902 		dev_warn(&hdev->pdev->dev,
2903 			 "IMP does not support getting SFP speed %d\n", ret);
2904 		return ret;
2905 	} else if (ret) {
2906 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2907 		return ret;
2908 	}
2909 
2910 	*speed = le32_to_cpu(resp->speed);
2911 
2912 	return 0;
2913 }
2914 
2915 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2916 {
2917 	struct hclge_sfp_info_cmd *resp;
2918 	struct hclge_desc desc;
2919 	int ret;
2920 
2921 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2922 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2923 
2924 	resp->query_type = QUERY_ACTIVE_SPEED;
2925 
2926 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2927 	if (ret == -EOPNOTSUPP) {
2928 		dev_warn(&hdev->pdev->dev,
2929 			 "IMP does not support getting SFP info %d\n", ret);
2930 		return ret;
2931 	} else if (ret) {
2932 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2933 		return ret;
2934 	}
2935 
2936 	/* In some cases, the mac speed got from IMP may be 0; it should not
2937 	 * be set to mac->speed.
2938 	 */
2939 	if (!le32_to_cpu(resp->speed))
2940 		return 0;
2941 
2942 	mac->speed = le32_to_cpu(resp->speed);
2943 	/* if resp->speed_ability is 0, the firmware is an old version,
2944 	 * so do not update these params
2945 	 */
2946 	if (resp->speed_ability) {
2947 		mac->module_type = le32_to_cpu(resp->module_type);
2948 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2949 		mac->autoneg = resp->autoneg;
2950 		mac->support_autoneg = resp->autoneg_ability;
2951 		mac->speed_type = QUERY_ACTIVE_SPEED;
2952 		if (!resp->active_fec)
2953 			mac->fec_mode = 0;
2954 		else
2955 			mac->fec_mode = BIT(resp->active_fec);
2956 	} else {
2957 		mac->speed_type = QUERY_SFP_SPEED;
2958 	}
2959 
2960 	return 0;
2961 }
2962 
2963 static int hclge_update_port_info(struct hclge_dev *hdev)
2964 {
2965 	struct hclge_mac *mac = &hdev->hw.mac;
2966 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2967 	int ret;
2968 
2969 	/* get the port info from SFP cmd if not copper port */
2970 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2971 		return 0;
2972 
2973 	/* if IMP does not support get SFP/qSFP info, return directly */
2974 	if (!hdev->support_sfp_query)
2975 		return 0;
2976 
2977 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2978 		ret = hclge_get_sfp_info(hdev, mac);
2979 	else
2980 		ret = hclge_get_sfp_speed(hdev, &speed);
2981 
2982 	if (ret == -EOPNOTSUPP) {
2983 		hdev->support_sfp_query = false;
2984 		return ret;
2985 	} else if (ret) {
2986 		return ret;
2987 	}
2988 
2989 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
2990 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2991 			hclge_update_port_capability(mac);
2992 			return 0;
2993 		}
2994 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2995 					       HCLGE_MAC_FULL);
2996 	} else {
2997 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2998 			return 0; /* do nothing if no SFP */
2999 
3000 		/* must config full duplex for SFP */
3001 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3002 	}
3003 }
3004 
3005 static int hclge_get_status(struct hnae3_handle *handle)
3006 {
3007 	struct hclge_vport *vport = hclge_get_vport(handle);
3008 	struct hclge_dev *hdev = vport->back;
3009 
3010 	hclge_update_link_status(hdev);
3011 
3012 	return hdev->hw.mac.link;
3013 }
3014 
3015 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3016 {
3017 	if (!pci_num_vf(hdev->pdev)) {
3018 		dev_err(&hdev->pdev->dev,
3019 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3020 		return NULL;
3021 	}
3022 
3023 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3024 		dev_err(&hdev->pdev->dev,
3025 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3026 			vf, pci_num_vf(hdev->pdev));
3027 		return NULL;
3028 	}
3029 
3030 	/* VFs start from index 1 in the vport array */
3031 	vf += HCLGE_VF_VPORT_START_NUM;
3032 	return &hdev->vport[vf];
3033 }
3034 
3035 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3036 			       struct ifla_vf_info *ivf)
3037 {
3038 	struct hclge_vport *vport = hclge_get_vport(handle);
3039 	struct hclge_dev *hdev = vport->back;
3040 
3041 	vport = hclge_get_vf_vport(hdev, vf);
3042 	if (!vport)
3043 		return -EINVAL;
3044 
3045 	ivf->vf = vf;
3046 	ivf->linkstate = vport->vf_info.link_state;
3047 	ivf->spoofchk = vport->vf_info.spoofchk;
3048 	ivf->trusted = vport->vf_info.trusted;
3049 	ivf->min_tx_rate = 0;
3050 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3051 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3052 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3053 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3054 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3055 
3056 	return 0;
3057 }
3058 
3059 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3060 				   int link_state)
3061 {
3062 	struct hclge_vport *vport = hclge_get_vport(handle);
3063 	struct hclge_dev *hdev = vport->back;
3064 
3065 	vport = hclge_get_vf_vport(hdev, vf);
3066 	if (!vport)
3067 		return -EINVAL;
3068 
3069 	vport->vf_info.link_state = link_state;
3070 
3071 	return 0;
3072 }
3073 
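/* Events are checked in priority order: IMP and global reset first, then
 * MSI-X errors, then mailbox (CMDQ RX); only the highest priority event
 * found is returned per call.
 */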
3074 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3075 {
3076 	u32 cmdq_src_reg, msix_src_reg;
3077 
3078 	/* fetch the events from their corresponding regs */
3079 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3080 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3081 
3082 	/* Assumption: if reset and mailbox events happen to be reported
3083 	 * together, only the reset event is processed in this pass and the
3084 	 * mailbox events are deferred. Since the RX CMDQ event has not been
3085 	 * cleared this time, the H/W will raise another interrupt just for
3086 	 * the mailbox.
3087 	 *
3088 	 * check for vector0 reset event sources
3089 	 */
3090 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3091 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3092 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3093 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3094 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3095 		hdev->rst_stats.imp_rst_cnt++;
3096 		return HCLGE_VECTOR0_EVENT_RST;
3097 	}
3098 
3099 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3100 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3101 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3102 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3103 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3104 		hdev->rst_stats.global_rst_cnt++;
3105 		return HCLGE_VECTOR0_EVENT_RST;
3106 	}
3107 
3108 	/* check for vector0 msix event source */
3109 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3110 		*clearval = msix_src_reg;
3111 		return HCLGE_VECTOR0_EVENT_ERR;
3112 	}
3113 
3114 	/* check for vector0 mailbox(=CMDQ RX) event source */
3115 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3116 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3117 		*clearval = cmdq_src_reg;
3118 		return HCLGE_VECTOR0_EVENT_MBX;
3119 	}
3120 
3121 	/* print other vector0 event source */
3122 	dev_info(&hdev->pdev->dev,
3123 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3124 		 cmdq_src_reg, msix_src_reg);
3125 	*clearval = msix_src_reg;
3126 
3127 	return HCLGE_VECTOR0_EVENT_OTHER;
3128 }
3129 
3130 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3131 				    u32 regclr)
3132 {
3133 	switch (event_type) {
3134 	case HCLGE_VECTOR0_EVENT_RST:
3135 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3136 		break;
3137 	case HCLGE_VECTOR0_EVENT_MBX:
3138 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3139 		break;
3140 	default:
3141 		break;
3142 	}
3143 }
3144 
3145 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3146 {
3147 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3148 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3149 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3150 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3151 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3152 }
3153 
3154 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3155 {
3156 	writel(enable ? 1 : 0, vector->addr);
3157 }
3158 
3159 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3160 {
3161 	struct hclge_dev *hdev = data;
3162 	u32 clearval = 0;
3163 	u32 event_cause;
3164 
3165 	hclge_enable_vector(&hdev->misc_vector, false);
3166 	event_cause = hclge_check_event_cause(hdev, &clearval);
3167 
3168 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3169 	switch (event_cause) {
3170 	case HCLGE_VECTOR0_EVENT_ERR:
3171 		/* we do not know what type of reset is required now. This could
3172 		 * only be decided after we fetch the type of errors which
3173 		 * caused this event. Therefore, we will do the following for now:
3174 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3175 		 *    have deferred the type of reset to be used.
3176 		 * 2. Schedule the reset service task.
3177 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3178 		 *    will fetch the correct type of reset. This would be done
3179 		 *    by first decoding the types of errors.
3180 		 */
3181 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3182 		fallthrough;
3183 	case HCLGE_VECTOR0_EVENT_RST:
3184 		hclge_reset_task_schedule(hdev);
3185 		break;
3186 	case HCLGE_VECTOR0_EVENT_MBX:
3187 		/* If we are here then,
3188 		 * 1. Either we are not handling any mbx task and we are not
3189 		 *    scheduled either,
3190 		 *                        OR
3191 		 * 2. We could be handling an mbx task but nothing more is
3192 		 *    scheduled.
3193 		 * In both cases, we should schedule the mbx task as there are
3194 		 * more mbx messages reported by this interrupt.
3195 		 */
3196 		hclge_mbx_task_schedule(hdev);
3197 		break;
3198 	default:
3199 		dev_warn(&hdev->pdev->dev,
3200 			 "received unknown or unhandled event of vector0\n");
3201 		break;
3202 	}
3203 
3204 	hclge_clear_event_cause(hdev, event_cause, clearval);
3205 
3206 	/* Enable the interrupt if it is not caused by reset. When
3207 	 * clearval equals 0, the interrupt status may have been
3208 	 * cleared by hardware before the driver read the status register.
3209 	 * In this case, the vector0 interrupt should also be enabled.
3210 	 */
3211 	if (!clearval ||
3212 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3213 		hclge_enable_vector(&hdev->misc_vector, true);
3214 	}
3215 
3216 	return IRQ_HANDLED;
3217 }
3218 
3219 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3220 {
3221 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3222 		dev_warn(&hdev->pdev->dev,
3223 			 "vector(vector_id %d) has been freed.\n", vector_id);
3224 		return;
3225 	}
3226 
3227 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3228 	hdev->num_msi_left += 1;
3229 	hdev->num_msi_used -= 1;
3230 }
3231 
3232 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3233 {
3234 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3235 
3236 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3237 
3238 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3239 	hdev->vector_status[0] = 0;
3240 
3241 	hdev->num_msi_left -= 1;
3242 	hdev->num_msi_used += 1;
3243 }
3244 
3245 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3246 				      const cpumask_t *mask)
3247 {
3248 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3249 					      affinity_notify);
3250 
3251 	cpumask_copy(&hdev->affinity_mask, mask);
3252 }
3253 
3254 static void hclge_irq_affinity_release(struct kref *ref)
3255 {
3256 }
3257 
3258 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3259 {
3260 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3261 			      &hdev->affinity_mask);
3262 
3263 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3264 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3265 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3266 				  &hdev->affinity_notify);
3267 }
3268 
3269 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3270 {
3271 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3272 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3273 }
3274 
3275 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3276 {
3277 	int ret;
3278 
3279 	hclge_get_misc_vector(hdev);
3280 
3281 	/* this would be explicitly freed in the end */
3282 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3283 		 HCLGE_NAME, pci_name(hdev->pdev));
3284 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3285 			  0, hdev->misc_vector.name, hdev);
3286 	if (ret) {
3287 		hclge_free_vector(hdev, 0);
3288 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3289 			hdev->misc_vector.vector_irq);
3290 	}
3291 
3292 	return ret;
3293 }
3294 
3295 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3296 {
3297 	free_irq(hdev->misc_vector.vector_irq, hdev);
3298 	hclge_free_vector(hdev, 0);
3299 }
3300 
3301 int hclge_notify_client(struct hclge_dev *hdev,
3302 			enum hnae3_reset_notify_type type)
3303 {
3304 	struct hnae3_client *client = hdev->nic_client;
3305 	u16 i;
3306 
3307 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3308 		return 0;
3309 
3310 	if (!client->ops->reset_notify)
3311 		return -EOPNOTSUPP;
3312 
3313 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3314 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3315 		int ret;
3316 
3317 		ret = client->ops->reset_notify(handle, type);
3318 		if (ret) {
3319 			dev_err(&hdev->pdev->dev,
3320 				"notify nic client failed %d(%d)\n", type, ret);
3321 			return ret;
3322 		}
3323 	}
3324 
3325 	return 0;
3326 }
3327 
3328 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3329 				    enum hnae3_reset_notify_type type)
3330 {
3331 	struct hnae3_client *client = hdev->roce_client;
3332 	int ret;
3333 	u16 i;
3334 
3335 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3336 		return 0;
3337 
3338 	if (!client->ops->reset_notify)
3339 		return -EOPNOTSUPP;
3340 
3341 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3342 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3343 
3344 		ret = client->ops->reset_notify(handle, type);
3345 		if (ret) {
3346 			dev_err(&hdev->pdev->dev,
3347 				"notify roce client failed %d(%d)",
3348 				type, ret);
3349 			return ret;
3350 		}
3351 	}
3352 
3353 	return ret;
3354 }
3355 
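/* Poll the reset status register every 100 ms, for up to 35 seconds, until
 * the bit for the current reset type is cleared by hardware.
 */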
3356 static int hclge_reset_wait(struct hclge_dev *hdev)
3357 {
3358 #define HCLGE_RESET_WAIT_MS	100
3359 #define HCLGE_RESET_WAIT_CNT	350
3360 
3361 	u32 val, reg, reg_bit;
3362 	u32 cnt = 0;
3363 
3364 	switch (hdev->reset_type) {
3365 	case HNAE3_IMP_RESET:
3366 		reg = HCLGE_GLOBAL_RESET_REG;
3367 		reg_bit = HCLGE_IMP_RESET_BIT;
3368 		break;
3369 	case HNAE3_GLOBAL_RESET:
3370 		reg = HCLGE_GLOBAL_RESET_REG;
3371 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3372 		break;
3373 	case HNAE3_FUNC_RESET:
3374 		reg = HCLGE_FUN_RST_ING;
3375 		reg_bit = HCLGE_FUN_RST_ING_B;
3376 		break;
3377 	default:
3378 		dev_err(&hdev->pdev->dev,
3379 			"Wait for unsupported reset type: %d\n",
3380 			hdev->reset_type);
3381 		return -EINVAL;
3382 	}
3383 
3384 	val = hclge_read_dev(&hdev->hw, reg);
3385 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3386 		msleep(HCLGE_RESET_WAIT_MS);
3387 		val = hclge_read_dev(&hdev->hw, reg);
3388 		cnt++;
3389 	}
3390 
3391 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3392 		dev_warn(&hdev->pdev->dev,
3393 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3394 		return -EBUSY;
3395 	}
3396 
3397 	return 0;
3398 }
3399 
3400 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3401 {
3402 	struct hclge_vf_rst_cmd *req;
3403 	struct hclge_desc desc;
3404 
3405 	req = (struct hclge_vf_rst_cmd *)desc.data;
3406 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3407 	req->dest_vfid = func_id;
3408 
3409 	if (reset)
3410 		req->vf_rst = 0x1;
3411 
3412 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3413 }
3414 
3415 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3416 {
3417 	int i;
3418 
3419 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3420 		struct hclge_vport *vport = &hdev->vport[i];
3421 		int ret;
3422 
3423 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3424 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3425 		if (ret) {
3426 			dev_err(&hdev->pdev->dev,
3427 				"set vf(%u) rst failed %d!\n",
3428 				vport->vport_id, ret);
3429 			return ret;
3430 		}
3431 
3432 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3433 			continue;
3434 
3435 		/* Inform VF to process the reset.
3436 		 * hclge_inform_reset_assert_to_vf may fail if VF
3437 		 * driver is not loaded.
3438 		 */
3439 		ret = hclge_inform_reset_assert_to_vf(vport);
3440 		if (ret)
3441 			dev_warn(&hdev->pdev->dev,
3442 				 "inform reset to vf(%u) failed %d!\n",
3443 				 vport->vport_id, ret);
3444 	}
3445 
3446 	return 0;
3447 }
3448 
3449 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3450 {
3451 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3452 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3453 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3454 		return;
3455 
3456 	hclge_mbx_handler(hdev);
3457 
3458 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3459 }
3460 
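/* Before asserting a PF or FLR reset, poll the firmware until all VFs
 * report that they have stopped IO, servicing pending mailbox requests
 * between polls.
 */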
3461 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3462 {
3463 	struct hclge_pf_rst_sync_cmd *req;
3464 	struct hclge_desc desc;
3465 	int cnt = 0;
3466 	int ret;
3467 
3468 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3469 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3470 
3471 	do {
3472 		/* vf need to down netdev by mbx during PF or FLR reset */
3473 		/* VFs need to bring their netdev down via mbx during PF or FLR reset */
3474 
3475 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3476 		/* for compatible with old firmware, wait
3477 		/* for compatibility with old firmware, wait
3478 		 * 100 ms for the VF to stop IO
3479 		if (ret == -EOPNOTSUPP) {
3480 			msleep(HCLGE_RESET_SYNC_TIME);
3481 			return;
3482 		} else if (ret) {
3483 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3484 				 ret);
3485 			return;
3486 		} else if (req->all_vf_ready) {
3487 			return;
3488 		}
3489 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3490 		hclge_cmd_reuse_desc(&desc, true);
3491 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3492 
3493 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3494 }
3495 
3496 void hclge_report_hw_error(struct hclge_dev *hdev,
3497 			   enum hnae3_hw_error_type type)
3498 {
3499 	struct hnae3_client *client = hdev->nic_client;
3500 	u16 i;
3501 
3502 	if (!client || !client->ops->process_hw_error ||
3503 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3504 		return;
3505 
3506 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3507 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3508 }
3509 
3510 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3511 {
3512 	u32 reg_val;
3513 
3514 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3515 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3516 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3517 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3518 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3519 	}
3520 
3521 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3522 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3523 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3524 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3525 	}
3526 }
3527 
3528 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3529 {
3530 	struct hclge_desc desc;
3531 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3532 	int ret;
3533 
3534 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3535 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3536 	req->fun_reset_vfid = func_id;
3537 
3538 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3539 	if (ret)
3540 		dev_err(&hdev->pdev->dev,
3541 			"send function reset cmd fail, status =%d\n", ret);
3542 
3543 	return ret;
3544 }
3545 
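/* Trigger the reset requested in hdev->reset_type: a global reset is asserted
 * directly through HCLGE_GLOBAL_RESET_REG, while a PF reset is only marked as
 * pending and the reset task is rescheduled. If a hardware reset is still in
 * progress, do nothing and let it finish first.
 */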
3546 static void hclge_do_reset(struct hclge_dev *hdev)
3547 {
3548 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3549 	struct pci_dev *pdev = hdev->pdev;
3550 	u32 val;
3551 
3552 	if (hclge_get_hw_reset_stat(handle)) {
3553 		dev_info(&pdev->dev, "hardware reset not finished\n");
3554 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3555 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3556 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3557 		return;
3558 	}
3559 
3560 	switch (hdev->reset_type) {
3561 	case HNAE3_GLOBAL_RESET:
3562 		dev_info(&pdev->dev, "global reset requested\n");
3563 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3564 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3565 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3566 		break;
3567 	case HNAE3_FUNC_RESET:
3568 		dev_info(&pdev->dev, "PF reset requested\n");
3569 		/* schedule again to check later */
3570 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3571 		hclge_reset_task_schedule(hdev);
3572 		break;
3573 	default:
3574 		dev_warn(&pdev->dev,
3575 			 "unsupported reset type: %d\n", hdev->reset_type);
3576 		break;
3577 	}
3578 }
3579 
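/* Convert the pending reset bitmap in @addr into a single reset level: first
 * resolve any UNKNOWN reset by handling the MSIX error source, then return
 * the highest priority level (IMP > global > func > FLR) and clear the bits
 * it supersedes. Returns HNAE3_NONE_RESET if the level found is lower than
 * the reset already being processed.
 */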
3580 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3581 						   unsigned long *addr)
3582 {
3583 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3584 	struct hclge_dev *hdev = ae_dev->priv;
3585 
3586 	/* first, resolve any unknown reset type to the known type(s) */
3587 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3588 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3589 					HCLGE_MISC_VECTOR_INT_STS);
3590 		/* we will intentionally ignore any errors from this function
3591 		 * as we will end up in *some* reset request in any case
3592 		 */
3593 		if (hclge_handle_hw_msix_error(hdev, addr))
3594 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3595 				 msix_sts_reg);
3596 
3597 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3598 		/* We deferred the clearing of the error event which caused the
3599 		 * interrupt since it was not possible to do that in
3600 		 * interrupt context (and this is the reason we introduced the
3601 		 * new UNKNOWN reset type). Now that the errors have been
3602 		 * handled and cleared in hardware, we can safely enable
3603 		 * interrupts. This is an exception to the norm.
3604 		 */
3605 		hclge_enable_vector(&hdev->misc_vector, true);
3606 	}
3607 
3608 	/* return the highest priority reset level amongst all */
3609 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3610 		rst_level = HNAE3_IMP_RESET;
3611 		clear_bit(HNAE3_IMP_RESET, addr);
3612 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3613 		clear_bit(HNAE3_FUNC_RESET, addr);
3614 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3615 		rst_level = HNAE3_GLOBAL_RESET;
3616 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3617 		clear_bit(HNAE3_FUNC_RESET, addr);
3618 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3619 		rst_level = HNAE3_FUNC_RESET;
3620 		clear_bit(HNAE3_FUNC_RESET, addr);
3621 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3622 		rst_level = HNAE3_FLR_RESET;
3623 		clear_bit(HNAE3_FLR_RESET, addr);
3624 	}
3625 
3626 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3627 	    rst_level < hdev->reset_type)
3628 		return HNAE3_NONE_RESET;
3629 
3630 	return rst_level;
3631 }
3632 
3633 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3634 {
3635 	u32 clearval = 0;
3636 
3637 	switch (hdev->reset_type) {
3638 	case HNAE3_IMP_RESET:
3639 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3640 		break;
3641 	case HNAE3_GLOBAL_RESET:
3642 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3643 		break;
3644 	default:
3645 		break;
3646 	}
3647 
3648 	if (!clearval)
3649 		return;
3650 
3651 	/* For revision 0x20, the reset interrupt source
3652 	 * can only be cleared after the hardware reset is done
3653 	 */
3654 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3655 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3656 				clearval);
3657 
3658 	hclge_enable_vector(&hdev->misc_vector, true);
3659 }
3660 
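/* Drive the software/firmware reset handshake by toggling the
 * HCLGE_NIC_SW_RST_RDY bit in HCLGE_NIC_CSQ_DEPTH_REG: set it when the driver
 * has finished its preparation, clear it after re-initialization is done.
 */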
3661 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3662 {
3663 	u32 reg_val;
3664 
3665 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3666 	if (enable)
3667 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3668 	else
3669 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3670 
3671 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3672 }
3673 
3674 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3675 {
3676 	int ret;
3677 
3678 	ret = hclge_set_all_vf_rst(hdev, true);
3679 	if (ret)
3680 		return ret;
3681 
3682 	hclge_func_reset_sync_vf(hdev);
3683 
3684 	return 0;
3685 }
3686 
3687 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3688 {
3689 	u32 reg_val;
3690 	int ret = 0;
3691 
3692 	switch (hdev->reset_type) {
3693 	case HNAE3_FUNC_RESET:
3694 		ret = hclge_func_reset_notify_vf(hdev);
3695 		if (ret)
3696 			return ret;
3697 
3698 		ret = hclge_func_reset_cmd(hdev, 0);
3699 		if (ret) {
3700 			dev_err(&hdev->pdev->dev,
3701 				"asserting function reset fail %d!\n", ret);
3702 			return ret;
3703 		}
3704 
3705 		/* After performing the PF reset, it is not necessary to do the
3706 		 * mailbox handling or send any command to the firmware, because
3707 		 * any mailbox handling or command to the firmware is only valid
3708 		 * after hclge_cmd_init is called.
3709 		 */
3710 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3711 		hdev->rst_stats.pf_rst_cnt++;
3712 		break;
3713 	case HNAE3_FLR_RESET:
3714 		ret = hclge_func_reset_notify_vf(hdev);
3715 		if (ret)
3716 			return ret;
3717 		break;
3718 	case HNAE3_IMP_RESET:
3719 		hclge_handle_imp_error(hdev);
3720 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3721 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3722 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3723 		break;
3724 	default:
3725 		break;
3726 	}
3727 
3728 	/* inform hardware that preparatory work is done */
3729 	msleep(HCLGE_RESET_SYNC_TIME);
3730 	hclge_reset_handshake(hdev, true);
3731 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3732 
3733 	return ret;
3734 }
3735 
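/* Decide how to proceed after a failed reset. Returns true when the reset
 * task should be rescheduled (another reset is pending or the retry limit has
 * not been reached); returns false and sets HCLGE_STATE_RST_FAIL when the
 * driver should give up, or when a new reset interrupt will take over.
 */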
3736 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3737 {
3738 #define MAX_RESET_FAIL_CNT 5
3739 
3740 	if (hdev->reset_pending) {
3741 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3742 			 hdev->reset_pending);
3743 		return true;
3744 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3745 		   HCLGE_RESET_INT_M) {
3746 		dev_info(&hdev->pdev->dev,
3747 			 "reset failed because of a new reset interrupt\n");
3748 		hclge_clear_reset_cause(hdev);
3749 		return false;
3750 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3751 		hdev->rst_stats.reset_fail_cnt++;
3752 		set_bit(hdev->reset_type, &hdev->reset_pending);
3753 		dev_info(&hdev->pdev->dev,
3754 			 "re-schedule reset task(%u)\n",
3755 			 hdev->rst_stats.reset_fail_cnt);
3756 		return true;
3757 	}
3758 
3759 	hclge_clear_reset_cause(hdev);
3760 
3761 	/* recover the handshake status when the reset fails */
3762 	hclge_reset_handshake(hdev, true);
3763 
3764 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3765 
3766 	hclge_dbg_dump_rst_info(hdev);
3767 
3768 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3769 
3770 	return false;
3771 }
3772 
3773 static int hclge_set_rst_done(struct hclge_dev *hdev)
3774 {
3775 	struct hclge_pf_rst_done_cmd *req;
3776 	struct hclge_desc desc;
3777 	int ret;
3778 
3779 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3780 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3781 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3782 
3783 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3784 	/* To be compatible with the old firmware, which does not support
3785 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3786 	 * return success
3787 	 */
3788 	if (ret == -EOPNOTSUPP) {
3789 		dev_warn(&hdev->pdev->dev,
3790 			 "current firmware does not support command(0x%x)!\n",
3791 			 HCLGE_OPC_PF_RST_DONE);
3792 		return 0;
3793 	} else if (ret) {
3794 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3795 			ret);
3796 	}
3797 
3798 	return ret;
3799 }
3800 
3801 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3802 {
3803 	int ret = 0;
3804 
3805 	switch (hdev->reset_type) {
3806 	case HNAE3_FUNC_RESET:
3807 	case HNAE3_FLR_RESET:
3808 		ret = hclge_set_all_vf_rst(hdev, false);
3809 		break;
3810 	case HNAE3_GLOBAL_RESET:
3811 	case HNAE3_IMP_RESET:
3812 		ret = hclge_set_rst_done(hdev);
3813 		break;
3814 	default:
3815 		break;
3816 	}
3817 
3818 	/* clear the handshake status after re-initialization is done */
3819 	hclge_reset_handshake(hdev, false);
3820 
3821 	return ret;
3822 }
3823 
3824 static int hclge_reset_stack(struct hclge_dev *hdev)
3825 {
3826 	int ret;
3827 
3828 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3829 	if (ret)
3830 		return ret;
3831 
3832 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3833 	if (ret)
3834 		return ret;
3835 
3836 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3837 }
3838 
3839 static int hclge_reset_prepare(struct hclge_dev *hdev)
3840 {
3841 	int ret;
3842 
3843 	hdev->rst_stats.reset_cnt++;
3844 	/* perform reset of the stack & ae device for a client */
3845 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3846 	if (ret)
3847 		return ret;
3848 
3849 	rtnl_lock();
3850 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3851 	rtnl_unlock();
3852 	if (ret)
3853 		return ret;
3854 
3855 	return hclge_reset_prepare_wait(hdev);
3856 }
3857 
3858 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3859 {
3860 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3861 	enum hnae3_reset_type reset_level;
3862 	int ret;
3863 
3864 	hdev->rst_stats.hw_reset_done_cnt++;
3865 
3866 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3867 	if (ret)
3868 		return ret;
3869 
3870 	rtnl_lock();
3871 	ret = hclge_reset_stack(hdev);
3872 	rtnl_unlock();
3873 	if (ret)
3874 		return ret;
3875 
3876 	hclge_clear_reset_cause(hdev);
3877 
3878 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3879 	/* ignore the RoCE notify error if the reset has already failed
3880 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3881 	 */
3882 	if (ret &&
3883 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3884 		return ret;
3885 
3886 	ret = hclge_reset_prepare_up(hdev);
3887 	if (ret)
3888 		return ret;
3889 
3890 	rtnl_lock();
3891 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3892 	rtnl_unlock();
3893 	if (ret)
3894 		return ret;
3895 
3896 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3897 	if (ret)
3898 		return ret;
3899 
3900 	hdev->last_reset_time = jiffies;
3901 	hdev->rst_stats.reset_fail_cnt = 0;
3902 	hdev->rst_stats.reset_done_cnt++;
3903 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3904 
3905 	/* if default_reset_request has a higher level reset request,
3906 	 * it should be handled as soon as possible, since some errors
3907 	 * need this kind of reset to be fixed.
3908 	 */
3909 	reset_level = hclge_get_reset_level(ae_dev,
3910 					    &hdev->default_reset_request);
3911 	if (reset_level != HNAE3_NONE_RESET)
3912 		set_bit(reset_level, &hdev->reset_request);
3913 
3914 	return 0;
3915 }
3916 
3917 static void hclge_reset(struct hclge_dev *hdev)
3918 {
3919 	if (hclge_reset_prepare(hdev))
3920 		goto err_reset;
3921 
3922 	if (hclge_reset_wait(hdev))
3923 		goto err_reset;
3924 
3925 	if (hclge_reset_rebuild(hdev))
3926 		goto err_reset;
3927 
3928 	return;
3929 
3930 err_reset:
3931 	if (hclge_reset_err_handle(hdev))
3932 		hclge_reset_task_schedule(hdev);
3933 }
3934 
3935 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3936 {
3937 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3938 	struct hclge_dev *hdev = ae_dev->priv;
3939 
3940 	/* We might end up getting called broadly because of the 2 cases below:
3941 	 * 1. A recoverable error was conveyed through APEI and the only way
3942 	 *    to restore normalcy is to reset.
3943 	 * 2. A new reset request from the stack due to a timeout
3944 	 *
3945 	 * For the first case, the error event might not have an ae handle
3946 	 * available. Check if this is a new reset request and we are not here
3947 	 * just because the last reset attempt did not succeed and the watchdog
3948 	 * hit us again. We will know this if the last reset request did not
3949 	 * occur very recently (watchdog timer = 5*HZ, let us check after a
3950 	 * sufficiently large time, say 4*5*HZ). In case of a new request we
3951 	 * reset the "reset level" to PF reset. And if it is a repeat reset
3952 	 * request of the most recent one then we want to make sure we throttle
3953 	 * the reset request. Therefore, we will not allow it again within 3*HZ.
3954 	 */
3955 	if (!handle)
3956 		handle = &hdev->vport[0].nic;
3957 
3958 	if (time_before(jiffies, (hdev->last_reset_time +
3959 				  HCLGE_RESET_INTERVAL))) {
3960 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3961 		return;
3962 	} else if (hdev->default_reset_request) {
3963 		hdev->reset_level =
3964 			hclge_get_reset_level(ae_dev,
3965 					      &hdev->default_reset_request);
3966 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3967 		hdev->reset_level = HNAE3_FUNC_RESET;
3968 	}
3969 
3970 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3971 		 hdev->reset_level);
3972 
3973 	/* request reset & schedule reset task */
3974 	set_bit(hdev->reset_level, &hdev->reset_request);
3975 	hclge_reset_task_schedule(hdev);
3976 
3977 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3978 		hdev->reset_level++;
3979 }
3980 
3981 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3982 					enum hnae3_reset_type rst_type)
3983 {
3984 	struct hclge_dev *hdev = ae_dev->priv;
3985 
3986 	set_bit(rst_type, &hdev->default_reset_request);
3987 }
3988 
3989 static void hclge_reset_timer(struct timer_list *t)
3990 {
3991 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3992 
3993 	/* if default_reset_request has no value, it means that this reset
3994 	 * request has already been handled, so just return here
3995 	 */
3996 	if (!hdev->default_reset_request)
3997 		return;
3998 
3999 	dev_info(&hdev->pdev->dev,
4000 		 "triggering reset in reset timer\n");
4001 	hclge_reset_event(hdev->pdev, NULL);
4002 }
4003 
4004 static void hclge_reset_subtask(struct hclge_dev *hdev)
4005 {
4006 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4007 
4008 	/* check if there is any ongoing reset in the hardware. This status can
4009 	 * be checked from reset_pending. If there is, then we need to wait for
4010 	 * the hardware to complete the reset.
4011 	 *    a. If we are able to figure out in reasonable time that the
4012 	 *       hardware has fully reset, then we can proceed with the driver
4013 	 *       and client reset.
4014 	 *    b. else, we can come back later to check this status, so
4015 	 *       reschedule now.
4016 	 */
4017 	hdev->last_reset_time = jiffies;
4018 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4019 	if (hdev->reset_type != HNAE3_NONE_RESET)
4020 		hclge_reset(hdev);
4021 
4022 	/* check if we got any *new* reset requests to be honored */
4023 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4024 	if (hdev->reset_type != HNAE3_NONE_RESET)
4025 		hclge_do_reset(hdev);
4026 
4027 	hdev->reset_type = HNAE3_NONE_RESET;
4028 }
4029 
4030 static void hclge_reset_service_task(struct hclge_dev *hdev)
4031 {
4032 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4033 		return;
4034 
4035 	down(&hdev->reset_sem);
4036 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4037 
4038 	hclge_reset_subtask(hdev);
4039 
4040 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4041 	up(&hdev->reset_sem);
4042 }
4043 
4044 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4045 {
4046 	int i;
4047 
4048 	/* start from vport 1 since the PF is always alive */
4049 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4050 		struct hclge_vport *vport = &hdev->vport[i];
4051 
4052 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4053 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4054 
4055 		/* If the VF is not alive, set its MPS to the default value */
4056 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4057 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4058 	}
4059 }
4060 
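/* Periodic work: link status, MAC table and promisc mode are refreshed on
 * every pass, while the heavier updates (vport alive state, statistics, port
 * info, VLAN and aRFS housekeeping) are throttled to roughly once per second
 * via last_serv_processed.
 */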
4061 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4062 {
4063 	unsigned long delta = round_jiffies_relative(HZ);
4064 
4065 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4066 		return;
4067 
4068 	/* Always handle the link updating to make sure link state is
4069 	 * updated when it is triggered by mbx.
4070 	 */
4071 	hclge_update_link_status(hdev);
4072 	hclge_sync_mac_table(hdev);
4073 	hclge_sync_promisc_mode(hdev);
4074 
4075 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4076 		delta = jiffies - hdev->last_serv_processed;
4077 
4078 		if (delta < round_jiffies_relative(HZ)) {
4079 			delta = round_jiffies_relative(HZ) - delta;
4080 			goto out;
4081 		}
4082 	}
4083 
4084 	hdev->serv_processed_cnt++;
4085 	hclge_update_vport_alive(hdev);
4086 
4087 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4088 		hdev->last_serv_processed = jiffies;
4089 		goto out;
4090 	}
4091 
4092 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4093 		hclge_update_stats_for_all(hdev);
4094 
4095 	hclge_update_port_info(hdev);
4096 	hclge_sync_vlan_filter(hdev);
4097 
4098 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4099 		hclge_rfs_filter_expire(hdev);
4100 
4101 	hdev->last_serv_processed = jiffies;
4102 
4103 out:
4104 	hclge_task_schedule(hdev, delta);
4105 }
4106 
4107 static void hclge_service_task(struct work_struct *work)
4108 {
4109 	struct hclge_dev *hdev =
4110 		container_of(work, struct hclge_dev, service_task.work);
4111 
4112 	hclge_reset_service_task(hdev);
4113 	hclge_mailbox_service_task(hdev);
4114 	hclge_periodic_service_task(hdev);
4115 
4116 	/* Handle reset and mbx again in case periodical task delays the
4117 	 * handling by calling hclge_task_schedule() in
4118 	 * hclge_periodic_service_task().
4119 	 */
4120 	hclge_reset_service_task(hdev);
4121 	hclge_mailbox_service_task(hdev);
4122 }
4123 
4124 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4125 {
4126 	/* VF handle has no client */
4127 	if (!handle->client)
4128 		return container_of(handle, struct hclge_vport, nic);
4129 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4130 		return container_of(handle, struct hclge_vport, roce);
4131 	else
4132 		return container_of(handle, struct hclge_vport, nic);
4133 }
4134 
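/* Fill in the IRQ number and the per-vector register address for vector @idx.
 * Vectors beyond HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 live in an extended register
 * region, so their I/O address is computed from HCLGE_VECTOR_EXT_REG_BASE
 * instead of HCLGE_VECTOR_REG_BASE.
 */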
4135 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4136 				  struct hnae3_vector_info *vector_info)
4137 {
4138 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4139 
4140 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4141 
4142 	/* need an extended offset to configure vectors >= 64 */
4143 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4144 		vector_info->io_addr = hdev->hw.io_base +
4145 				HCLGE_VECTOR_REG_BASE +
4146 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4147 	else
4148 		vector_info->io_addr = hdev->hw.io_base +
4149 				HCLGE_VECTOR_EXT_REG_BASE +
4150 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4151 				HCLGE_VECTOR_REG_OFFSET_H +
4152 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4153 				HCLGE_VECTOR_REG_OFFSET;
4154 
4155 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4156 	hdev->vector_irq[idx] = vector_info->vector;
4157 }
4158 
4159 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4160 			    struct hnae3_vector_info *vector_info)
4161 {
4162 	struct hclge_vport *vport = hclge_get_vport(handle);
4163 	struct hnae3_vector_info *vector = vector_info;
4164 	struct hclge_dev *hdev = vport->back;
4165 	int alloc = 0;
4166 	u16 i = 0;
4167 	u16 j;
4168 
4169 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4170 	vector_num = min(hdev->num_msi_left, vector_num);
4171 
4172 	for (j = 0; j < vector_num; j++) {
4173 		while (++i < hdev->num_nic_msi) {
4174 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4175 				hclge_get_vector_info(hdev, i, vector);
4176 				vector++;
4177 				alloc++;
4178 
4179 				break;
4180 			}
4181 		}
4182 	}
4183 	hdev->num_msi_left -= alloc;
4184 	hdev->num_msi_used += alloc;
4185 
4186 	return alloc;
4187 }
4188 
4189 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4190 {
4191 	int i;
4192 
4193 	for (i = 0; i < hdev->num_msi; i++)
4194 		if (vector == hdev->vector_irq[i])
4195 			return i;
4196 
4197 	return -EINVAL;
4198 }
4199 
4200 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4201 {
4202 	struct hclge_vport *vport = hclge_get_vport(handle);
4203 	struct hclge_dev *hdev = vport->back;
4204 	int vector_id;
4205 
4206 	vector_id = hclge_get_vector_index(hdev, vector);
4207 	if (vector_id < 0) {
4208 		dev_err(&hdev->pdev->dev,
4209 			"Get vector index fail. vector = %d\n", vector);
4210 		return vector_id;
4211 	}
4212 
4213 	hclge_free_vector(hdev, vector_id);
4214 
4215 	return 0;
4216 }
4217 
4218 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4219 {
4220 	return HCLGE_RSS_KEY_SIZE;
4221 }
4222 
4223 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4224 {
4225 	return HCLGE_RSS_IND_TBL_SIZE;
4226 }
4227 
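/* Program the RSS hash algorithm and hash key. The key is written in chunks
 * of HCLGE_RSS_HASH_KEY_NUM bytes, one HCLGE_OPC_RSS_GENERIC_CONFIG command
 * per chunk, with the chunk offset carried in hash_config.
 */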
4228 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4229 				  const u8 hfunc, const u8 *key)
4230 {
4231 	struct hclge_rss_config_cmd *req;
4232 	unsigned int key_offset = 0;
4233 	struct hclge_desc desc;
4234 	int key_counts;
4235 	int key_size;
4236 	int ret;
4237 
4238 	key_counts = HCLGE_RSS_KEY_SIZE;
4239 	req = (struct hclge_rss_config_cmd *)desc.data;
4240 
4241 	while (key_counts) {
4242 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4243 					   false);
4244 
4245 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4246 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4247 
4248 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4249 		memcpy(req->hash_key,
4250 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4251 
4252 		key_counts -= key_size;
4253 		key_offset++;
4254 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4255 		if (ret) {
4256 			dev_err(&hdev->pdev->dev,
4257 				"Configure RSS config fail, status = %d\n",
4258 				ret);
4259 			return ret;
4260 		}
4261 	}
4262 	return 0;
4263 }
4264 
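/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE entries
 * per HCLGE_OPC_RSS_INDIR_TABLE command.
 */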
4265 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4266 {
4267 	struct hclge_rss_indirection_table_cmd *req;
4268 	struct hclge_desc desc;
4269 	int i, j;
4270 	int ret;
4271 
4272 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4273 
4274 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4275 		hclge_cmd_setup_basic_desc
4276 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4277 
4278 		req->start_table_index =
4279 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4280 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4281 
4282 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4283 			req->rss_result[j] =
4284 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4285 
4286 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4287 		if (ret) {
4288 			dev_err(&hdev->pdev->dev,
4289 				"Configure rss indir table fail,status = %d\n",
4290 				ret);
4291 			return ret;
4292 		}
4293 	}
4294 	return 0;
4295 }
4296 
4297 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4298 				 u16 *tc_size, u16 *tc_offset)
4299 {
4300 	struct hclge_rss_tc_mode_cmd *req;
4301 	struct hclge_desc desc;
4302 	int ret;
4303 	int i;
4304 
4305 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4306 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4307 
4308 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4309 		u16 mode = 0;
4310 
4311 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4312 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4313 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4314 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4315 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4316 
4317 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4318 	}
4319 
4320 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4321 	if (ret)
4322 		dev_err(&hdev->pdev->dev,
4323 			"Configure rss tc mode fail, status = %d\n", ret);
4324 
4325 	return ret;
4326 }
4327 
4328 static void hclge_get_rss_type(struct hclge_vport *vport)
4329 {
4330 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4331 	    vport->rss_tuple_sets.ipv4_udp_en ||
4332 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4333 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4334 	    vport->rss_tuple_sets.ipv6_udp_en ||
4335 	    vport->rss_tuple_sets.ipv6_sctp_en)
4336 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4337 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4338 		 vport->rss_tuple_sets.ipv6_fragment_en)
4339 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4340 	else
4341 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4342 }
4343 
4344 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4345 {
4346 	struct hclge_rss_input_tuple_cmd *req;
4347 	struct hclge_desc desc;
4348 	int ret;
4349 
4350 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4351 
4352 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4353 
4354 	/* Get the tuple cfg from the PF */
4355 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4356 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4357 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4358 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4359 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4360 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4361 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4362 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4363 	hclge_get_rss_type(&hdev->vport[0]);
4364 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4365 	if (ret)
4366 		dev_err(&hdev->pdev->dev,
4367 			"Configure rss input fail, status = %d\n", ret);
4368 	return ret;
4369 }
4370 
4371 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4372 			 u8 *key, u8 *hfunc)
4373 {
4374 	struct hclge_vport *vport = hclge_get_vport(handle);
4375 	int i;
4376 
4377 	/* Get hash algorithm */
4378 	if (hfunc) {
4379 		switch (vport->rss_algo) {
4380 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4381 			*hfunc = ETH_RSS_HASH_TOP;
4382 			break;
4383 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4384 			*hfunc = ETH_RSS_HASH_XOR;
4385 			break;
4386 		default:
4387 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4388 			break;
4389 		}
4390 	}
4391 
4392 	/* Get the RSS Key required by the user */
4393 	if (key)
4394 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4395 
4396 	/* Get indirect table */
4397 	if (indir)
4398 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4399 			indir[i] =  vport->rss_indirection_tbl[i];
4400 
4401 	return 0;
4402 }
4403 
4404 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4405 			 const  u8 *key, const  u8 hfunc)
4406 {
4407 	struct hclge_vport *vport = hclge_get_vport(handle);
4408 	struct hclge_dev *hdev = vport->back;
4409 	u8 hash_algo;
4410 	int ret, i;
4411 
4412 	/* Set the RSS Hash Key if specified by the user */
4413 	if (key) {
4414 		switch (hfunc) {
4415 		case ETH_RSS_HASH_TOP:
4416 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4417 			break;
4418 		case ETH_RSS_HASH_XOR:
4419 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4420 			break;
4421 		case ETH_RSS_HASH_NO_CHANGE:
4422 			hash_algo = vport->rss_algo;
4423 			break;
4424 		default:
4425 			return -EINVAL;
4426 		}
4427 
4428 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4429 		if (ret)
4430 			return ret;
4431 
4432 		/* Update the shadow RSS key with the user specified key */
4433 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4434 		vport->rss_algo = hash_algo;
4435 	}
4436 
4437 	/* Update the shadow RSS table with user specified qids */
4438 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4439 		vport->rss_indirection_tbl[i] = indir[i];
4440 
4441 	/* Update the hardware */
4442 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4443 }
4444 
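/* Translate the ethtool RXH_* flags in @nfc into the driver's tuple bits
 * (source/destination IP and L4 port); SCTP flows additionally hash on the
 * verification tag.
 */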
4445 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4446 {
4447 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4448 
4449 	if (nfc->data & RXH_L4_B_2_3)
4450 		hash_sets |= HCLGE_D_PORT_BIT;
4451 	else
4452 		hash_sets &= ~HCLGE_D_PORT_BIT;
4453 
4454 	if (nfc->data & RXH_IP_SRC)
4455 		hash_sets |= HCLGE_S_IP_BIT;
4456 	else
4457 		hash_sets &= ~HCLGE_S_IP_BIT;
4458 
4459 	if (nfc->data & RXH_IP_DST)
4460 		hash_sets |= HCLGE_D_IP_BIT;
4461 	else
4462 		hash_sets &= ~HCLGE_D_IP_BIT;
4463 
4464 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4465 		hash_sets |= HCLGE_V_TAG_BIT;
4466 
4467 	return hash_sets;
4468 }
4469 
4470 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4471 			       struct ethtool_rxnfc *nfc)
4472 {
4473 	struct hclge_vport *vport = hclge_get_vport(handle);
4474 	struct hclge_dev *hdev = vport->back;
4475 	struct hclge_rss_input_tuple_cmd *req;
4476 	struct hclge_desc desc;
4477 	u8 tuple_sets;
4478 	int ret;
4479 
4480 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4481 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4482 		return -EINVAL;
4483 
4484 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4485 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4486 
4487 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4488 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4489 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4490 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4491 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4492 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4493 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4494 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4495 
4496 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4497 	switch (nfc->flow_type) {
4498 	case TCP_V4_FLOW:
4499 		req->ipv4_tcp_en = tuple_sets;
4500 		break;
4501 	case TCP_V6_FLOW:
4502 		req->ipv6_tcp_en = tuple_sets;
4503 		break;
4504 	case UDP_V4_FLOW:
4505 		req->ipv4_udp_en = tuple_sets;
4506 		break;
4507 	case UDP_V6_FLOW:
4508 		req->ipv6_udp_en = tuple_sets;
4509 		break;
4510 	case SCTP_V4_FLOW:
4511 		req->ipv4_sctp_en = tuple_sets;
4512 		break;
4513 	case SCTP_V6_FLOW:
4514 		if ((nfc->data & RXH_L4_B_0_1) ||
4515 		    (nfc->data & RXH_L4_B_2_3))
4516 			return -EINVAL;
4517 
4518 		req->ipv6_sctp_en = tuple_sets;
4519 		break;
4520 	case IPV4_FLOW:
4521 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4522 		break;
4523 	case IPV6_FLOW:
4524 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4525 		break;
4526 	default:
4527 		return -EINVAL;
4528 	}
4529 
4530 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4531 	if (ret) {
4532 		dev_err(&hdev->pdev->dev,
4533 			"Set rss tuple fail, status = %d\n", ret);
4534 		return ret;
4535 	}
4536 
4537 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4538 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4539 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4540 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4541 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4542 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4543 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4544 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4545 	hclge_get_rss_type(vport);
4546 	return 0;
4547 }
4548 
4549 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4550 			       struct ethtool_rxnfc *nfc)
4551 {
4552 	struct hclge_vport *vport = hclge_get_vport(handle);
4553 	u8 tuple_sets;
4554 
4555 	nfc->data = 0;
4556 
4557 	switch (nfc->flow_type) {
4558 	case TCP_V4_FLOW:
4559 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4560 		break;
4561 	case UDP_V4_FLOW:
4562 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4563 		break;
4564 	case TCP_V6_FLOW:
4565 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4566 		break;
4567 	case UDP_V6_FLOW:
4568 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4569 		break;
4570 	case SCTP_V4_FLOW:
4571 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4572 		break;
4573 	case SCTP_V6_FLOW:
4574 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4575 		break;
4576 	case IPV4_FLOW:
4577 	case IPV6_FLOW:
4578 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4579 		break;
4580 	default:
4581 		return -EINVAL;
4582 	}
4583 
4584 	if (!tuple_sets)
4585 		return 0;
4586 
4587 	if (tuple_sets & HCLGE_D_PORT_BIT)
4588 		nfc->data |= RXH_L4_B_2_3;
4589 	if (tuple_sets & HCLGE_S_PORT_BIT)
4590 		nfc->data |= RXH_L4_B_0_1;
4591 	if (tuple_sets & HCLGE_D_IP_BIT)
4592 		nfc->data |= RXH_IP_DST;
4593 	if (tuple_sets & HCLGE_S_IP_BIT)
4594 		nfc->data |= RXH_IP_SRC;
4595 
4596 	return 0;
4597 }
4598 
4599 static int hclge_get_tc_size(struct hnae3_handle *handle)
4600 {
4601 	struct hclge_vport *vport = hclge_get_vport(handle);
4602 	struct hclge_dev *hdev = vport->back;
4603 
4604 	return hdev->rss_size_max;
4605 }
4606 
4607 int hclge_rss_init_hw(struct hclge_dev *hdev)
4608 {
4609 	struct hclge_vport *vport = hdev->vport;
4610 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4611 	u16 rss_size = vport[0].alloc_rss_size;
4612 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4613 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4614 	u8 *key = vport[0].rss_hash_key;
4615 	u8 hfunc = vport[0].rss_algo;
4616 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4617 	u16 roundup_size;
4618 	unsigned int i;
4619 	int ret;
4620 
4621 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4622 	if (ret)
4623 		return ret;
4624 
4625 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4626 	if (ret)
4627 		return ret;
4628 
4629 	ret = hclge_set_rss_input_tuple(hdev);
4630 	if (ret)
4631 		return ret;
4632 
4633 	/* Each TC has the same queue size, and the tc_size set to hardware is
4634 	 * the log2 of the roundup power of two of rss_size; the actual queue
4635 	 * size is limited by the indirection table.
4636 	 */
4637 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4638 		dev_err(&hdev->pdev->dev,
4639 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4640 			rss_size);
4641 		return -EINVAL;
4642 	}
4643 
4644 	roundup_size = roundup_pow_of_two(rss_size);
4645 	roundup_size = ilog2(roundup_size);
4646 
4647 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4648 		tc_valid[i] = 0;
4649 
4650 		if (!(hdev->hw_tc_map & BIT(i)))
4651 			continue;
4652 
4653 		tc_valid[i] = 1;
4654 		tc_size[i] = roundup_size;
4655 		tc_offset[i] = rss_size * i;
4656 	}
4657 
4658 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4659 }
4660 
4661 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4662 {
4663 	struct hclge_vport *vport = hdev->vport;
4664 	int i, j;
4665 
4666 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4667 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4668 			vport[j].rss_indirection_tbl[i] =
4669 				i % vport[j].alloc_rss_size;
4670 	}
4671 }
4672 
4673 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4674 {
4675 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4676 	struct hclge_vport *vport = hdev->vport;
4677 
4678 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4679 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4680 
4681 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4682 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4683 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4684 		vport[i].rss_tuple_sets.ipv4_udp_en =
4685 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4686 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4687 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4688 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4689 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4690 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4691 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4692 		vport[i].rss_tuple_sets.ipv6_udp_en =
4693 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4694 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4695 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4696 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4697 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4698 
4699 		vport[i].rss_algo = rss_algo;
4700 
4701 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4702 		       HCLGE_RSS_KEY_SIZE);
4703 	}
4704 
4705 	hclge_rss_indir_init_cfg(hdev);
4706 }
4707 
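/* Map (@en == true) or unmap a chain of TQP rings to interrupt vector
 * @vector_id. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings are packed into each
 * HCLGE_OPC_ADD/DEL_RING_TO_VECTOR descriptor, so the chain is sent in
 * batches.
 */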
4708 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4709 				int vector_id, bool en,
4710 				struct hnae3_ring_chain_node *ring_chain)
4711 {
4712 	struct hclge_dev *hdev = vport->back;
4713 	struct hnae3_ring_chain_node *node;
4714 	struct hclge_desc desc;
4715 	struct hclge_ctrl_vector_chain_cmd *req =
4716 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4717 	enum hclge_cmd_status status;
4718 	enum hclge_opcode_type op;
4719 	u16 tqp_type_and_id;
4720 	int i;
4721 
4722 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4723 	hclge_cmd_setup_basic_desc(&desc, op, false);
4724 	req->int_vector_id_l = hnae3_get_field(vector_id,
4725 					       HCLGE_VECTOR_ID_L_M,
4726 					       HCLGE_VECTOR_ID_L_S);
4727 	req->int_vector_id_h = hnae3_get_field(vector_id,
4728 					       HCLGE_VECTOR_ID_H_M,
4729 					       HCLGE_VECTOR_ID_H_S);
4730 
4731 	i = 0;
4732 	for (node = ring_chain; node; node = node->next) {
4733 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4734 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4735 				HCLGE_INT_TYPE_S,
4736 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4737 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4738 				HCLGE_TQP_ID_S, node->tqp_index);
4739 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4740 				HCLGE_INT_GL_IDX_S,
4741 				hnae3_get_field(node->int_gl_idx,
4742 						HNAE3_RING_GL_IDX_M,
4743 						HNAE3_RING_GL_IDX_S));
4744 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4745 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4746 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4747 			req->vfid = vport->vport_id;
4748 
4749 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4750 			if (status) {
4751 				dev_err(&hdev->pdev->dev,
4752 					"Map TQP fail, status is %d.\n",
4753 					status);
4754 				return -EIO;
4755 			}
4756 			i = 0;
4757 
4758 			hclge_cmd_setup_basic_desc(&desc,
4759 						   op,
4760 						   false);
4761 			req->int_vector_id_l =
4762 				hnae3_get_field(vector_id,
4763 						HCLGE_VECTOR_ID_L_M,
4764 						HCLGE_VECTOR_ID_L_S);
4765 			req->int_vector_id_h =
4766 				hnae3_get_field(vector_id,
4767 						HCLGE_VECTOR_ID_H_M,
4768 						HCLGE_VECTOR_ID_H_S);
4769 		}
4770 	}
4771 
4772 	if (i > 0) {
4773 		req->int_cause_num = i;
4774 		req->vfid = vport->vport_id;
4775 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4776 		if (status) {
4777 			dev_err(&hdev->pdev->dev,
4778 				"Map TQP fail, status is %d.\n", status);
4779 			return -EIO;
4780 		}
4781 	}
4782 
4783 	return 0;
4784 }
4785 
4786 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4787 				    struct hnae3_ring_chain_node *ring_chain)
4788 {
4789 	struct hclge_vport *vport = hclge_get_vport(handle);
4790 	struct hclge_dev *hdev = vport->back;
4791 	int vector_id;
4792 
4793 	vector_id = hclge_get_vector_index(hdev, vector);
4794 	if (vector_id < 0) {
4795 		dev_err(&hdev->pdev->dev,
4796 			"failed to get vector index. vector=%d\n", vector);
4797 		return vector_id;
4798 	}
4799 
4800 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4801 }
4802 
4803 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4804 				       struct hnae3_ring_chain_node *ring_chain)
4805 {
4806 	struct hclge_vport *vport = hclge_get_vport(handle);
4807 	struct hclge_dev *hdev = vport->back;
4808 	int vector_id, ret;
4809 
4810 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4811 		return 0;
4812 
4813 	vector_id = hclge_get_vector_index(hdev, vector);
4814 	if (vector_id < 0) {
4815 		dev_err(&handle->pdev->dev,
4816 			"Get vector index fail. ret =%d\n", vector_id);
4817 		return vector_id;
4818 	}
4819 
4820 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4821 	if (ret)
4822 		dev_err(&handle->pdev->dev,
4823 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4824 			vector_id, ret);
4825 
4826 	return ret;
4827 }
4828 
4829 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4830 				      struct hclge_promisc_param *param)
4831 {
4832 	struct hclge_promisc_cfg_cmd *req;
4833 	struct hclge_desc desc;
4834 	int ret;
4835 
4836 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4837 
4838 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4839 	req->vf_id = param->vf_id;
4840 
4841 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4842 	 * pdev revision(0x20); newer revisions support them. Setting these
4843 	 * two fields does not cause an error when the driver sends the
4844 	 * command to the firmware on revision(0x20).
4845 	 */
4846 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4847 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4848 
4849 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4850 	if (ret)
4851 		dev_err(&hdev->pdev->dev,
4852 			"failed to set vport %d promisc mode, ret = %d.\n",
4853 			param->vf_id, ret);
4854 
4855 	return ret;
4856 }
4857 
4858 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4859 				     bool en_uc, bool en_mc, bool en_bc,
4860 				     int vport_id)
4861 {
4862 	if (!param)
4863 		return;
4864 
4865 	memset(param, 0, sizeof(struct hclge_promisc_param));
4866 	if (en_uc)
4867 		param->enable = HCLGE_PROMISC_EN_UC;
4868 	if (en_mc)
4869 		param->enable |= HCLGE_PROMISC_EN_MC;
4870 	if (en_bc)
4871 		param->enable |= HCLGE_PROMISC_EN_BC;
4872 	param->vf_id = vport_id;
4873 }
4874 
4875 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4876 				 bool en_mc_pmc, bool en_bc_pmc)
4877 {
4878 	struct hclge_dev *hdev = vport->back;
4879 	struct hclge_promisc_param param;
4880 
4881 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4882 				 vport->vport_id);
4883 	return hclge_cmd_set_promisc_mode(hdev, &param);
4884 }
4885 
4886 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4887 				  bool en_mc_pmc)
4888 {
4889 	struct hclge_vport *vport = hclge_get_vport(handle);
4890 	struct hclge_dev *hdev = vport->back;
4891 	bool en_bc_pmc = true;
4892 
4893 	/* For devices whose version is below V2, if broadcast promisc is
4894 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
4895 	 * should be disabled until the user enables promisc mode.
4896 	 */
4897 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4898 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4899 
4900 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4901 					    en_bc_pmc);
4902 }
4903 
4904 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4905 {
4906 	struct hclge_vport *vport = hclge_get_vport(handle);
4907 	struct hclge_dev *hdev = vport->back;
4908 
4909 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4910 }
4911 
4912 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4913 {
4914 	struct hclge_get_fd_mode_cmd *req;
4915 	struct hclge_desc desc;
4916 	int ret;
4917 
4918 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4919 
4920 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4921 
4922 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4923 	if (ret) {
4924 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4925 		return ret;
4926 	}
4927 
4928 	*fd_mode = req->mode;
4929 
4930 	return ret;
4931 }
4932 
4933 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4934 				   u32 *stage1_entry_num,
4935 				   u32 *stage2_entry_num,
4936 				   u16 *stage1_counter_num,
4937 				   u16 *stage2_counter_num)
4938 {
4939 	struct hclge_get_fd_allocation_cmd *req;
4940 	struct hclge_desc desc;
4941 	int ret;
4942 
4943 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4944 
4945 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4946 
4947 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4948 	if (ret) {
4949 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4950 			ret);
4951 		return ret;
4952 	}
4953 
4954 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4955 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4956 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4957 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4958 
4959 	return ret;
4960 }
4961 
4962 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4963 				   enum HCLGE_FD_STAGE stage_num)
4964 {
4965 	struct hclge_set_fd_key_config_cmd *req;
4966 	struct hclge_fd_key_cfg *stage;
4967 	struct hclge_desc desc;
4968 	int ret;
4969 
4970 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4971 
4972 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4973 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4974 	req->stage = stage_num;
4975 	req->key_select = stage->key_sel;
4976 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4977 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4978 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4979 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4980 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4981 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4982 
4983 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4984 	if (ret)
4985 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4986 
4987 	return ret;
4988 }
4989 
4990 static int hclge_init_fd_config(struct hclge_dev *hdev)
4991 {
4992 #define LOW_2_WORDS		0x03
4993 	struct hclge_fd_key_cfg *key_cfg;
4994 	int ret;
4995 
4996 	if (!hnae3_dev_fd_supported(hdev))
4997 		return 0;
4998 
4999 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5000 	if (ret)
5001 		return ret;
5002 
5003 	switch (hdev->fd_cfg.fd_mode) {
5004 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5005 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5006 		break;
5007 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5008 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5009 		break;
5010 	default:
5011 		dev_err(&hdev->pdev->dev,
5012 			"Unsupported flow director mode %u\n",
5013 			hdev->fd_cfg.fd_mode);
5014 		return -EOPNOTSUPP;
5015 	}
5016 
5017 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5018 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5019 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5020 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5021 	key_cfg->outer_sipv6_word_en = 0;
5022 	key_cfg->outer_dipv6_word_en = 0;
5023 
5024 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5025 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5026 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5027 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5028 
5029 	/* If the max 400-bit key is used, MAC address tuples are also supported */
5030 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5031 		key_cfg->tuple_active |=
5032 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5033 
5034 	/* roce_type is used to filter roce frames
5035 	 * dst_vport is used to specify the rule
5036 	 */
5037 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5038 
5039 	ret = hclge_get_fd_allocation(hdev,
5040 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5041 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5042 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5043 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5044 	if (ret)
5045 		return ret;
5046 
5047 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5048 }
5049 
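/* Write one half (x or y, selected by @sel_x) of a flow director TCAM entry
 * at location @loc. The key is spread across three descriptors sent as a
 * single chained HCLGE_OPC_FD_TCAM_OP command; the entry is marked valid only
 * when writing the x half of an added rule.
 */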
5050 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5051 				int loc, u8 *key, bool is_add)
5052 {
5053 	struct hclge_fd_tcam_config_1_cmd *req1;
5054 	struct hclge_fd_tcam_config_2_cmd *req2;
5055 	struct hclge_fd_tcam_config_3_cmd *req3;
5056 	struct hclge_desc desc[3];
5057 	int ret;
5058 
5059 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5060 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5061 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5062 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5063 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5064 
5065 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5066 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5067 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5068 
5069 	req1->stage = stage;
5070 	req1->xy_sel = sel_x ? 1 : 0;
5071 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5072 	req1->index = cpu_to_le32(loc);
5073 	req1->entry_vld = sel_x ? is_add : 0;
5074 
5075 	if (key) {
5076 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5077 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5078 		       sizeof(req2->tcam_data));
5079 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5080 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5081 	}
5082 
5083 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5084 	if (ret)
5085 		dev_err(&hdev->pdev->dev,
5086 			"config tcam key fail, ret=%d\n",
5087 			ret);
5088 
5089 	return ret;
5090 }
5091 
5092 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5093 			      struct hclge_fd_ad_data *action)
5094 {
5095 	struct hclge_fd_ad_config_cmd *req;
5096 	struct hclge_desc desc;
5097 	u64 ad_data = 0;
5098 	int ret;
5099 
5100 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5101 
5102 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5103 	req->index = cpu_to_le32(loc);
5104 	req->stage = stage;
5105 
5106 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5107 		      action->write_rule_id_to_bd);
5108 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5109 			action->rule_id);
5110 	ad_data <<= 32;
5111 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5112 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5113 		      action->forward_to_direct_queue);
5114 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5115 			action->queue_id);
5116 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5117 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5118 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5119 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5120 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5121 			action->counter_id);
5122 
5123 	req->ad_data = cpu_to_le64(ad_data);
5124 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5125 	if (ret)
5126 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5127 
5128 	return ret;
5129 }
5130 
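/* Convert one tuple of @rule (value plus mask) into the TCAM x/y key format
 * at @key_x/@key_y using calc_x()/calc_y(). Returns true if the tuple
 * occupies key space (including unused tuples, whose key bytes are left as
 * zero), false if the tuple bit is not handled here.
 */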
5131 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5132 				   struct hclge_fd_rule *rule)
5133 {
5134 	u16 tmp_x_s, tmp_y_s;
5135 	u32 tmp_x_l, tmp_y_l;
5136 	int i;
5137 
5138 	if (rule->unused_tuple & tuple_bit)
5139 		return true;
5140 
5141 	switch (tuple_bit) {
5142 	case BIT(INNER_DST_MAC):
5143 		for (i = 0; i < ETH_ALEN; i++) {
5144 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5145 			       rule->tuples_mask.dst_mac[i]);
5146 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5147 			       rule->tuples_mask.dst_mac[i]);
5148 		}
5149 
5150 		return true;
5151 	case BIT(INNER_SRC_MAC):
5152 		for (i = 0; i < ETH_ALEN; i++) {
5153 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5154 			       rule->tuples_mask.src_mac[i]);
5155 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5156 			       rule->tuples_mask.src_mac[i]);
5157 		}
5158 
5159 		return true;
5160 	case BIT(INNER_VLAN_TAG_FST):
5161 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5162 		       rule->tuples_mask.vlan_tag1);
5163 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5164 		       rule->tuples_mask.vlan_tag1);
5165 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5166 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5167 
5168 		return true;
5169 	case BIT(INNER_ETH_TYPE):
5170 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5171 		       rule->tuples_mask.ether_proto);
5172 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5173 		       rule->tuples_mask.ether_proto);
5174 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5175 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5176 
5177 		return true;
5178 	case BIT(INNER_IP_TOS):
5179 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5180 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5181 
5182 		return true;
5183 	case BIT(INNER_IP_PROTO):
5184 		calc_x(*key_x, rule->tuples.ip_proto,
5185 		       rule->tuples_mask.ip_proto);
5186 		calc_y(*key_y, rule->tuples.ip_proto,
5187 		       rule->tuples_mask.ip_proto);
5188 
5189 		return true;
5190 	case BIT(INNER_SRC_IP):
5191 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5192 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5193 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5194 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5195 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5196 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5197 
5198 		return true;
5199 	case BIT(INNER_DST_IP):
5200 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5201 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5202 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5203 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5204 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5205 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5206 
5207 		return true;
5208 	case BIT(INNER_SRC_PORT):
5209 		calc_x(tmp_x_s, rule->tuples.src_port,
5210 		       rule->tuples_mask.src_port);
5211 		calc_y(tmp_y_s, rule->tuples.src_port,
5212 		       rule->tuples_mask.src_port);
5213 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5214 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5215 
5216 		return true;
5217 	case BIT(INNER_DST_PORT):
5218 		calc_x(tmp_x_s, rule->tuples.dst_port,
5219 		       rule->tuples_mask.dst_port);
5220 		calc_y(tmp_y_s, rule->tuples.dst_port,
5221 		       rule->tuples_mask.dst_port);
5222 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5223 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5224 
5225 		return true;
5226 	default:
5227 		return false;
5228 	}
5229 }
5230 
5231 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5232 				 u8 vf_id, u8 network_port_id)
5233 {
5234 	u32 port_number = 0;
5235 
5236 	if (port_type == HOST_PORT) {
5237 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5238 				pf_id);
5239 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5240 				vf_id);
5241 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5242 	} else {
5243 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5244 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5245 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5246 	}
5247 
5248 	return port_number;
5249 }
5250 
5251 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5252 				       __le32 *key_x, __le32 *key_y,
5253 				       struct hclge_fd_rule *rule)
5254 {
5255 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5256 	u8 cur_pos = 0, tuple_size, shift_bits;
5257 	unsigned int i;
5258 
5259 	for (i = 0; i < MAX_META_DATA; i++) {
5260 		tuple_size = meta_data_key_info[i].key_length;
5261 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5262 
5263 		switch (tuple_bit) {
5264 		case BIT(ROCE_TYPE):
5265 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5266 			cur_pos += tuple_size;
5267 			break;
5268 		case BIT(DST_VPORT):
5269 			port_number = hclge_get_port_number(HOST_PORT, 0,
5270 							    rule->vf_id, 0);
5271 			hnae3_set_field(meta_data,
5272 					GENMASK(cur_pos + tuple_size, cur_pos),
5273 					cur_pos, port_number);
5274 			cur_pos += tuple_size;
5275 			break;
5276 		default:
5277 			break;
5278 		}
5279 	}
5280 
5281 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5282 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5283 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5284 
5285 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5286 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5287 }
5288 
5289 /* A complete key is made up of a meta data key and a tuple key.
5290  * The meta data key is stored at the MSB region, the tuple key is stored
5291  * at the LSB region, and unused bits are filled with 0.
5292  */
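/* Illustrative layout of one TCAM key as built below (the exact sizes come
 * from hdev->fd_cfg.max_key_length and MAX_META_DATA_LENGTH):
 *
 *   byte 0                                byte max_key_length / 8 - 1
 *   +---------------------------------+----------------------------+
 *   |         tuple key (LSB)         |     meta data key (MSB)    |
 *   +---------------------------------+----------------------------+
 *    filled by hclge_fd_convert_tuple   hclge_fd_convert_meta_data
 */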
5293 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5294 			    struct hclge_fd_rule *rule)
5295 {
5296 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5297 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5298 	u8 *cur_key_x, *cur_key_y;
5299 	u8 meta_data_region;
5300 	u8 tuple_size;
5301 	int ret;
5302 	u32 i;
5303 
5304 	memset(key_x, 0, sizeof(key_x));
5305 	memset(key_y, 0, sizeof(key_y));
5306 	cur_key_x = key_x;
5307 	cur_key_y = key_y;
5308 
5309 	for (i = 0; i < MAX_TUPLE; i++) {
5310 		bool tuple_valid;
5311 		u32 check_tuple;
5312 
5313 		tuple_size = tuple_key_info[i].key_length / 8;
5314 		check_tuple = key_cfg->tuple_active & BIT(i);
5315 
5316 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5317 						     cur_key_y, rule);
5318 		if (tuple_valid) {
5319 			cur_key_x += tuple_size;
5320 			cur_key_y += tuple_size;
5321 		}
5322 	}
5323 
5324 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5325 			MAX_META_DATA_LENGTH / 8;
5326 
5327 	hclge_fd_convert_meta_data(key_cfg,
5328 				   (__le32 *)(key_x + meta_data_region),
5329 				   (__le32 *)(key_y + meta_data_region),
5330 				   rule);
5331 
5332 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5333 				   true);
5334 	if (ret) {
5335 		dev_err(&hdev->pdev->dev,
5336 			"fd key_y config fail, loc=%u, ret=%d\n",
5337 			rule->location, ret);
5338 		return ret;
5339 	}
5340 
5341 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5342 				   true);
5343 	if (ret)
5344 		dev_err(&hdev->pdev->dev,
5345 			"fd key_x config fail, loc=%u, ret=%d\n",
5346 			rule->location, ret);
5347 	return ret;
5348 }
5349 
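/* Build the action data for a rule: either drop the packet or forward it to
 * the destination queue, with the rule id written back to the packet's
 * buffer descriptor (write_rule_id_to_bd). The result is programmed into the
 * AD table entry indexed by the rule location via hclge_fd_ad_config().
 */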
5350 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5351 			       struct hclge_fd_rule *rule)
5352 {
5353 	struct hclge_fd_ad_data ad_data;
5354 
5355 	ad_data.ad_id = rule->location;
5356 
5357 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5358 		ad_data.drop_packet = true;
5359 		ad_data.forward_to_direct_queue = false;
5360 		ad_data.queue_id = 0;
5361 	} else {
5362 		ad_data.drop_packet = false;
5363 		ad_data.forward_to_direct_queue = true;
5364 		ad_data.queue_id = rule->queue_id;
5365 	}
5366 
5367 	ad_data.use_counter = false;
5368 	ad_data.counter_id = 0;
5369 
5370 	ad_data.use_next_stage = false;
5371 	ad_data.next_input_key = 0;
5372 
5373 	ad_data.write_rule_id_to_bd = true;
5374 	ad_data.rule_id = rule->location;
5375 
5376 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5377 }
5378 
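/* The hclge_fd_check_*_tuple() helpers below validate one flavour of ethtool
 * flow spec and record in @unused_tuple which inner tuples carry no user
 * data; the result is stored in the rule and later used when reporting the
 * rule back through the hclge_fd_get_*_info() helpers. Fields the flow
 * director cannot match on make the helpers return -EOPNOTSUPP.
 */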
5379 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5380 				       u32 *unused_tuple)
5381 {
5382 	if (!spec || !unused_tuple)
5383 		return -EINVAL;
5384 
5385 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5386 
5387 	if (!spec->ip4src)
5388 		*unused_tuple |= BIT(INNER_SRC_IP);
5389 
5390 	if (!spec->ip4dst)
5391 		*unused_tuple |= BIT(INNER_DST_IP);
5392 
5393 	if (!spec->psrc)
5394 		*unused_tuple |= BIT(INNER_SRC_PORT);
5395 
5396 	if (!spec->pdst)
5397 		*unused_tuple |= BIT(INNER_DST_PORT);
5398 
5399 	if (!spec->tos)
5400 		*unused_tuple |= BIT(INNER_IP_TOS);
5401 
5402 	return 0;
5403 }
5404 
5405 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5406 				    u32 *unused_tuple)
5407 {
5408 	if (!spec || !unused_tuple)
5409 		return -EINVAL;
5410 
5411 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5412 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5413 
5414 	if (!spec->ip4src)
5415 		*unused_tuple |= BIT(INNER_SRC_IP);
5416 
5417 	if (!spec->ip4dst)
5418 		*unused_tuple |= BIT(INNER_DST_IP);
5419 
5420 	if (!spec->tos)
5421 		*unused_tuple |= BIT(INNER_IP_TOS);
5422 
5423 	if (!spec->proto)
5424 		*unused_tuple |= BIT(INNER_IP_PROTO);
5425 
5426 	if (spec->l4_4_bytes)
5427 		return -EOPNOTSUPP;
5428 
5429 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5430 		return -EOPNOTSUPP;
5431 
5432 	return 0;
5433 }
5434 
5435 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5436 				       u32 *unused_tuple)
5437 {
5438 	if (!spec || !unused_tuple)
5439 		return -EINVAL;
5440 
5441 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5442 		BIT(INNER_IP_TOS);
5443 
5444 	/* check whether src/dst ip address used */
5445 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5446 	    !spec->ip6src[2] && !spec->ip6src[3])
5447 		*unused_tuple |= BIT(INNER_SRC_IP);
5448 
5449 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5450 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5451 		*unused_tuple |= BIT(INNER_DST_IP);
5452 
5453 	if (!spec->psrc)
5454 		*unused_tuple |= BIT(INNER_SRC_PORT);
5455 
5456 	if (!spec->pdst)
5457 		*unused_tuple |= BIT(INNER_DST_PORT);
5458 
5459 	if (spec->tclass)
5460 		return -EOPNOTSUPP;
5461 
5462 	return 0;
5463 }
5464 
5465 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5466 				    u32 *unused_tuple)
5467 {
5468 	if (!spec || !unused_tuple)
5469 		return -EINVAL;
5470 
5471 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5472 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5473 
5474 	/* check whether src/dst ip address used */
5475 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5476 	    !spec->ip6src[2] && !spec->ip6src[3])
5477 		*unused_tuple |= BIT(INNER_SRC_IP);
5478 
5479 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5480 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5481 		*unused_tuple |= BIT(INNER_DST_IP);
5482 
5483 	if (!spec->l4_proto)
5484 		*unused_tuple |= BIT(INNER_IP_PROTO);
5485 
5486 	if (spec->tclass)
5487 		return -EOPNOTSUPP;
5488 
5489 	if (spec->l4_4_bytes)
5490 		return -EOPNOTSUPP;
5491 
5492 	return 0;
5493 }
5494 
5495 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5496 {
5497 	if (!spec || !unused_tuple)
5498 		return -EINVAL;
5499 
5500 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5501 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5502 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5503 
5504 	if (is_zero_ether_addr(spec->h_source))
5505 		*unused_tuple |= BIT(INNER_SRC_MAC);
5506 
5507 	if (is_zero_ether_addr(spec->h_dest))
5508 		*unused_tuple |= BIT(INNER_DST_MAC);
5509 
5510 	if (!spec->h_proto)
5511 		*unused_tuple |= BIT(INNER_ETH_TYPE);
5512 
5513 	return 0;
5514 }
5515 
5516 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5517 				    struct ethtool_rx_flow_spec *fs,
5518 				    u32 *unused_tuple)
5519 {
5520 	if (fs->flow_type & FLOW_EXT) {
5521 		if (fs->h_ext.vlan_etype) {
5522 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5523 			return -EOPNOTSUPP;
5524 		}
5525 
5526 		if (!fs->h_ext.vlan_tci)
5527 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5528 
5529 		if (fs->m_ext.vlan_tci &&
5530 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5531 			dev_err(&hdev->pdev->dev,
5532 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5533 				be16_to_cpu(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5534 			return -EINVAL;
5535 		}
5536 	} else {
5537 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5538 	}
5539 
5540 	if (fs->flow_type & FLOW_MAC_EXT) {
5541 		if (hdev->fd_cfg.fd_mode !=
5542 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5543 			dev_err(&hdev->pdev->dev,
5544 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
5545 			return -EOPNOTSUPP;
5546 		}
5547 
5548 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5549 			*unused_tuple |= BIT(INNER_DST_MAC);
5550 		else
5551 			*unused_tuple &= ~BIT(INNER_DST_MAC);
5552 	}
5553 
5554 	return 0;
5555 }
5556 
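/* Validate a complete ethtool flow spec: check the rule location against the
 * stage-1 rule capacity, reject user-defined extension data, dispatch to the
 * per-type tuple check above, and finally validate the FLOW_EXT /
 * FLOW_MAC_EXT fields.
 */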
5557 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5558 			       struct ethtool_rx_flow_spec *fs,
5559 			       u32 *unused_tuple)
5560 {
5561 	u32 flow_type;
5562 	int ret;
5563 
5564 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5565 		dev_err(&hdev->pdev->dev,
5566 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5567 			fs->location,
5568 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5569 		return -EINVAL;
5570 	}
5571 
5572 	if ((fs->flow_type & FLOW_EXT) &&
5573 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5574 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5575 		return -EOPNOTSUPP;
5576 	}
5577 
5578 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5579 	switch (flow_type) {
5580 	case SCTP_V4_FLOW:
5581 	case TCP_V4_FLOW:
5582 	case UDP_V4_FLOW:
5583 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5584 						  unused_tuple);
5585 		break;
5586 	case IP_USER_FLOW:
5587 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5588 					       unused_tuple);
5589 		break;
5590 	case SCTP_V6_FLOW:
5591 	case TCP_V6_FLOW:
5592 	case UDP_V6_FLOW:
5593 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5594 						  unused_tuple);
5595 		break;
5596 	case IPV6_USER_FLOW:
5597 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5598 					       unused_tuple);
5599 		break;
5600 	case ETHER_FLOW:
5601 		if (hdev->fd_cfg.fd_mode !=
5602 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5603 			dev_err(&hdev->pdev->dev,
5604 				"ETHER_FLOW is not supported in current fd mode!\n");
5605 			return -EOPNOTSUPP;
5606 		}
5607 
5608 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5609 						 unused_tuple);
5610 		break;
5611 	default:
5612 		dev_err(&hdev->pdev->dev,
5613 			"unsupported protocol type, protocol type = %#x\n",
5614 			flow_type);
5615 		return -EOPNOTSUPP;
5616 	}
5617 
5618 	if (ret) {
5619 		dev_err(&hdev->pdev->dev,
5620 			"failed to check flow union tuple, ret = %d\n",
5621 			ret);
5622 		return ret;
5623 	}
5624 
5625 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5626 }
5627 
5628 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5629 {
5630 	struct hclge_fd_rule *rule = NULL;
5631 	struct hlist_node *node2;
5632 
5633 	spin_lock_bh(&hdev->fd_rule_lock);
5634 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5635 		if (rule->location >= location)
5636 			break;
5637 	}
5638 
5639 	spin_unlock_bh(&hdev->fd_rule_lock);
5640 
5641 	return rule && rule->location == location;
5642 }
5643 
5644 /* the caller must hold fd_rule_lock */
5645 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5646 				     struct hclge_fd_rule *new_rule,
5647 				     u16 location,
5648 				     bool is_add)
5649 {
5650 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5651 	struct hlist_node *node2;
5652 
5653 	if (is_add && !new_rule)
5654 		return -EINVAL;
5655 
5656 	hlist_for_each_entry_safe(rule, node2,
5657 				  &hdev->fd_rule_list, rule_node) {
5658 		if (rule->location >= location)
5659 			break;
5660 		parent = rule;
5661 	}
5662 
5663 	if (rule && rule->location == location) {
5664 		hlist_del(&rule->rule_node);
5665 		kfree(rule);
5666 		hdev->hclge_fd_rule_num--;
5667 
5668 		if (!is_add) {
5669 			if (!hdev->hclge_fd_rule_num)
5670 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5671 			clear_bit(location, hdev->fd_bmap);
5672 
5673 			return 0;
5674 		}
5675 	} else if (!is_add) {
5676 		dev_err(&hdev->pdev->dev,
5677 			"delete fail, rule %u does not exist\n",
5678 			location);
5679 		return -EINVAL;
5680 	}
5681 
5682 	INIT_HLIST_NODE(&new_rule->rule_node);
5683 
5684 	if (parent)
5685 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5686 	else
5687 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5688 
5689 	set_bit(location, hdev->fd_bmap);
5690 	hdev->hclge_fd_rule_num++;
5691 	hdev->fd_active_type = new_rule->rule_type;
5692 
5693 	return 0;
5694 }
5695 
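/* Translate an ethtool flow spec into the driver's internal rule tuples.
 * Values and masks are converted to host byte order here;
 * hclge_fd_convert_tuple() converts them back to the TCAM key format when
 * the rule is programmed.
 */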
5696 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5697 			      struct ethtool_rx_flow_spec *fs,
5698 			      struct hclge_fd_rule *rule)
5699 {
5700 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5701 
5702 	switch (flow_type) {
5703 	case SCTP_V4_FLOW:
5704 	case TCP_V4_FLOW:
5705 	case UDP_V4_FLOW:
5706 		rule->tuples.src_ip[IPV4_INDEX] =
5707 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5708 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5709 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5710 
5711 		rule->tuples.dst_ip[IPV4_INDEX] =
5712 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5713 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5714 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5715 
5716 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5717 		rule->tuples_mask.src_port =
5718 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5719 
5720 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5721 		rule->tuples_mask.dst_port =
5722 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5723 
5724 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5725 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5726 
5727 		rule->tuples.ether_proto = ETH_P_IP;
5728 		rule->tuples_mask.ether_proto = 0xFFFF;
5729 
5730 		break;
5731 	case IP_USER_FLOW:
5732 		rule->tuples.src_ip[IPV4_INDEX] =
5733 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5734 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5735 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5736 
5737 		rule->tuples.dst_ip[IPV4_INDEX] =
5738 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5739 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5740 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5741 
5742 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5743 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5744 
5745 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5746 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5747 
5748 		rule->tuples.ether_proto = ETH_P_IP;
5749 		rule->tuples_mask.ether_proto = 0xFFFF;
5750 
5751 		break;
5752 	case SCTP_V6_FLOW:
5753 	case TCP_V6_FLOW:
5754 	case UDP_V6_FLOW:
5755 		be32_to_cpu_array(rule->tuples.src_ip,
5756 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5757 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5758 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5759 
5760 		be32_to_cpu_array(rule->tuples.dst_ip,
5761 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5762 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5763 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5764 
5765 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5766 		rule->tuples_mask.src_port =
5767 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5768 
5769 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5770 		rule->tuples_mask.dst_port =
5771 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5772 
5773 		rule->tuples.ether_proto = ETH_P_IPV6;
5774 		rule->tuples_mask.ether_proto = 0xFFFF;
5775 
5776 		break;
5777 	case IPV6_USER_FLOW:
5778 		be32_to_cpu_array(rule->tuples.src_ip,
5779 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5780 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5781 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5782 
5783 		be32_to_cpu_array(rule->tuples.dst_ip,
5784 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5785 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5786 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5787 
5788 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5789 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5790 
5791 		rule->tuples.ether_proto = ETH_P_IPV6;
5792 		rule->tuples_mask.ether_proto = 0xFFFF;
5793 
5794 		break;
5795 	case ETHER_FLOW:
5796 		ether_addr_copy(rule->tuples.src_mac,
5797 				fs->h_u.ether_spec.h_source);
5798 		ether_addr_copy(rule->tuples_mask.src_mac,
5799 				fs->m_u.ether_spec.h_source);
5800 
5801 		ether_addr_copy(rule->tuples.dst_mac,
5802 				fs->h_u.ether_spec.h_dest);
5803 		ether_addr_copy(rule->tuples_mask.dst_mac,
5804 				fs->m_u.ether_spec.h_dest);
5805 
5806 		rule->tuples.ether_proto =
5807 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5808 		rule->tuples_mask.ether_proto =
5809 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5810 
5811 		break;
5812 	default:
5813 		return -EOPNOTSUPP;
5814 	}
5815 
5816 	switch (flow_type) {
5817 	case SCTP_V4_FLOW:
5818 	case SCTP_V6_FLOW:
5819 		rule->tuples.ip_proto = IPPROTO_SCTP;
5820 		rule->tuples_mask.ip_proto = 0xFF;
5821 		break;
5822 	case TCP_V4_FLOW:
5823 	case TCP_V6_FLOW:
5824 		rule->tuples.ip_proto = IPPROTO_TCP;
5825 		rule->tuples_mask.ip_proto = 0xFF;
5826 		break;
5827 	case UDP_V4_FLOW:
5828 	case UDP_V6_FLOW:
5829 		rule->tuples.ip_proto = IPPROTO_UDP;
5830 		rule->tuples_mask.ip_proto = 0xFF;
5831 		break;
5832 	default:
5833 		break;
5834 	}
5835 
5836 	if (fs->flow_type & FLOW_EXT) {
5837 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5838 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5839 	}
5840 
5841 	if (fs->flow_type & FLOW_MAC_EXT) {
5842 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5843 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5844 	}
5845 
5846 	return 0;
5847 }
5848 
5849 /* the caller must hold fd_rule_lock */
5850 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5851 				struct hclge_fd_rule *rule)
5852 {
5853 	int ret;
5854 
5855 	if (!rule) {
5856 		dev_err(&hdev->pdev->dev,
5857 			"The flow director rule is NULL\n");
5858 		return -EINVAL;
5859 	}
5860 
5861 	/* it never fails here, so there is no need to check the return value */
5862 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5863 
5864 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5865 	if (ret)
5866 		goto clear_rule;
5867 
5868 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5869 	if (ret)
5870 		goto clear_rule;
5871 
5872 	return 0;
5873 
5874 clear_rule:
5875 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5876 	return ret;
5877 }
5878 
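/* Add a flow director rule requested through ethtool. A hypothetical example
 * of a command that reaches this handler via the set_rxnfc path (interface
 * name, addresses and locations are only illustrative):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.10 dst-port 80 \
 *           action 3 loc 5
 *
 * The destination vport and queue are resolved from the ring_cookie, any
 * existing aRFS rules are cleared to avoid conflicts, and the rule is
 * written to the stage-1 TCAM.
 */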
5879 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5880 			      struct ethtool_rxnfc *cmd)
5881 {
5882 	struct hclge_vport *vport = hclge_get_vport(handle);
5883 	struct hclge_dev *hdev = vport->back;
5884 	u16 dst_vport_id = 0, q_index = 0;
5885 	struct ethtool_rx_flow_spec *fs;
5886 	struct hclge_fd_rule *rule;
5887 	u32 unused = 0;
5888 	u8 action;
5889 	int ret;
5890 
5891 	if (!hnae3_dev_fd_supported(hdev)) {
5892 		dev_err(&hdev->pdev->dev,
5893 			"flow director is not supported\n");
5894 		return -EOPNOTSUPP;
5895 	}
5896 
5897 	if (!hdev->fd_en) {
5898 		dev_err(&hdev->pdev->dev,
5899 			"please enable flow director first\n");
5900 		return -EOPNOTSUPP;
5901 	}
5902 
5903 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5904 
5905 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5906 	if (ret)
5907 		return ret;
5908 
5909 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5910 		action = HCLGE_FD_ACTION_DROP_PACKET;
5911 	} else {
5912 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5913 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5914 		u16 tqps;
5915 
5916 		if (vf > hdev->num_req_vfs) {
5917 			dev_err(&hdev->pdev->dev,
5918 				"Error: vf id (%u) > max vf num (%u)\n",
5919 				vf, hdev->num_req_vfs);
5920 			return -EINVAL;
5921 		}
5922 
5923 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5924 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5925 
5926 		if (ring >= tqps) {
5927 			dev_err(&hdev->pdev->dev,
5928 				"Error: queue id (%u) > max queue id (%u)\n",
5929 				ring, tqps - 1);
5930 			return -EINVAL;
5931 		}
5932 
5933 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5934 		q_index = ring;
5935 	}
5936 
5937 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5938 	if (!rule)
5939 		return -ENOMEM;
5940 
5941 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5942 	if (ret) {
5943 		kfree(rule);
5944 		return ret;
5945 	}
5946 
5947 	rule->flow_type = fs->flow_type;
5948 	rule->location = fs->location;
5949 	rule->unused_tuple = unused;
5950 	rule->vf_id = dst_vport_id;
5951 	rule->queue_id = q_index;
5952 	rule->action = action;
5953 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5954 
5955 	/* to avoid rule conflicts, clear all arfs rules when the user
5956 	 * configures a rule via ethtool
5957 	 */
5958 	spin_lock_bh(&hdev->fd_rule_lock);
5959 	hclge_clear_arfs_rules(handle);
5960 
5961 	ret = hclge_fd_config_rule(hdev, rule);
5962 
5963 	spin_unlock_bh(&hdev->fd_rule_lock);
5964 
5965 	return ret;
5966 }
5967 
5968 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5969 			      struct ethtool_rxnfc *cmd)
5970 {
5971 	struct hclge_vport *vport = hclge_get_vport(handle);
5972 	struct hclge_dev *hdev = vport->back;
5973 	struct ethtool_rx_flow_spec *fs;
5974 	int ret;
5975 
5976 	if (!hnae3_dev_fd_supported(hdev))
5977 		return -EOPNOTSUPP;
5978 
5979 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5980 
5981 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5982 		return -EINVAL;
5983 
5984 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5985 		dev_err(&hdev->pdev->dev,
5986 			"Delete fail, rule %u does not exist\n", fs->location);
5987 		return -ENOENT;
5988 	}
5989 
5990 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5991 				   NULL, false);
5992 	if (ret)
5993 		return ret;
5994 
5995 	spin_lock_bh(&hdev->fd_rule_lock);
5996 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5997 
5998 	spin_unlock_bh(&hdev->fd_rule_lock);
5999 
6000 	return ret;
6001 }
6002 
6003 /* the caller must hold fd_rule_lock */
6004 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6005 				     bool clear_list)
6006 {
6007 	struct hclge_vport *vport = hclge_get_vport(handle);
6008 	struct hclge_dev *hdev = vport->back;
6009 	struct hclge_fd_rule *rule;
6010 	struct hlist_node *node;
6011 	u16 location;
6012 
6013 	if (!hnae3_dev_fd_supported(hdev))
6014 		return;
6015 
6016 	for_each_set_bit(location, hdev->fd_bmap,
6017 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6018 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6019 				     NULL, false);
6020 
6021 	if (clear_list) {
6022 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6023 					  rule_node) {
6024 			hlist_del(&rule->rule_node);
6025 			kfree(rule);
6026 		}
6027 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6028 		hdev->hclge_fd_rule_num = 0;
6029 		bitmap_zero(hdev->fd_bmap,
6030 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6031 	}
6032 }
6033 
6034 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6035 {
6036 	struct hclge_vport *vport = hclge_get_vport(handle);
6037 	struct hclge_dev *hdev = vport->back;
6038 	struct hclge_fd_rule *rule;
6039 	struct hlist_node *node;
6040 	int ret;
6041 
6042 	/* Return 0 here, because the reset error handling checks this
6043 	 * return value. If an error is returned here, the reset process
6044 	 * will fail.
6045 	 */
6046 	if (!hnae3_dev_fd_supported(hdev))
6047 		return 0;
6048 
6049 	/* if fd is disabled, the rules should not be restored during reset */
6050 	if (!hdev->fd_en)
6051 		return 0;
6052 
6053 	spin_lock_bh(&hdev->fd_rule_lock);
6054 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6055 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6056 		if (!ret)
6057 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6058 
6059 		if (ret) {
6060 			dev_warn(&hdev->pdev->dev,
6061 				 "Restore rule %u failed, remove it\n",
6062 				 rule->location);
6063 			clear_bit(rule->location, hdev->fd_bmap);
6064 			hlist_del(&rule->rule_node);
6065 			kfree(rule);
6066 			hdev->hclge_fd_rule_num--;
6067 		}
6068 	}
6069 
6070 	if (hdev->hclge_fd_rule_num)
6071 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6072 
6073 	spin_unlock_bh(&hdev->fd_rule_lock);
6074 
6075 	return 0;
6076 }
6077 
6078 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6079 				 struct ethtool_rxnfc *cmd)
6080 {
6081 	struct hclge_vport *vport = hclge_get_vport(handle);
6082 	struct hclge_dev *hdev = vport->back;
6083 
6084 	if (!hnae3_dev_fd_supported(hdev))
6085 		return -EOPNOTSUPP;
6086 
6087 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6088 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6089 
6090 	return 0;
6091 }
6092 
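/* The hclge_fd_get_*_info() helpers below do the reverse of
 * hclge_fd_get_tuple(): they fill an ethtool spec and its mask from a stored
 * rule, typically reporting a zero mask for tuples marked unused in the
 * rule.
 */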
6093 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6094 				     struct ethtool_tcpip4_spec *spec,
6095 				     struct ethtool_tcpip4_spec *spec_mask)
6096 {
6097 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6098 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6099 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6100 
6101 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6102 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6103 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6104 
6105 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6106 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6107 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6108 
6109 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6110 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6111 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6112 
6113 	spec->tos = rule->tuples.ip_tos;
6114 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6115 			0 : rule->tuples_mask.ip_tos;
6116 }
6117 
6118 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6119 				  struct ethtool_usrip4_spec *spec,
6120 				  struct ethtool_usrip4_spec *spec_mask)
6121 {
6122 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6123 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6124 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6125 
6126 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6127 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6128 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6129 
6130 	spec->tos = rule->tuples.ip_tos;
6131 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6132 			0 : rule->tuples_mask.ip_tos;
6133 
6134 	spec->proto = rule->tuples.ip_proto;
6135 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6136 			0 : rule->tuples_mask.ip_proto;
6137 
6138 	spec->ip_ver = ETH_RX_NFC_IP4;
6139 }
6140 
6141 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6142 				     struct ethtool_tcpip6_spec *spec,
6143 				     struct ethtool_tcpip6_spec *spec_mask)
6144 {
6145 	cpu_to_be32_array(spec->ip6src,
6146 			  rule->tuples.src_ip, IPV6_SIZE);
6147 	cpu_to_be32_array(spec->ip6dst,
6148 			  rule->tuples.dst_ip, IPV6_SIZE);
6149 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6150 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6151 	else
6152 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6153 				  IPV6_SIZE);
6154 
6155 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6156 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6157 	else
6158 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6159 				  IPV6_SIZE);
6160 
6161 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6162 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6163 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6164 
6165 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6166 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6167 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6168 }
6169 
6170 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6171 				  struct ethtool_usrip6_spec *spec,
6172 				  struct ethtool_usrip6_spec *spec_mask)
6173 {
6174 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6175 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6176 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6177 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6178 	else
6179 		cpu_to_be32_array(spec_mask->ip6src,
6180 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6181 
6182 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6183 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6184 	else
6185 		cpu_to_be32_array(spec_mask->ip6dst,
6186 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6187 
6188 	spec->l4_proto = rule->tuples.ip_proto;
6189 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6190 			0 : rule->tuples_mask.ip_proto;
6191 }
6192 
6193 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6194 				    struct ethhdr *spec,
6195 				    struct ethhdr *spec_mask)
6196 {
6197 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6198 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6199 
6200 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6201 		eth_zero_addr(spec_mask->h_source);
6202 	else
6203 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6204 
6205 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6206 		eth_zero_addr(spec_mask->h_dest);
6207 	else
6208 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6209 
6210 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6211 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6212 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6213 }
6214 
6215 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6216 				  struct hclge_fd_rule *rule)
6217 {
6218 	if (fs->flow_type & FLOW_EXT) {
6219 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6220 		fs->m_ext.vlan_tci =
6221 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6222 				cpu_to_be16(VLAN_VID_MASK) :
6223 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
6224 	}
6225 
6226 	if (fs->flow_type & FLOW_MAC_EXT) {
6227 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6228 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6229 			eth_zero_addr(fs->m_ext.h_dest);
6230 		else
6231 			ether_addr_copy(fs->m_ext.h_dest,
6232 					rule->tuples_mask.dst_mac);
6233 	}
6234 }
6235 
6236 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6237 				  struct ethtool_rxnfc *cmd)
6238 {
6239 	struct hclge_vport *vport = hclge_get_vport(handle);
6240 	struct hclge_fd_rule *rule = NULL;
6241 	struct hclge_dev *hdev = vport->back;
6242 	struct ethtool_rx_flow_spec *fs;
6243 	struct hlist_node *node2;
6244 
6245 	if (!hnae3_dev_fd_supported(hdev))
6246 		return -EOPNOTSUPP;
6247 
6248 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6249 
6250 	spin_lock_bh(&hdev->fd_rule_lock);
6251 
6252 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6253 		if (rule->location >= fs->location)
6254 			break;
6255 	}
6256 
6257 	if (!rule || fs->location != rule->location) {
6258 		spin_unlock_bh(&hdev->fd_rule_lock);
6259 
6260 		return -ENOENT;
6261 	}
6262 
6263 	fs->flow_type = rule->flow_type;
6264 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6265 	case SCTP_V4_FLOW:
6266 	case TCP_V4_FLOW:
6267 	case UDP_V4_FLOW:
6268 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6269 					 &fs->m_u.tcp_ip4_spec);
6270 		break;
6271 	case IP_USER_FLOW:
6272 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6273 				      &fs->m_u.usr_ip4_spec);
6274 		break;
6275 	case SCTP_V6_FLOW:
6276 	case TCP_V6_FLOW:
6277 	case UDP_V6_FLOW:
6278 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6279 					 &fs->m_u.tcp_ip6_spec);
6280 		break;
6281 	case IPV6_USER_FLOW:
6282 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6283 				      &fs->m_u.usr_ip6_spec);
6284 		break;
6285 	/* The flow type of the fd rule has been checked before it was added
6286 	 * to the rule list. As the other flow types have been handled above,
6287 	 * the default case must be ETHER_FLOW
6288 	 */
6289 	default:
6290 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6291 					&fs->m_u.ether_spec);
6292 		break;
6293 	}
6294 
6295 	hclge_fd_get_ext_info(fs, rule);
6296 
6297 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6298 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6299 	} else {
6300 		u64 vf_id;
6301 
6302 		fs->ring_cookie = rule->queue_id;
6303 		vf_id = rule->vf_id;
6304 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6305 		fs->ring_cookie |= vf_id;
6306 	}
6307 
6308 	spin_unlock_bh(&hdev->fd_rule_lock);
6309 
6310 	return 0;
6311 }
6312 
6313 static int hclge_get_all_rules(struct hnae3_handle *handle,
6314 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6315 {
6316 	struct hclge_vport *vport = hclge_get_vport(handle);
6317 	struct hclge_dev *hdev = vport->back;
6318 	struct hclge_fd_rule *rule;
6319 	struct hlist_node *node2;
6320 	int cnt = 0;
6321 
6322 	if (!hnae3_dev_fd_supported(hdev))
6323 		return -EOPNOTSUPP;
6324 
6325 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6326 
6327 	spin_lock_bh(&hdev->fd_rule_lock);
6328 	hlist_for_each_entry_safe(rule, node2,
6329 				  &hdev->fd_rule_list, rule_node) {
6330 		if (cnt == cmd->rule_cnt) {
6331 			spin_unlock_bh(&hdev->fd_rule_lock);
6332 			return -EMSGSIZE;
6333 		}
6334 
6335 		rule_locs[cnt] = rule->location;
6336 		cnt++;
6337 	}
6338 
6339 	spin_unlock_bh(&hdev->fd_rule_lock);
6340 
6341 	cmd->rule_cnt = cnt;
6342 
6343 	return 0;
6344 }
6345 
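/* Extract the tuples used for aRFS matching from the dissected flow keys.
 * For IPv4 only the last word of the address arrays is used (index 3, the
 * same slot as IPV4_INDEX elsewhere in this file); for IPv6 all IPV6_SIZE
 * words are copied.
 */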
6346 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6347 				     struct hclge_fd_rule_tuples *tuples)
6348 {
6349 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6350 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6351 
6352 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6353 	tuples->ip_proto = fkeys->basic.ip_proto;
6354 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6355 
6356 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6357 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6358 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6359 	} else {
6360 		int i;
6361 
6362 		for (i = 0; i < IPV6_SIZE; i++) {
6363 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6364 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6365 		}
6366 	}
6367 }
6368 
6369 /* traverse all rules, check whether an existing rule has the same tuples */
6370 static struct hclge_fd_rule *
6371 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6372 			  const struct hclge_fd_rule_tuples *tuples)
6373 {
6374 	struct hclge_fd_rule *rule = NULL;
6375 	struct hlist_node *node;
6376 
6377 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6378 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6379 			return rule;
6380 	}
6381 
6382 	return NULL;
6383 }
6384 
6385 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6386 				     struct hclge_fd_rule *rule)
6387 {
6388 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6389 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6390 			     BIT(INNER_SRC_PORT);
6391 	rule->action = 0;
6392 	rule->vf_id = 0;
6393 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6394 	if (tuples->ether_proto == ETH_P_IP) {
6395 		if (tuples->ip_proto == IPPROTO_TCP)
6396 			rule->flow_type = TCP_V4_FLOW;
6397 		else
6398 			rule->flow_type = UDP_V4_FLOW;
6399 	} else {
6400 		if (tuples->ip_proto == IPPROTO_TCP)
6401 			rule->flow_type = TCP_V6_FLOW;
6402 		else
6403 			rule->flow_type = UDP_V6_FLOW;
6404 	}
6405 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6406 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6407 }
6408 
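/* aRFS entry point, called from the rx_flow_steer path with the flow keys of
 * a received packet. On success it returns the rule location, which the core
 * later passes back as the filter id checked in hclge_rfs_filter_expire().
 */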
6409 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6410 				      u16 flow_id, struct flow_keys *fkeys)
6411 {
6412 	struct hclge_vport *vport = hclge_get_vport(handle);
6413 	struct hclge_fd_rule_tuples new_tuples = {};
6414 	struct hclge_dev *hdev = vport->back;
6415 	struct hclge_fd_rule *rule;
6416 	u16 tmp_queue_id;
6417 	u16 bit_id;
6418 	int ret;
6419 
6420 	if (!hnae3_dev_fd_supported(hdev))
6421 		return -EOPNOTSUPP;
6422 
6423 	/* if there are already fd rules added by the user,
6424 	 * arfs should not take effect
6425 	 */
6426 	spin_lock_bh(&hdev->fd_rule_lock);
6427 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6428 		spin_unlock_bh(&hdev->fd_rule_lock);
6429 		return -EOPNOTSUPP;
6430 	}
6431 
6432 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6433 
6434 	/* check whether a flow director filter already exists for this flow:
6435 	 * if not, create a new filter for it;
6436 	 * if a filter exists with a different queue id, modify the filter;
6437 	 * if a filter exists with the same queue id, do nothing
6438 	 */
6439 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6440 	if (!rule) {
6441 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6442 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6443 			spin_unlock_bh(&hdev->fd_rule_lock);
6444 			return -ENOSPC;
6445 		}
6446 
6447 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6448 		if (!rule) {
6449 			spin_unlock_bh(&hdev->fd_rule_lock);
6450 			return -ENOMEM;
6451 		}
6452 
6453 		set_bit(bit_id, hdev->fd_bmap);
6454 		rule->location = bit_id;
6455 		rule->flow_id = flow_id;
6456 		rule->queue_id = queue_id;
6457 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6458 		ret = hclge_fd_config_rule(hdev, rule);
6459 
6460 		spin_unlock_bh(&hdev->fd_rule_lock);
6461 
6462 		if (ret)
6463 			return ret;
6464 
6465 		return rule->location;
6466 	}
6467 
6468 	spin_unlock_bh(&hdev->fd_rule_lock);
6469 
6470 	if (rule->queue_id == queue_id)
6471 		return rule->location;
6472 
6473 	tmp_queue_id = rule->queue_id;
6474 	rule->queue_id = queue_id;
6475 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6476 	if (ret) {
6477 		rule->queue_id = tmp_queue_id;
6478 		return ret;
6479 	}
6480 
6481 	return rule->location;
6482 }
6483 
6484 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6485 {
6486 #ifdef CONFIG_RFS_ACCEL
6487 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6488 	struct hclge_fd_rule *rule;
6489 	struct hlist_node *node;
6490 	HLIST_HEAD(del_list);
6491 
6492 	spin_lock_bh(&hdev->fd_rule_lock);
6493 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6494 		spin_unlock_bh(&hdev->fd_rule_lock);
6495 		return;
6496 	}
6497 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6498 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6499 					rule->flow_id, rule->location)) {
6500 			hlist_del_init(&rule->rule_node);
6501 			hlist_add_head(&rule->rule_node, &del_list);
6502 			hdev->hclge_fd_rule_num--;
6503 			clear_bit(rule->location, hdev->fd_bmap);
6504 		}
6505 	}
6506 	spin_unlock_bh(&hdev->fd_rule_lock);
6507 
6508 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6509 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6510 				     rule->location, NULL, false);
6511 		kfree(rule);
6512 	}
6513 #endif
6514 }
6515 
6516 /* the caller must hold fd_rule_lock */
6517 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6518 {
6519 #ifdef CONFIG_RFS_ACCEL
6520 	struct hclge_vport *vport = hclge_get_vport(handle);
6521 	struct hclge_dev *hdev = vport->back;
6522 
6523 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6524 		hclge_del_all_fd_entries(handle, true);
6525 #endif
6526 }
6527 
6528 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6529 {
6530 	struct hclge_vport *vport = hclge_get_vport(handle);
6531 	struct hclge_dev *hdev = vport->back;
6532 
6533 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6534 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6535 }
6536 
6537 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6538 {
6539 	struct hclge_vport *vport = hclge_get_vport(handle);
6540 	struct hclge_dev *hdev = vport->back;
6541 
6542 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6543 }
6544 
6545 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6546 {
6547 	struct hclge_vport *vport = hclge_get_vport(handle);
6548 	struct hclge_dev *hdev = vport->back;
6549 
6550 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6551 }
6552 
6553 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6554 {
6555 	struct hclge_vport *vport = hclge_get_vport(handle);
6556 	struct hclge_dev *hdev = vport->back;
6557 
6558 	return hdev->rst_stats.hw_reset_done_cnt;
6559 }
6560 
6561 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6562 {
6563 	struct hclge_vport *vport = hclge_get_vport(handle);
6564 	struct hclge_dev *hdev = vport->back;
6565 	bool clear;
6566 
6567 	hdev->fd_en = enable;
6568 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6569 
6570 	if (!enable) {
6571 		spin_lock_bh(&hdev->fd_rule_lock);
6572 		hclge_del_all_fd_entries(handle, clear);
6573 		spin_unlock_bh(&hdev->fd_rule_lock);
6574 	} else {
6575 		hclge_restore_fd_entries(handle);
6576 	}
6577 }
6578 
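/* Enable or disable MAC TX/RX. When enabling, the padding, FCS and
 * oversize/undersize handling bits are configured together with the TX/RX
 * enable bits in the loop_en word below.
 */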
6579 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6580 {
6581 	struct hclge_desc desc;
6582 	struct hclge_config_mac_mode_cmd *req =
6583 		(struct hclge_config_mac_mode_cmd *)desc.data;
6584 	u32 loop_en = 0;
6585 	int ret;
6586 
6587 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6588 
6589 	if (enable) {
6590 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6591 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6592 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6593 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6594 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6595 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6596 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6597 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6598 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6599 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6600 	}
6601 
6602 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6603 
6604 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6605 	if (ret)
6606 		dev_err(&hdev->pdev->dev,
6607 			"mac enable fail, ret = %d.\n", ret);
6608 }
6609 
6610 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6611 				     u8 switch_param, u8 param_mask)
6612 {
6613 	struct hclge_mac_vlan_switch_cmd *req;
6614 	struct hclge_desc desc;
6615 	u32 func_id;
6616 	int ret;
6617 
6618 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6619 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6620 
6621 	/* read current config parameter */
6622 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6623 				   true);
6624 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6625 	req->func_id = cpu_to_le32(func_id);
6626 
6627 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6628 	if (ret) {
6629 		dev_err(&hdev->pdev->dev,
6630 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6631 		return ret;
6632 	}
6633 
6634 	/* modify and write new config parameter */
6635 	hclge_cmd_reuse_desc(&desc, false);
6636 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6637 	req->param_mask = param_mask;
6638 
6639 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6640 	if (ret)
6641 		dev_err(&hdev->pdev->dev,
6642 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6643 	return ret;
6644 }
6645 
6646 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6647 				       int link_ret)
6648 {
6649 #define HCLGE_PHY_LINK_STATUS_NUM  200
6650 
6651 	struct phy_device *phydev = hdev->hw.mac.phydev;
6652 	int i = 0;
6653 	int ret;
6654 
6655 	do {
6656 		ret = phy_read_status(phydev);
6657 		if (ret) {
6658 			dev_err(&hdev->pdev->dev,
6659 				"phy update link status fail, ret = %d\n", ret);
6660 			return;
6661 		}
6662 
6663 		if (phydev->link == link_ret)
6664 			break;
6665 
6666 		msleep(HCLGE_LINK_STATUS_MS);
6667 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6668 }
6669 
6670 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6671 {
6672 #define HCLGE_MAC_LINK_STATUS_NUM  100
6673 
6674 	int link_status;
6675 	int i = 0;
6676 	int ret;
6677 
6678 	do {
6679 		ret = hclge_get_mac_link_status(hdev, &link_status);
6680 		if (ret)
6681 			return ret;
6682 		if (link_status == link_ret)
6683 			return 0;
6684 
6685 		msleep(HCLGE_LINK_STATUS_MS);
6686 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6687 	return -EBUSY;
6688 }
6689 
6690 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6691 					  bool is_phy)
6692 {
6693 	int link_ret;
6694 
6695 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6696 
6697 	if (is_phy)
6698 		hclge_phy_link_status_wait(hdev, link_ret);
6699 
6700 	return hclge_mac_link_status_wait(hdev, link_ret);
6701 }
6702 
6703 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6704 {
6705 	struct hclge_config_mac_mode_cmd *req;
6706 	struct hclge_desc desc;
6707 	u32 loop_en;
6708 	int ret;
6709 
6710 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6711 	/* 1 Read out the MAC mode config at first */
6712 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6713 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6714 	if (ret) {
6715 		dev_err(&hdev->pdev->dev,
6716 			"mac loopback get fail, ret = %d.\n", ret);
6717 		return ret;
6718 	}
6719 
6720 	/* 2 Then setup the loopback flag */
6721 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6722 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6723 
6724 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6725 
6726 	/* 3 Config mac work mode with the loopback flag
6727 	 * and its original configuration parameters
6728 	 */
6729 	hclge_cmd_reuse_desc(&desc, false);
6730 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6731 	if (ret)
6732 		dev_err(&hdev->pdev->dev,
6733 			"mac loopback set fail, ret = %d.\n", ret);
6734 	return ret;
6735 }
6736 
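/* Configure serial or parallel serdes loopback and then poll the command
 * result until the firmware reports completion or the retry limit
 * (HCLGE_SERDES_RETRY_NUM) is reached.
 */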
6737 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6738 				     enum hnae3_loop loop_mode)
6739 {
6740 #define HCLGE_SERDES_RETRY_MS	10
6741 #define HCLGE_SERDES_RETRY_NUM	100
6742 
6743 	struct hclge_serdes_lb_cmd *req;
6744 	struct hclge_desc desc;
6745 	int ret, i = 0;
6746 	u8 loop_mode_b;
6747 
6748 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6749 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6750 
6751 	switch (loop_mode) {
6752 	case HNAE3_LOOP_SERIAL_SERDES:
6753 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6754 		break;
6755 	case HNAE3_LOOP_PARALLEL_SERDES:
6756 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6757 		break;
6758 	default:
6759 		dev_err(&hdev->pdev->dev,
6760 			"unsupported serdes loopback mode %d\n", loop_mode);
6761 		return -ENOTSUPP;
6762 	}
6763 
6764 	if (en) {
6765 		req->enable = loop_mode_b;
6766 		req->mask = loop_mode_b;
6767 	} else {
6768 		req->mask = loop_mode_b;
6769 	}
6770 
6771 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6772 	if (ret) {
6773 		dev_err(&hdev->pdev->dev,
6774 			"serdes loopback set fail, ret = %d\n", ret);
6775 		return ret;
6776 	}
6777 
6778 	do {
6779 		msleep(HCLGE_SERDES_RETRY_MS);
6780 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6781 					   true);
6782 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6783 		if (ret) {
6784 			dev_err(&hdev->pdev->dev,
6785 				"serdes loopback get fail, ret = %d\n", ret);
6786 			return ret;
6787 		}
6788 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6789 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6790 
6791 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6792 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6793 		return -EBUSY;
6794 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6795 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6796 		return -EIO;
6797 	}
6798 	return ret;
6799 }
6800 
6801 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6802 				     enum hnae3_loop loop_mode)
6803 {
6804 	int ret;
6805 
6806 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6807 	if (ret)
6808 		return ret;
6809 
6810 	hclge_cfg_mac_mode(hdev, en);
6811 
6812 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6813 	if (ret)
6814 		dev_err(&hdev->pdev->dev,
6815 			"serdes loopback config mac mode timeout\n");
6816 
6817 	return ret;
6818 }
6819 
6820 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6821 				     struct phy_device *phydev)
6822 {
6823 	int ret;
6824 
6825 	if (!phydev->suspended) {
6826 		ret = phy_suspend(phydev);
6827 		if (ret)
6828 			return ret;
6829 	}
6830 
6831 	ret = phy_resume(phydev);
6832 	if (ret)
6833 		return ret;
6834 
6835 	return phy_loopback(phydev, true);
6836 }
6837 
6838 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6839 				      struct phy_device *phydev)
6840 {
6841 	int ret;
6842 
6843 	ret = phy_loopback(phydev, false);
6844 	if (ret)
6845 		return ret;
6846 
6847 	return phy_suspend(phydev);
6848 }
6849 
6850 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6851 {
6852 	struct phy_device *phydev = hdev->hw.mac.phydev;
6853 	int ret;
6854 
6855 	if (!phydev)
6856 		return -ENOTSUPP;
6857 
6858 	if (en)
6859 		ret = hclge_enable_phy_loopback(hdev, phydev);
6860 	else
6861 		ret = hclge_disable_phy_loopback(hdev, phydev);
6862 	if (ret) {
6863 		dev_err(&hdev->pdev->dev,
6864 			"set phy loopback fail, ret = %d\n", ret);
6865 		return ret;
6866 	}
6867 
6868 	hclge_cfg_mac_mode(hdev, en);
6869 
6870 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6871 	if (ret)
6872 		dev_err(&hdev->pdev->dev,
6873 			"phy loopback config mac mode timeout\n");
6874 
6875 	return ret;
6876 }
6877 
6878 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6879 			    int stream_id, bool enable)
6880 {
6881 	struct hclge_desc desc;
6882 	struct hclge_cfg_com_tqp_queue_cmd *req =
6883 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6884 	int ret;
6885 
6886 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6887 	req->tqp_id = cpu_to_le16(tqp_id);
6888 	req->stream_id = cpu_to_le16(stream_id);
6889 	if (enable)
6890 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6891 
6892 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6893 	if (ret)
6894 		dev_err(&hdev->pdev->dev,
6895 			"Tqp enable fail, status = %d.\n", ret);
6896 	return ret;
6897 }
6898 
6899 static int hclge_set_loopback(struct hnae3_handle *handle,
6900 			      enum hnae3_loop loop_mode, bool en)
6901 {
6902 	struct hclge_vport *vport = hclge_get_vport(handle);
6903 	struct hnae3_knic_private_info *kinfo;
6904 	struct hclge_dev *hdev = vport->back;
6905 	int i, ret;
6906 
6907 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6908 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6909 	 * the same, the packets are looped back in the SSU. If SSU loopback
6910 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6911 	 */
6912 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
6913 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6914 
6915 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6916 						HCLGE_SWITCH_ALW_LPBK_MASK);
6917 		if (ret)
6918 			return ret;
6919 	}
6920 
6921 	switch (loop_mode) {
6922 	case HNAE3_LOOP_APP:
6923 		ret = hclge_set_app_loopback(hdev, en);
6924 		break;
6925 	case HNAE3_LOOP_SERIAL_SERDES:
6926 	case HNAE3_LOOP_PARALLEL_SERDES:
6927 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6928 		break;
6929 	case HNAE3_LOOP_PHY:
6930 		ret = hclge_set_phy_loopback(hdev, en);
6931 		break;
6932 	default:
6933 		ret = -ENOTSUPP;
6934 		dev_err(&hdev->pdev->dev,
6935 			"loop_mode %d is not supported\n", loop_mode);
6936 		break;
6937 	}
6938 
6939 	if (ret)
6940 		return ret;
6941 
6942 	kinfo = &vport->nic.kinfo;
6943 	for (i = 0; i < kinfo->num_tqps; i++) {
6944 		ret = hclge_tqp_enable(hdev, i, 0, en);
6945 		if (ret)
6946 			return ret;
6947 	}
6948 
6949 	return 0;
6950 }
6951 
6952 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6953 {
6954 	int ret;
6955 
6956 	ret = hclge_set_app_loopback(hdev, false);
6957 	if (ret)
6958 		return ret;
6959 
6960 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6961 	if (ret)
6962 		return ret;
6963 
6964 	return hclge_cfg_serdes_loopback(hdev, false,
6965 					 HNAE3_LOOP_PARALLEL_SERDES);
6966 }
6967 
6968 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6969 {
6970 	struct hclge_vport *vport = hclge_get_vport(handle);
6971 	struct hnae3_knic_private_info *kinfo;
6972 	struct hnae3_queue *queue;
6973 	struct hclge_tqp *tqp;
6974 	int i;
6975 
6976 	kinfo = &vport->nic.kinfo;
6977 	for (i = 0; i < kinfo->num_tqps; i++) {
6978 		queue = handle->kinfo.tqp[i];
6979 		tqp = container_of(queue, struct hclge_tqp, q);
6980 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6981 	}
6982 }
6983 
6984 static void hclge_flush_link_update(struct hclge_dev *hdev)
6985 {
6986 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6987 
6988 	unsigned long last = hdev->serv_processed_cnt;
6989 	int i = 0;
6990 
6991 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6992 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6993 	       last == hdev->serv_processed_cnt)
6994 		usleep_range(1, 1);
6995 }
6996 
6997 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6998 {
6999 	struct hclge_vport *vport = hclge_get_vport(handle);
7000 	struct hclge_dev *hdev = vport->back;
7001 
7002 	if (enable) {
7003 		hclge_task_schedule(hdev, 0);
7004 	} else {
7005 		/* Set the DOWN flag here to disable link updating */
7006 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7007 
7008 		/* flush memory to make sure DOWN is seen by service task */
7009 		smp_mb__before_atomic();
7010 		hclge_flush_link_update(hdev);
7011 	}
7012 }
7013 
7014 static int hclge_ae_start(struct hnae3_handle *handle)
7015 {
7016 	struct hclge_vport *vport = hclge_get_vport(handle);
7017 	struct hclge_dev *hdev = vport->back;
7018 
7019 	/* mac enable */
7020 	hclge_cfg_mac_mode(hdev, true);
7021 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7022 	hdev->hw.mac.link = 0;
7023 
7024 	/* reset tqp stats */
7025 	hclge_reset_tqp_stats(handle);
7026 
7027 	hclge_mac_start_phy(hdev);
7028 
7029 	return 0;
7030 }
7031 
7032 static void hclge_ae_stop(struct hnae3_handle *handle)
7033 {
7034 	struct hclge_vport *vport = hclge_get_vport(handle);
7035 	struct hclge_dev *hdev = vport->back;
7036 	int i;
7037 
7038 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7039 	spin_lock_bh(&hdev->fd_rule_lock);
7040 	hclge_clear_arfs_rules(handle);
7041 	spin_unlock_bh(&hdev->fd_rule_lock);
7042 
7043 	/* If it is not PF reset, the firmware will disable the MAC,
7044 	 * so it only needs to stop the phy here.
7045 	 */
7046 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7047 	    hdev->reset_type != HNAE3_FUNC_RESET) {
7048 		hclge_mac_stop_phy(hdev);
7049 		hclge_update_link_status(hdev);
7050 		return;
7051 	}
7052 
7053 	for (i = 0; i < handle->kinfo.num_tqps; i++)
7054 		hclge_reset_tqp(handle, i);
7055 
7056 	hclge_config_mac_tnl_int(hdev, false);
7057 
7058 	/* Mac disable */
7059 	hclge_cfg_mac_mode(hdev, false);
7060 
7061 	hclge_mac_stop_phy(hdev);
7062 
7063 	/* reset tqp stats */
7064 	hclge_reset_tqp_stats(handle);
7065 	hclge_update_link_status(hdev);
7066 }
7067 
7068 int hclge_vport_start(struct hclge_vport *vport)
7069 {
7070 	struct hclge_dev *hdev = vport->back;
7071 
7072 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7073 	vport->last_active_jiffies = jiffies;
7074 
7075 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7076 		if (vport->vport_id) {
7077 			hclge_restore_mac_table_common(vport);
7078 			hclge_restore_vport_vlan_table(vport);
7079 		} else {
7080 			hclge_restore_hw_table(hdev);
7081 		}
7082 	}
7083 
7084 	clear_bit(vport->vport_id, hdev->vport_config_block);
7085 
7086 	return 0;
7087 }
7088 
7089 void hclge_vport_stop(struct hclge_vport *vport)
7090 {
7091 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7092 }
7093 
7094 static int hclge_client_start(struct hnae3_handle *handle)
7095 {
7096 	struct hclge_vport *vport = hclge_get_vport(handle);
7097 
7098 	return hclge_vport_start(vport);
7099 }
7100 
7101 static void hclge_client_stop(struct hnae3_handle *handle)
7102 {
7103 	struct hclge_vport *vport = hclge_get_vport(handle);
7104 
7105 	hclge_vport_stop(vport);
7106 }
7107 
7108 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7109 					 u16 cmdq_resp, u8  resp_code,
7110 					 enum hclge_mac_vlan_tbl_opcode op)
7111 {
7112 	struct hclge_dev *hdev = vport->back;
7113 
7114 	if (cmdq_resp) {
7115 		dev_err(&hdev->pdev->dev,
7116 			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
7117 			cmdq_resp);
7118 		return -EIO;
7119 	}
7120 
7121 	if (op == HCLGE_MAC_VLAN_ADD) {
7122 		if (!resp_code || resp_code == 1)
7123 			return 0;
7124 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7125 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
7126 			return -ENOSPC;
7127 
7128 		dev_err(&hdev->pdev->dev,
7129 			"add mac addr failed for undefined, code=%u.\n",
7130 			resp_code);
7131 		return -EIO;
7132 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
7133 		if (!resp_code) {
7134 			return 0;
7135 		} else if (resp_code == 1) {
7136 			dev_dbg(&hdev->pdev->dev,
7137 				"remove mac addr failed for miss.\n");
7138 			return -ENOENT;
7139 		}
7140 
7141 		dev_err(&hdev->pdev->dev,
7142 			"remove mac addr failed for undefined, code=%u.\n",
7143 			resp_code);
7144 		return -EIO;
7145 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
7146 		if (!resp_code) {
7147 			return 0;
7148 		} else if (resp_code == 1) {
7149 			dev_dbg(&hdev->pdev->dev,
7150 				"lookup mac addr failed for miss.\n");
7151 			return -ENOENT;
7152 		}
7153 
7154 		dev_err(&hdev->pdev->dev,
7155 			"lookup mac addr failed for undefined, code=%u.\n",
7156 			resp_code);
7157 		return -EIO;
7158 	}
7159 
7160 	dev_err(&hdev->pdev->dev,
7161 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7162 
7163 	return -EINVAL;
7164 }
7165 
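/* Set or clear the bit for a VF id in a multicast entry's VF bitmap. The
 * bitmap is split across descriptors: the first 192 VF ids live in desc[1]
 * and the rest in desc[2], 32 ids per 32-bit data word. For example, a
 * (hypothetical) vfid of 200 maps to desc[2], word (200 - 192) / 32 = 0,
 * bit 200 % 32 = 8.
 */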
7166 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7167 {
7168 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7169 
7170 	unsigned int word_num;
7171 	unsigned int bit_num;
7172 
7173 	if (vfid > 255 || vfid < 0)
7174 		return -EIO;
7175 
7176 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7177 		word_num = vfid / 32;
7178 		bit_num  = vfid % 32;
7179 		if (clr)
7180 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7181 		else
7182 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7183 	} else {
7184 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7185 		bit_num  = vfid % 32;
7186 		if (clr)
7187 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7188 		else
7189 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7190 	}
7191 
7192 	return 0;
7193 }
7194 
7195 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7196 {
7197 #define HCLGE_DESC_NUMBER 3
7198 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7199 	int i, j;
7200 
7201 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7202 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7203 			if (desc[i].data[j])
7204 				return false;
7205 
7206 	return true;
7207 }
7208 
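/* Pack a 6-byte MAC address into the hi32/lo16 fields of a mac_vlan table
 * entry. Bytes 0-3 form the 32-bit high word (byte 0 in the lowest 8 bits,
 * byte 3 in the highest) and bytes 4-5 form the low 16 bits. For example,
 * the (hypothetical) address 00:11:22:33:44:55 packs to
 * high_val = 0x33221100 and low_val = 0x5544.
 */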
7209 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7210 				   const u8 *addr, bool is_mc)
7211 {
7212 	const unsigned char *mac_addr = addr;
7213 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7214 		       (mac_addr[0]) | (mac_addr[1] << 8);
7215 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7216 
7217 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7218 	if (is_mc) {
7219 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7220 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7221 	}
7222 
7223 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7224 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7225 }
7226 
7227 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7228 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
7229 {
7230 	struct hclge_dev *hdev = vport->back;
7231 	struct hclge_desc desc;
7232 	u8 resp_code;
7233 	u16 retval;
7234 	int ret;
7235 
7236 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7237 
7238 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7239 
7240 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7241 	if (ret) {
7242 		dev_err(&hdev->pdev->dev,
7243 			"del mac addr failed for cmd_send, ret =%d.\n",
7244 			ret);
7245 		return ret;
7246 	}
7247 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7248 	retval = le16_to_cpu(desc.retval);
7249 
7250 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7251 					     HCLGE_MAC_VLAN_REMOVE);
7252 }
7253 
7254 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7255 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7256 				     struct hclge_desc *desc,
7257 				     bool is_mc)
7258 {
7259 	struct hclge_dev *hdev = vport->back;
7260 	u8 resp_code;
7261 	u16 retval;
7262 	int ret;
7263 
7264 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7265 	if (is_mc) {
7266 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7267 		memcpy(desc[0].data,
7268 		       req,
7269 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7270 		hclge_cmd_setup_basic_desc(&desc[1],
7271 					   HCLGE_OPC_MAC_VLAN_ADD,
7272 					   true);
7273 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7274 		hclge_cmd_setup_basic_desc(&desc[2],
7275 					   HCLGE_OPC_MAC_VLAN_ADD,
7276 					   true);
7277 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7278 	} else {
7279 		memcpy(desc[0].data,
7280 		       req,
7281 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7282 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7283 	}
7284 	if (ret) {
7285 		dev_err(&hdev->pdev->dev,
7286 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7287 			ret);
7288 		return ret;
7289 	}
7290 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7291 	retval = le16_to_cpu(desc[0].retval);
7292 
7293 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7294 					     HCLGE_MAC_VLAN_LKUP);
7295 }
7296 
7297 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7298 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7299 				  struct hclge_desc *mc_desc)
7300 {
7301 	struct hclge_dev *hdev = vport->back;
7302 	int cfg_status;
7303 	u8 resp_code;
7304 	u16 retval;
7305 	int ret;
7306 
7307 	if (!mc_desc) {
7308 		struct hclge_desc desc;
7309 
7310 		hclge_cmd_setup_basic_desc(&desc,
7311 					   HCLGE_OPC_MAC_VLAN_ADD,
7312 					   false);
7313 		memcpy(desc.data, req,
7314 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7315 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7316 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7317 		retval = le16_to_cpu(desc.retval);
7318 
7319 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7320 							   resp_code,
7321 							   HCLGE_MAC_VLAN_ADD);
7322 	} else {
7323 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7324 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7325 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7326 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7327 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7328 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7329 		memcpy(mc_desc[0].data, req,
7330 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7331 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7332 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7333 		retval = le16_to_cpu(mc_desc[0].retval);
7334 
7335 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7336 							   resp_code,
7337 							   HCLGE_MAC_VLAN_ADD);
7338 	}
7339 
7340 	if (ret) {
7341 		dev_err(&hdev->pdev->dev,
7342 			"add mac addr failed for cmd_send, ret =%d.\n",
7343 			ret);
7344 		return ret;
7345 	}
7346 
7347 	return cfg_status;
7348 }
7349 
7350 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7351 			       u16 *allocated_size)
7352 {
7353 	struct hclge_umv_spc_alc_cmd *req;
7354 	struct hclge_desc desc;
7355 	int ret;
7356 
7357 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7358 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7359 
7360 	req->space_size = cpu_to_le32(space_size);
7361 
7362 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7363 	if (ret) {
7364 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7365 			ret);
7366 		return ret;
7367 	}
7368 
7369 	*allocated_size = le32_to_cpu(desc.data[1]);
7370 
7371 	return 0;
7372 }
7373 
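/* Split the allocated unicast MAC vlan (UMV) space into num_alloc_vport + 1
 * equal parts: each vport gets one part as its private quota, and the extra
 * part plus the division remainder becomes the shared pool. As a
 * hypothetical example, 3072 entries with 7 allocated vports gives a private
 * quota of 3072 / 8 = 384 and a shared pool of 384 + (3072 % 8) = 384.
 */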
7374 static int hclge_init_umv_space(struct hclge_dev *hdev)
7375 {
7376 	u16 allocated_size = 0;
7377 	int ret;
7378 
7379 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7380 	if (ret)
7381 		return ret;
7382 
7383 	if (allocated_size < hdev->wanted_umv_size)
7384 		dev_warn(&hdev->pdev->dev,
7385 			 "failed to alloc umv space, want %u, get %u\n",
7386 			 hdev->wanted_umv_size, allocated_size);
7387 
7388 	hdev->max_umv_size = allocated_size;
7389 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7390 	hdev->share_umv_size = hdev->priv_umv_size +
7391 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7392 
7393 	return 0;
7394 }
7395 
7396 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7397 {
7398 	struct hclge_vport *vport;
7399 	int i;
7400 
7401 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7402 		vport = &hdev->vport[i];
7403 		vport->used_umv_num = 0;
7404 	}
7405 
7406 	mutex_lock(&hdev->vport_lock);
7407 	hdev->share_umv_size = hdev->priv_umv_size +
7408 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7409 	mutex_unlock(&hdev->vport_lock);
7410 }
7411 
7412 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7413 {
7414 	struct hclge_dev *hdev = vport->back;
7415 	bool is_full;
7416 
7417 	if (need_lock)
7418 		mutex_lock(&hdev->vport_lock);
7419 
7420 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7421 		   hdev->share_umv_size == 0);
7422 
7423 	if (need_lock)
7424 		mutex_unlock(&hdev->vport_lock);
7425 
7426 	return is_full;
7427 }
7428 
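/* Account a single add (is_free == false) or remove (is_free == true)
 * against the vport's UMV usage: entries beyond the vport's private quota
 * are charged to (or returned to) the shared pool. Callers in this file
 * hold hdev->vport_lock around this update.
 */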
7429 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7430 {
7431 	struct hclge_dev *hdev = vport->back;
7432 
7433 	if (is_free) {
7434 		if (vport->used_umv_num > hdev->priv_umv_size)
7435 			hdev->share_umv_size++;
7436 
7437 		if (vport->used_umv_num > 0)
7438 			vport->used_umv_num--;
7439 	} else {
7440 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7441 		    hdev->share_umv_size > 0)
7442 			hdev->share_umv_size--;
7443 		vport->used_umv_num++;
7444 	}
7445 }
7446 
7447 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7448 						  const u8 *mac_addr)
7449 {
7450 	struct hclge_mac_node *mac_node, *tmp;
7451 
7452 	list_for_each_entry_safe(mac_node, tmp, list, node)
7453 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7454 			return mac_node;
7455 
7456 	return NULL;
7457 }
7458 
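/* Merge a newly requested state into an existing mac node:
 *   TO_ADD on a TO_DEL node  -> ACTIVE (the pending delete is cancelled)
 *   TO_DEL on a TO_ADD node  -> node is freed (the pending add is cancelled)
 *   TO_DEL on any other node -> TO_DEL
 *   ACTIVE on a TO_ADD node  -> ACTIVE (the address reached hardware)
 */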
7459 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7460 				  enum HCLGE_MAC_NODE_STATE state)
7461 {
7462 	switch (state) {
7463 	/* from set_rx_mode or tmp_add_list */
7464 	case HCLGE_MAC_TO_ADD:
7465 		if (mac_node->state == HCLGE_MAC_TO_DEL)
7466 			mac_node->state = HCLGE_MAC_ACTIVE;
7467 		break;
7468 	/* only from set_rx_mode */
7469 	case HCLGE_MAC_TO_DEL:
7470 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
7471 			list_del(&mac_node->node);
7472 			kfree(mac_node);
7473 		} else {
7474 			mac_node->state = HCLGE_MAC_TO_DEL;
7475 		}
7476 		break;
7477 	/* only from tmp_add_list, the mac_node->state won't be
7478 	 * ACTIVE.
7479 	 */
7480 	case HCLGE_MAC_ACTIVE:
7481 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7482 			mac_node->state = HCLGE_MAC_ACTIVE;
7483 
7484 		break;
7485 	}
7486 }
7487 
7488 int hclge_update_mac_list(struct hclge_vport *vport,
7489 			  enum HCLGE_MAC_NODE_STATE state,
7490 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
7491 			  const unsigned char *addr)
7492 {
7493 	struct hclge_dev *hdev = vport->back;
7494 	struct hclge_mac_node *mac_node;
7495 	struct list_head *list;
7496 
7497 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7498 		&vport->uc_mac_list : &vport->mc_mac_list;
7499 
7500 	spin_lock_bh(&vport->mac_list_lock);
7501 
7502 	/* if the mac addr is already in the mac list, no need to add a new
7503 	 * one into it; just check the mac addr state and convert it to a
7504 	 * new state, remove it, or do nothing.
7505 	 */
7506 	mac_node = hclge_find_mac_node(list, addr);
7507 	if (mac_node) {
7508 		hclge_update_mac_node(mac_node, state);
7509 		spin_unlock_bh(&vport->mac_list_lock);
7510 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7511 		return 0;
7512 	}
7513 
7514 	/* if this address was never added, there is no need to delete it */
7515 	if (state == HCLGE_MAC_TO_DEL) {
7516 		spin_unlock_bh(&vport->mac_list_lock);
7517 		dev_err(&hdev->pdev->dev,
7518 			"failed to delete address %pM from mac list\n",
7519 			addr);
7520 		return -ENOENT;
7521 	}
7522 
7523 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7524 	if (!mac_node) {
7525 		spin_unlock_bh(&vport->mac_list_lock);
7526 		return -ENOMEM;
7527 	}
7528 
7529 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7530 
7531 	mac_node->state = state;
7532 	ether_addr_copy(mac_node->mac_addr, addr);
7533 	list_add_tail(&mac_node->node, list);
7534 
7535 	spin_unlock_bh(&vport->mac_list_lock);
7536 
7537 	return 0;
7538 }
7539 
7540 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7541 			     const unsigned char *addr)
7542 {
7543 	struct hclge_vport *vport = hclge_get_vport(handle);
7544 
7545 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7546 				     addr);
7547 }
7548 
7549 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7550 			     const unsigned char *addr)
7551 {
7552 	struct hclge_dev *hdev = vport->back;
7553 	struct hclge_mac_vlan_tbl_entry_cmd req;
7554 	struct hclge_desc desc;
7555 	u16 egress_port = 0;
7556 	int ret;
7557 
7558 	/* mac addr check */
7559 	if (is_zero_ether_addr(addr) ||
7560 	    is_broadcast_ether_addr(addr) ||
7561 	    is_multicast_ether_addr(addr)) {
7562 		dev_err(&hdev->pdev->dev,
7563 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7564 			 addr, is_zero_ether_addr(addr),
7565 			 is_broadcast_ether_addr(addr),
7566 			 is_multicast_ether_addr(addr));
7567 		return -EINVAL;
7568 	}
7569 
7570 	memset(&req, 0, sizeof(req));
7571 
7572 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7573 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7574 
7575 	req.egress_port = cpu_to_le16(egress_port);
7576 
7577 	hclge_prepare_mac_addr(&req, addr, false);
7578 
7579 	/* Look up the mac address in the mac_vlan table, and add
7580 	 * it if the entry does not exist. Duplicate unicast entries
7581 	 * are not allowed in the mac vlan table.
7582 	 */
7583 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7584 	if (ret == -ENOENT) {
7585 		mutex_lock(&hdev->vport_lock);
7586 		if (!hclge_is_umv_space_full(vport, false)) {
7587 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7588 			if (!ret)
7589 				hclge_update_umv_space(vport, false);
7590 			mutex_unlock(&hdev->vport_lock);
7591 			return ret;
7592 		}
7593 		mutex_unlock(&hdev->vport_lock);
7594 
7595 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7596 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7597 				hdev->priv_umv_size);
7598 
7599 		return -ENOSPC;
7600 	}
7601 
7602 	/* check if we just hit a duplicate entry */
7603 	if (!ret) {
7604 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7605 			 vport->vport_id, addr);
7606 		return 0;
7607 	}
7608 
7609 	dev_err(&hdev->pdev->dev,
7610 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7611 		addr);
7612 
7613 	return ret;
7614 }
7615 
7616 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7617 			    const unsigned char *addr)
7618 {
7619 	struct hclge_vport *vport = hclge_get_vport(handle);
7620 
7621 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7622 				     addr);
7623 }
7624 
7625 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7626 			    const unsigned char *addr)
7627 {
7628 	struct hclge_dev *hdev = vport->back;
7629 	struct hclge_mac_vlan_tbl_entry_cmd req;
7630 	int ret;
7631 
7632 	/* mac addr check */
7633 	if (is_zero_ether_addr(addr) ||
7634 	    is_broadcast_ether_addr(addr) ||
7635 	    is_multicast_ether_addr(addr)) {
7636 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7637 			addr);
7638 		return -EINVAL;
7639 	}
7640 
7641 	memset(&req, 0, sizeof(req));
7642 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7643 	hclge_prepare_mac_addr(&req, addr, false);
7644 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7645 	if (!ret) {
7646 		mutex_lock(&hdev->vport_lock);
7647 		hclge_update_umv_space(vport, true);
7648 		mutex_unlock(&hdev->vport_lock);
7649 	} else if (ret == -ENOENT) {
7650 		ret = 0;
7651 	}
7652 
7653 	return ret;
7654 }
7655 
7656 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7657 			     const unsigned char *addr)
7658 {
7659 	struct hclge_vport *vport = hclge_get_vport(handle);
7660 
7661 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7662 				     addr);
7663 }
7664 
7665 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7666 			     const unsigned char *addr)
7667 {
7668 	struct hclge_dev *hdev = vport->back;
7669 	struct hclge_mac_vlan_tbl_entry_cmd req;
7670 	struct hclge_desc desc[3];
7671 	int status;
7672 
7673 	/* mac addr check */
7674 	if (!is_multicast_ether_addr(addr)) {
7675 		dev_err(&hdev->pdev->dev,
7676 			"Add mc mac err! invalid mac:%pM.\n",
7677 			 addr);
7678 		return -EINVAL;
7679 	}
7680 	memset(&req, 0, sizeof(req));
7681 	hclge_prepare_mac_addr(&req, addr, true);
7682 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7683 	if (status) {
7684 		/* This mac addr does not exist, add a new entry for it */
7685 		memset(desc[0].data, 0, sizeof(desc[0].data));
7686 		memset(desc[1].data, 0, sizeof(desc[0].data));
7687 		memset(desc[2].data, 0, sizeof(desc[0].data));
7688 	}
7689 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7690 	if (status)
7691 		return status;
7692 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7693 
7694 	/* if the table has already overflowed, do not print the error each time */
7695 	if (status == -ENOSPC &&
7696 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7697 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7698 
7699 	return status;
7700 }
7701 
7702 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7703 			    const unsigned char *addr)
7704 {
7705 	struct hclge_vport *vport = hclge_get_vport(handle);
7706 
7707 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7708 				     addr);
7709 }
7710 
7711 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7712 			    const unsigned char *addr)
7713 {
7714 	struct hclge_dev *hdev = vport->back;
7715 	struct hclge_mac_vlan_tbl_entry_cmd req;
7716 	enum hclge_cmd_status status;
7717 	struct hclge_desc desc[3];
7718 
7719 	/* mac addr check */
7720 	if (!is_multicast_ether_addr(addr)) {
7721 		dev_dbg(&hdev->pdev->dev,
7722 			"Remove mc mac err! invalid mac:%pM.\n",
7723 			 addr);
7724 		return -EINVAL;
7725 	}
7726 
7727 	memset(&req, 0, sizeof(req));
7728 	hclge_prepare_mac_addr(&req, addr, true);
7729 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7730 	if (!status) {
7731 		/* This mac addr exists, remove this handle's VFID from it */
7732 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7733 		if (status)
7734 			return status;
7735 
7736 		if (hclge_is_all_function_id_zero(desc))
7737 			/* All the vfids are zero, so delete this entry */
7738 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7739 		else
7740 			/* Not all the vfids are zero, so just update the vfids */
7741 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7742 
7743 	} else if (status == -ENOENT) {
7744 		status = 0;
7745 	}
7746 
7747 	return status;
7748 }
7749 
7750 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7751 				      struct list_head *list,
7752 				      int (*sync)(struct hclge_vport *,
7753 						  const unsigned char *))
7754 {
7755 	struct hclge_mac_node *mac_node, *tmp;
7756 	int ret;
7757 
7758 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7759 		ret = sync(vport, mac_node->mac_addr);
7760 		if (!ret) {
7761 			mac_node->state = HCLGE_MAC_ACTIVE;
7762 		} else {
7763 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7764 				&vport->state);
7765 			break;
7766 		}
7767 	}
7768 }
7769 
7770 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7771 					struct list_head *list,
7772 					int (*unsync)(struct hclge_vport *,
7773 						      const unsigned char *))
7774 {
7775 	struct hclge_mac_node *mac_node, *tmp;
7776 	int ret;
7777 
7778 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7779 		ret = unsync(vport, mac_node->mac_addr);
7780 		if (!ret || ret == -ENOENT) {
7781 			list_del(&mac_node->node);
7782 			kfree(mac_node);
7783 		} else {
7784 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7785 				&vport->state);
7786 			break;
7787 		}
7788 	}
7789 }
7790 
7791 static bool hclge_sync_from_add_list(struct list_head *add_list,
7792 				     struct list_head *mac_list)
7793 {
7794 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7795 	bool all_added = true;
7796 
7797 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7798 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7799 			all_added = false;
7800 
7801 		/* if the mac address from tmp_add_list is not in the
7802 		 * uc/mc_mac_list, it means a TO_DEL request was received
7803 		 * during the time window of adding the mac address into the
7804 		 * mac table. If the mac_node state is ACTIVE, change it to
7805 		 * TO_DEL so it will be removed next time. Otherwise it must
7806 		 * be TO_ADD, meaning this address hasn't been added into the
7807 		 * mac table yet, so just remove the mac node.
7808 		 */
7809 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7810 		if (new_node) {
7811 			hclge_update_mac_node(new_node, mac_node->state);
7812 			list_del(&mac_node->node);
7813 			kfree(mac_node);
7814 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7815 			mac_node->state = HCLGE_MAC_TO_DEL;
7816 			list_del(&mac_node->node);
7817 			list_add_tail(&mac_node->node, mac_list);
7818 		} else {
7819 			list_del(&mac_node->node);
7820 			kfree(mac_node);
7821 		}
7822 	}
7823 
7824 	return all_added;
7825 }
7826 
7827 static void hclge_sync_from_del_list(struct list_head *del_list,
7828 				     struct list_head *mac_list)
7829 {
7830 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7831 
7832 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7833 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7834 		if (new_node) {
7835 			/* If the mac addr exists in the mac list, it means a
7836 			 * new TO_ADD request was received during the time
7837 			 * window of configuring the mac address. Since the mac
7838 			 * node state is TO_ADD and the address is already in
7839 			 * the hardware (because the delete failed), we just
7840 			 * need to change the mac node state to ACTIVE.
7841 			 */
7842 			new_node->state = HCLGE_MAC_ACTIVE;
7843 			list_del(&mac_node->node);
7844 			kfree(mac_node);
7845 		} else {
7846 			list_del(&mac_node->node);
7847 			list_add_tail(&mac_node->node, mac_list);
7848 		}
7849 	}
7850 }
7851 
7852 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7853 					enum HCLGE_MAC_ADDR_TYPE mac_type,
7854 					bool is_all_added)
7855 {
7856 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7857 		if (is_all_added)
7858 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7859 		else
7860 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7861 	} else {
7862 		if (is_all_added)
7863 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7864 		else
7865 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7866 	}
7867 }
7868 
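/* Push the vport's software mac list to hardware in three steps: snapshot
 * the TO_DEL/TO_ADD entries into temporary lists under mac_list_lock, issue
 * the hardware commands without the lock held (deletes first, to free table
 * space for the adds), then merge the results back into the mac list so any
 * failed entries are retried on the next service task run.
 */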
7869 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7870 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
7871 {
7872 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7873 	struct list_head tmp_add_list, tmp_del_list;
7874 	struct list_head *list;
7875 	bool all_added;
7876 
7877 	INIT_LIST_HEAD(&tmp_add_list);
7878 	INIT_LIST_HEAD(&tmp_del_list);
7879 
7880 	/* move the mac addrs to the tmp_add_list and tmp_del_list, so
7881 	 * we can add/delete these mac addrs outside the spin lock
7882 	 */
7883 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7884 		&vport->uc_mac_list : &vport->mc_mac_list;
7885 
7886 	spin_lock_bh(&vport->mac_list_lock);
7887 
7888 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7889 		switch (mac_node->state) {
7890 		case HCLGE_MAC_TO_DEL:
7891 			list_del(&mac_node->node);
7892 			list_add_tail(&mac_node->node, &tmp_del_list);
7893 			break;
7894 		case HCLGE_MAC_TO_ADD:
7895 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7896 			if (!new_node)
7897 				goto stop_traverse;
7898 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7899 			new_node->state = mac_node->state;
7900 			list_add_tail(&new_node->node, &tmp_add_list);
7901 			break;
7902 		default:
7903 			break;
7904 		}
7905 	}
7906 
7907 stop_traverse:
7908 	spin_unlock_bh(&vport->mac_list_lock);
7909 
7910 	/* delete first, in order to get max mac table space for adding */
7911 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7912 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7913 					    hclge_rm_uc_addr_common);
7914 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7915 					  hclge_add_uc_addr_common);
7916 	} else {
7917 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7918 					    hclge_rm_mc_addr_common);
7919 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7920 					  hclge_add_mc_addr_common);
7921 	}
7922 
7923 	/* if some mac addresses failed to be added/deleted, move them back
7924 	 * to the mac_list and retry next time.
7925 	 */
7926 	spin_lock_bh(&vport->mac_list_lock);
7927 
7928 	hclge_sync_from_del_list(&tmp_del_list, list);
7929 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7930 
7931 	spin_unlock_bh(&vport->mac_list_lock);
7932 
7933 	hclge_update_overflow_flags(vport, mac_type, all_added);
7934 }
7935 
7936 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7937 {
7938 	struct hclge_dev *hdev = vport->back;
7939 
7940 	if (test_bit(vport->vport_id, hdev->vport_config_block))
7941 		return false;
7942 
7943 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7944 		return true;
7945 
7946 	return false;
7947 }
7948 
7949 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7950 {
7951 	int i;
7952 
7953 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7954 		struct hclge_vport *vport = &hdev->vport[i];
7955 
7956 		if (!hclge_need_sync_mac_table(vport))
7957 			continue;
7958 
7959 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7960 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7961 	}
7962 }
7963 
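/* Remove all of a vport's mac addresses from hardware. When is_del_list is
 * true the software list entries are dropped as well; when it is false
 * (e.g. across a VF reset) active entries are kept with state TO_ADD and
 * the vport is marked in vport_config_block so the table can be restored
 * once the reset has finished.
 */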
7964 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7965 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7966 {
7967 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7968 	struct hclge_mac_node *mac_cfg, *tmp;
7969 	struct hclge_dev *hdev = vport->back;
7970 	struct list_head tmp_del_list, *list;
7971 	int ret;
7972 
7973 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7974 		list = &vport->uc_mac_list;
7975 		unsync = hclge_rm_uc_addr_common;
7976 	} else {
7977 		list = &vport->mc_mac_list;
7978 		unsync = hclge_rm_mc_addr_common;
7979 	}
7980 
7981 	INIT_LIST_HEAD(&tmp_del_list);
7982 
7983 	if (!is_del_list)
7984 		set_bit(vport->vport_id, hdev->vport_config_block);
7985 
7986 	spin_lock_bh(&vport->mac_list_lock);
7987 
7988 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7989 		switch (mac_cfg->state) {
7990 		case HCLGE_MAC_TO_DEL:
7991 		case HCLGE_MAC_ACTIVE:
7992 			list_del(&mac_cfg->node);
7993 			list_add_tail(&mac_cfg->node, &tmp_del_list);
7994 			break;
7995 		case HCLGE_MAC_TO_ADD:
7996 			if (is_del_list) {
7997 				list_del(&mac_cfg->node);
7998 				kfree(mac_cfg);
7999 			}
8000 			break;
8001 		}
8002 	}
8003 
8004 	spin_unlock_bh(&vport->mac_list_lock);
8005 
8006 	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
8007 		ret = unsync(vport, mac_cfg->mac_addr);
8008 		if (!ret || ret == -ENOENT) {
8009 			/* clear all mac addrs from hardware, but keep these
8010 			 * mac addrs in the mac list and restore them after
8011 			 * the vf reset has finished.
8012 			 */
8013 			if (!is_del_list &&
8014 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8015 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8016 			} else {
8017 				list_del(&mac_cfg->node);
8018 				kfree(mac_cfg);
8019 			}
8020 		} else if (is_del_list) {
8021 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8022 		}
8023 	}
8024 
8025 	spin_lock_bh(&vport->mac_list_lock);
8026 
8027 	hclge_sync_from_del_list(&tmp_del_list, list);
8028 
8029 	spin_unlock_bh(&vport->mac_list_lock);
8030 }
8031 
8032 /* remove all mac addresses when uninitializing */
8033 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8034 					enum HCLGE_MAC_ADDR_TYPE mac_type)
8035 {
8036 	struct hclge_mac_node *mac_node, *tmp;
8037 	struct hclge_dev *hdev = vport->back;
8038 	struct list_head tmp_del_list, *list;
8039 
8040 	INIT_LIST_HEAD(&tmp_del_list);
8041 
8042 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8043 		&vport->uc_mac_list : &vport->mc_mac_list;
8044 
8045 	spin_lock_bh(&vport->mac_list_lock);
8046 
8047 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8048 		switch (mac_node->state) {
8049 		case HCLGE_MAC_TO_DEL:
8050 		case HCLGE_MAC_ACTIVE:
8051 			list_del(&mac_node->node);
8052 			list_add_tail(&mac_node->node, &tmp_del_list);
8053 			break;
8054 		case HCLGE_MAC_TO_ADD:
8055 			list_del(&mac_node->node);
8056 			kfree(mac_node);
8057 			break;
8058 		}
8059 	}
8060 
8061 	spin_unlock_bh(&vport->mac_list_lock);
8062 
8063 	if (mac_type == HCLGE_MAC_ADDR_UC)
8064 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8065 					    hclge_rm_uc_addr_common);
8066 	else
8067 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8068 					    hclge_rm_mc_addr_common);
8069 
8070 	if (!list_empty(&tmp_del_list))
8071 		dev_warn(&hdev->pdev->dev,
8072 			 "uninit %s mac list for vport %u not completely.\n",
8073 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8074 			 vport->vport_id);
8075 
8076 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8077 		list_del(&mac_node->node);
8078 		kfree(mac_node);
8079 	}
8080 }
8081 
8082 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8083 {
8084 	struct hclge_vport *vport;
8085 	int i;
8086 
8087 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8088 		vport = &hdev->vport[i];
8089 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8090 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8091 	}
8092 }
8093 
8094 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8095 					      u16 cmdq_resp, u8 resp_code)
8096 {
8097 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
8098 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
8099 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
8100 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
8101 
8102 	int return_status;
8103 
8104 	if (cmdq_resp) {
8105 		dev_err(&hdev->pdev->dev,
8106 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8107 			cmdq_resp);
8108 		return -EIO;
8109 	}
8110 
8111 	switch (resp_code) {
8112 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
8113 	case HCLGE_ETHERTYPE_ALREADY_ADD:
8114 		return_status = 0;
8115 		break;
8116 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8117 		dev_err(&hdev->pdev->dev,
8118 			"add mac ethertype failed for manager table overflow.\n");
8119 		return_status = -EIO;
8120 		break;
8121 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
8122 		dev_err(&hdev->pdev->dev,
8123 			"add mac ethertype failed for key conflict.\n");
8124 		return_status = -EIO;
8125 		break;
8126 	default:
8127 		dev_err(&hdev->pdev->dev,
8128 			"add mac ethertype failed for undefined, code=%u.\n",
8129 			resp_code);
8130 		return_status = -EIO;
8131 	}
8132 
8133 	return return_status;
8134 }
8135 
8136 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8137 				     u8 *mac_addr)
8138 {
8139 	struct hclge_mac_vlan_tbl_entry_cmd req;
8140 	struct hclge_dev *hdev = vport->back;
8141 	struct hclge_desc desc;
8142 	u16 egress_port = 0;
8143 	int i;
8144 
8145 	if (is_zero_ether_addr(mac_addr))
8146 		return false;
8147 
8148 	memset(&req, 0, sizeof(req));
8149 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8150 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8151 	req.egress_port = cpu_to_le16(egress_port);
8152 	hclge_prepare_mac_addr(&req, mac_addr, false);
8153 
8154 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8155 		return true;
8156 
8157 	vf_idx += HCLGE_VF_VPORT_START_NUM;
8158 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8159 		if (i != vf_idx &&
8160 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8161 			return true;
8162 
8163 	return false;
8164 }
8165 
8166 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8167 			    u8 *mac_addr)
8168 {
8169 	struct hclge_vport *vport = hclge_get_vport(handle);
8170 	struct hclge_dev *hdev = vport->back;
8171 
8172 	vport = hclge_get_vf_vport(hdev, vf);
8173 	if (!vport)
8174 		return -EINVAL;
8175 
8176 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8177 		dev_info(&hdev->pdev->dev,
8178 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
8179 			 mac_addr);
8180 		return 0;
8181 	}
8182 
8183 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8184 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8185 			mac_addr);
8186 		return -EEXIST;
8187 	}
8188 
8189 	ether_addr_copy(vport->vf_info.mac, mac_addr);
8190 
8191 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8192 		dev_info(&hdev->pdev->dev,
8193 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8194 			 vf, mac_addr);
8195 		return hclge_inform_reset_assert_to_vf(vport);
8196 	}
8197 
8198 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8199 		 vf, mac_addr);
8200 	return 0;
8201 }
8202 
8203 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8204 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
8205 {
8206 	struct hclge_desc desc;
8207 	u8 resp_code;
8208 	u16 retval;
8209 	int ret;
8210 
8211 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8212 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8213 
8214 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8215 	if (ret) {
8216 		dev_err(&hdev->pdev->dev,
8217 			"add mac ethertype failed for cmd_send, ret =%d.\n",
8218 			ret);
8219 		return ret;
8220 	}
8221 
8222 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8223 	retval = le16_to_cpu(desc.retval);
8224 
8225 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8226 }
8227 
8228 static int init_mgr_tbl(struct hclge_dev *hdev)
8229 {
8230 	int ret;
8231 	int i;
8232 
8233 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8234 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8235 		if (ret) {
8236 			dev_err(&hdev->pdev->dev,
8237 				"add mac ethertype failed, ret =%d.\n",
8238 				ret);
8239 			return ret;
8240 		}
8241 	}
8242 
8243 	return 0;
8244 }
8245 
8246 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8247 {
8248 	struct hclge_vport *vport = hclge_get_vport(handle);
8249 	struct hclge_dev *hdev = vport->back;
8250 
8251 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
8252 }
8253 
8254 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8255 				       const u8 *old_addr, const u8 *new_addr)
8256 {
8257 	struct list_head *list = &vport->uc_mac_list;
8258 	struct hclge_mac_node *old_node, *new_node;
8259 
8260 	new_node = hclge_find_mac_node(list, new_addr);
8261 	if (!new_node) {
8262 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8263 		if (!new_node)
8264 			return -ENOMEM;
8265 
8266 		new_node->state = HCLGE_MAC_TO_ADD;
8267 		ether_addr_copy(new_node->mac_addr, new_addr);
8268 		list_add(&new_node->node, list);
8269 	} else {
8270 		if (new_node->state == HCLGE_MAC_TO_DEL)
8271 			new_node->state = HCLGE_MAC_ACTIVE;
8272 
8273 		/* make sure the new addr is at the list head, so the dev
8274 		 * addr is not skipped due to the umv space limitation when
8275 		 * re-adding entries after a global/imp reset, which clears
8276 		 * the mac table in hardware.
8277 		 */
8278 		list_move(&new_node->node, list);
8279 	}
8280 
8281 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8282 		old_node = hclge_find_mac_node(list, old_addr);
8283 		if (old_node) {
8284 			if (old_node->state == HCLGE_MAC_TO_ADD) {
8285 				list_del(&old_node->node);
8286 				kfree(old_node);
8287 			} else {
8288 				old_node->state = HCLGE_MAC_TO_DEL;
8289 			}
8290 		}
8291 	}
8292 
8293 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8294 
8295 	return 0;
8296 }
8297 
8298 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8299 			      bool is_first)
8300 {
8301 	const unsigned char *new_addr = (const unsigned char *)p;
8302 	struct hclge_vport *vport = hclge_get_vport(handle);
8303 	struct hclge_dev *hdev = vport->back;
8304 	unsigned char *old_addr = NULL;
8305 	int ret;
8306 
8307 	/* mac addr check */
8308 	if (is_zero_ether_addr(new_addr) ||
8309 	    is_broadcast_ether_addr(new_addr) ||
8310 	    is_multicast_ether_addr(new_addr)) {
8311 		dev_err(&hdev->pdev->dev,
8312 			"change uc mac err! invalid mac: %pM.\n",
8313 			 new_addr);
8314 		return -EINVAL;
8315 	}
8316 
8317 	ret = hclge_pause_addr_cfg(hdev, new_addr);
8318 	if (ret) {
8319 		dev_err(&hdev->pdev->dev,
8320 			"failed to configure mac pause address, ret = %d\n",
8321 			ret);
8322 		return ret;
8323 	}
8324 
8325 	if (!is_first)
8326 		old_addr = hdev->hw.mac.mac_addr;
8327 
8328 	spin_lock_bh(&vport->mac_list_lock);
8329 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8330 	if (ret) {
8331 		dev_err(&hdev->pdev->dev,
8332 			"failed to change the mac addr:%pM, ret = %d\n",
8333 			new_addr, ret);
8334 		spin_unlock_bh(&vport->mac_list_lock);
8335 
8336 		if (!is_first)
8337 			hclge_pause_addr_cfg(hdev, old_addr);
8338 
8339 		return ret;
8340 	}
8341 	/* we must update the dev addr under the spin lock, preventing the
8342 	 * dev addr from being removed by the set_rx_mode path.
8343 	 */
8344 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8345 	spin_unlock_bh(&vport->mac_list_lock);
8346 
8347 	hclge_task_schedule(hdev, 0);
8348 
8349 	return 0;
8350 }
8351 
8352 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8353 			  int cmd)
8354 {
8355 	struct hclge_vport *vport = hclge_get_vport(handle);
8356 	struct hclge_dev *hdev = vport->back;
8357 
8358 	if (!hdev->hw.mac.phydev)
8359 		return -EOPNOTSUPP;
8360 
8361 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8362 }
8363 
8364 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8365 				      u8 fe_type, bool filter_en, u8 vf_id)
8366 {
8367 	struct hclge_vlan_filter_ctrl_cmd *req;
8368 	struct hclge_desc desc;
8369 	int ret;
8370 
8371 	/* read current vlan filter parameter */
8372 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8373 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8374 	req->vlan_type = vlan_type;
8375 	req->vf_id = vf_id;
8376 
8377 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8378 	if (ret) {
8379 		dev_err(&hdev->pdev->dev,
8380 			"failed to get vlan filter config, ret = %d.\n", ret);
8381 		return ret;
8382 	}
8383 
8384 	/* modify and write new config parameter */
8385 	hclge_cmd_reuse_desc(&desc, false);
8386 	req->vlan_fe = filter_en ?
8387 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8388 
8389 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8390 	if (ret)
8391 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8392 			ret);
8393 
8394 	return ret;
8395 }
8396 
8397 #define HCLGE_FILTER_TYPE_VF		0
8398 #define HCLGE_FILTER_TYPE_PORT		1
8399 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
8400 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
8401 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
8402 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
8403 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
8404 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
8405 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
8406 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
8407 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
8408 
8409 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8410 {
8411 	struct hclge_vport *vport = hclge_get_vport(handle);
8412 	struct hclge_dev *hdev = vport->back;
8413 
8414 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8415 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8416 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
8417 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8418 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
8419 	} else {
8420 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8421 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8422 					   0);
8423 	}
8424 	if (enable)
8425 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
8426 	else
8427 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8428 }
8429 
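/* Program one vlan id into (or out of) a VF's vlan filter. The target VF is
 * selected by a per-VF bitmap that spans two descriptors: vfid / 8 picks the
 * byte and 1 << (vfid % 8) the bit, with bytes beyond HCLGE_MAX_VF_BYTES
 * spilling into the second descriptor. For a (hypothetical) vfid of 10 this
 * is byte 1, bit value 0x04.
 */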
8430 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8431 				    bool is_kill, u16 vlan,
8432 				    __be16 proto)
8433 {
8434 	struct hclge_vport *vport = &hdev->vport[vfid];
8435 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
8436 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
8437 	struct hclge_desc desc[2];
8438 	u8 vf_byte_val;
8439 	u8 vf_byte_off;
8440 	int ret;
8441 
8442 	/* if the vf vlan table is full, firmware disables the vf vlan filter,
8443 	 * so it is neither possible nor necessary to add a new vlan id to it.
8444 	 * If spoof check is enabled and the vf vlan table is full, a new vlan
8445 	 * must not be added, since tx packets with that vlan id will be dropped.
8446 	 */
8447 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8448 		if (vport->vf_info.spoofchk && vlan) {
8449 			dev_err(&hdev->pdev->dev,
8450 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
8451 			return -EPERM;
8452 		}
8453 		return 0;
8454 	}
8455 
8456 	hclge_cmd_setup_basic_desc(&desc[0],
8457 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8458 	hclge_cmd_setup_basic_desc(&desc[1],
8459 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8460 
8461 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8462 
8463 	vf_byte_off = vfid / 8;
8464 	vf_byte_val = 1 << (vfid % 8);
8465 
8466 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8467 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8468 
8469 	req0->vlan_id  = cpu_to_le16(vlan);
8470 	req0->vlan_cfg = is_kill;
8471 
8472 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8473 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8474 	else
8475 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8476 
8477 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
8478 	if (ret) {
8479 		dev_err(&hdev->pdev->dev,
8480 			"Send vf vlan command fail, ret =%d.\n",
8481 			ret);
8482 		return ret;
8483 	}
8484 
8485 	if (!is_kill) {
8486 #define HCLGE_VF_VLAN_NO_ENTRY	2
8487 		if (!req0->resp_code || req0->resp_code == 1)
8488 			return 0;
8489 
8490 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8491 			set_bit(vfid, hdev->vf_vlan_full);
8492 			dev_warn(&hdev->pdev->dev,
8493 				 "vf vlan table is full, vf vlan filter is disabled\n");
8494 			return 0;
8495 		}
8496 
8497 		dev_err(&hdev->pdev->dev,
8498 			"Add vf vlan filter fail, ret =%u.\n",
8499 			req0->resp_code);
8500 	} else {
8501 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
8502 		if (!req0->resp_code)
8503 			return 0;
8504 
8505 		/* vf vlan filter is disabled when the vf vlan table is full,
8506 		 * so new vlan ids are not added into the vf vlan table.
8507 		 * Just return 0 without a warning, to avoid massive verbose
8508 		 * log output on unload.
8509 		 */
8510 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8511 			return 0;
8512 
8513 		dev_err(&hdev->pdev->dev,
8514 			"Kill vf vlan filter fail, ret =%u.\n",
8515 			req0->resp_code);
8516 	}
8517 
8518 	return -EIO;
8519 }
8520 
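/* Program one vlan id into the port (PF) vlan filter. The 4096 vlan ids are
 * addressed as offset/byte/bit: vlan_offset selects a block of
 * HCLGE_VLAN_ID_OFFSET_STEP ids, vlan_offset_bitmap[] selects the byte
 * within that block, and a single bit marks the id. Assuming a step of 160
 * and a byte size of 8 (as the field names suggest), a hypothetical vlan id
 * of 1000 maps to offset 6, byte 5, bit value 1 << 0.
 */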
8521 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8522 				      u16 vlan_id, bool is_kill)
8523 {
8524 	struct hclge_vlan_filter_pf_cfg_cmd *req;
8525 	struct hclge_desc desc;
8526 	u8 vlan_offset_byte_val;
8527 	u8 vlan_offset_byte;
8528 	u8 vlan_offset_160;
8529 	int ret;
8530 
8531 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8532 
8533 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8534 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8535 			   HCLGE_VLAN_BYTE_SIZE;
8536 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8537 
8538 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8539 	req->vlan_offset = vlan_offset_160;
8540 	req->vlan_cfg = is_kill;
8541 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8542 
8543 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8544 	if (ret)
8545 		dev_err(&hdev->pdev->dev,
8546 			"port vlan command, send fail, ret =%d.\n", ret);
8547 	return ret;
8548 }
8549 
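/* Apply a vlan filter change for one vport: always update that vport's VF
 * vlan filter, then track membership in hdev->vlan_table[vlan_id] and only
 * touch the shared port filter when the first vport joins the vlan or the
 * last one leaves it.
 */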
8550 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8551 				    u16 vport_id, u16 vlan_id,
8552 				    bool is_kill)
8553 {
8554 	u16 vport_idx, vport_num = 0;
8555 	int ret;
8556 
8557 	if (is_kill && !vlan_id)
8558 		return 0;
8559 
8560 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8561 				       proto);
8562 	if (ret) {
8563 		dev_err(&hdev->pdev->dev,
8564 			"Set %u vport vlan filter config fail, ret =%d.\n",
8565 			vport_id, ret);
8566 		return ret;
8567 	}
8568 
8569 	/* vlan 0 may be added twice when 8021q module is enabled */
8570 	if (!is_kill && !vlan_id &&
8571 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
8572 		return 0;
8573 
8574 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8575 		dev_err(&hdev->pdev->dev,
8576 			"Add port vlan failed, vport %u is already in vlan %u\n",
8577 			vport_id, vlan_id);
8578 		return -EINVAL;
8579 	}
8580 
8581 	if (is_kill &&
8582 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8583 		dev_err(&hdev->pdev->dev,
8584 			"Delete port vlan failed, vport %u is not in vlan %u\n",
8585 			vport_id, vlan_id);
8586 		return -EINVAL;
8587 	}
8588 
8589 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8590 		vport_num++;
8591 
8592 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8593 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8594 						 is_kill);
8595 
8596 	return ret;
8597 }
8598 
8599 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8600 {
8601 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8602 	struct hclge_vport_vtag_tx_cfg_cmd *req;
8603 	struct hclge_dev *hdev = vport->back;
8604 	struct hclge_desc desc;
8605 	u16 bmap_index;
8606 	int status;
8607 
8608 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8609 
8610 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8611 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8612 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8613 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8614 		      vcfg->accept_tag1 ? 1 : 0);
8615 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8616 		      vcfg->accept_untag1 ? 1 : 0);
8617 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8618 		      vcfg->accept_tag2 ? 1 : 0);
8619 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8620 		      vcfg->accept_untag2 ? 1 : 0);
8621 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8622 		      vcfg->insert_tag1_en ? 1 : 0);
8623 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8624 		      vcfg->insert_tag2_en ? 1 : 0);
8625 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8626 
8627 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8628 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8629 			HCLGE_VF_NUM_PER_BYTE;
8630 	req->vf_bitmap[bmap_index] =
8631 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8632 
8633 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8634 	if (status)
8635 		dev_err(&hdev->pdev->dev,
8636 			"Send port txvlan cfg command fail, ret =%d\n",
8637 			status);
8638 
8639 	return status;
8640 }
8641 
8642 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8643 {
8644 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8645 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8646 	struct hclge_dev *hdev = vport->back;
8647 	struct hclge_desc desc;
8648 	u16 bmap_index;
8649 	int status;
8650 
8651 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8652 
8653 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8654 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8655 		      vcfg->strip_tag1_en ? 1 : 0);
8656 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8657 		      vcfg->strip_tag2_en ? 1 : 0);
8658 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8659 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8660 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8661 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8662 
8663 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8664 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8665 			HCLGE_VF_NUM_PER_BYTE;
8666 	req->vf_bitmap[bmap_index] =
8667 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8668 
8669 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8670 	if (status)
8671 		dev_err(&hdev->pdev->dev,
8672 			"Send port rxvlan cfg command fail, ret =%d\n",
8673 			status);
8674 
8675 	return status;
8676 }
8677 
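/* Configure TX vlan insertion and RX vlan stripping for a vport based on
 * its port-based vlan state: with a port-based vlan enabled, hardware
 * inserts the configured vlan as tag1 on TX and always strips tag2 on RX;
 * with it disabled, tag1 passes through on TX and only tag2 stripping
 * follows the rx_vlan_offload_en setting.
 */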
8678 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8679 				  u16 port_base_vlan_state,
8680 				  u16 vlan_tag)
8681 {
8682 	int ret;
8683 
8684 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8685 		vport->txvlan_cfg.accept_tag1 = true;
8686 		vport->txvlan_cfg.insert_tag1_en = false;
8687 		vport->txvlan_cfg.default_tag1 = 0;
8688 	} else {
8689 		vport->txvlan_cfg.accept_tag1 = false;
8690 		vport->txvlan_cfg.insert_tag1_en = true;
8691 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8692 	}
8693 
8694 	vport->txvlan_cfg.accept_untag1 = true;
8695 
8696 	/* accept_tag2 and accept_untag2 are not supported on
8697 	 * pdev revision(0x20); newer revisions support them, but
8698 	 * these two fields cannot be configured by the user.
8699 	 */
8700 	vport->txvlan_cfg.accept_tag2 = true;
8701 	vport->txvlan_cfg.accept_untag2 = true;
8702 	vport->txvlan_cfg.insert_tag2_en = false;
8703 	vport->txvlan_cfg.default_tag2 = 0;
8704 
8705 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8706 		vport->rxvlan_cfg.strip_tag1_en = false;
8707 		vport->rxvlan_cfg.strip_tag2_en =
8708 				vport->rxvlan_cfg.rx_vlan_offload_en;
8709 	} else {
8710 		vport->rxvlan_cfg.strip_tag1_en =
8711 				vport->rxvlan_cfg.rx_vlan_offload_en;
8712 		vport->rxvlan_cfg.strip_tag2_en = true;
8713 	}
8714 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8715 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8716 
8717 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8718 	if (ret)
8719 		return ret;
8720 
8721 	return hclge_set_vlan_rx_offload_cfg(vport);
8722 }
8723 
8724 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8725 {
8726 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8727 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8728 	struct hclge_desc desc;
8729 	int status;
8730 
8731 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8732 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8733 	rx_req->ot_fst_vlan_type =
8734 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8735 	rx_req->ot_sec_vlan_type =
8736 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8737 	rx_req->in_fst_vlan_type =
8738 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8739 	rx_req->in_sec_vlan_type =
8740 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8741 
8742 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8743 	if (status) {
8744 		dev_err(&hdev->pdev->dev,
8745 			"Send rxvlan protocol type command fail, ret =%d\n",
8746 			status);
8747 		return status;
8748 	}
8749 
8750 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8751 
8752 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8753 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8754 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8755 
8756 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8757 	if (status)
8758 		dev_err(&hdev->pdev->dev,
8759 			"Send txvlan protocol type command fail, ret =%d\n",
8760 			status);
8761 
8762 	return status;
8763 }
8764 
8765 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8766 {
8767 #define HCLGE_DEF_VLAN_TYPE		0x8100
8768 
8769 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8770 	struct hclge_vport *vport;
8771 	int ret;
8772 	int i;
8773 
8774 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8775 		/* for revision 0x21, vf vlan filter is per function */
8776 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8777 			vport = &hdev->vport[i];
8778 			ret = hclge_set_vlan_filter_ctrl(hdev,
8779 							 HCLGE_FILTER_TYPE_VF,
8780 							 HCLGE_FILTER_FE_EGRESS,
8781 							 true,
8782 							 vport->vport_id);
8783 			if (ret)
8784 				return ret;
8785 		}
8786 
8787 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8788 						 HCLGE_FILTER_FE_INGRESS, true,
8789 						 0);
8790 		if (ret)
8791 			return ret;
8792 	} else {
8793 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8794 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8795 						 true, 0);
8796 		if (ret)
8797 			return ret;
8798 	}
8799 
8800 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8801 
8802 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8803 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8804 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8805 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8806 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8807 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8808 
8809 	ret = hclge_set_vlan_protocol_type(hdev);
8810 	if (ret)
8811 		return ret;
8812 
8813 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8814 		u16 vlan_tag;
8815 
8816 		vport = &hdev->vport[i];
8817 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8818 
8819 		ret = hclge_vlan_offload_cfg(vport,
8820 					     vport->port_base_vlan_cfg.state,
8821 					     vlan_tag);
8822 		if (ret)
8823 			return ret;
8824 	}
8825 
8826 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8827 }
8828 
8829 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8830 				       bool writen_to_tbl)
8831 {
8832 	struct hclge_vport_vlan_cfg *vlan;
8833 
8834 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8835 	if (!vlan)
8836 		return;
8837 
8838 	vlan->hd_tbl_status = writen_to_tbl;
8839 	vlan->vlan_id = vlan_id;
8840 
8841 	list_add_tail(&vlan->node, &vport->vlan_list);
8842 }
8843 
8844 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8845 {
8846 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8847 	struct hclge_dev *hdev = vport->back;
8848 	int ret;
8849 
8850 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8851 		if (!vlan->hd_tbl_status) {
8852 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8853 						       vport->vport_id,
8854 						       vlan->vlan_id, false);
8855 			if (ret) {
8856 				dev_err(&hdev->pdev->dev,
8857 					"restore vport vlan list failed, ret=%d\n",
8858 					ret);
8859 				return ret;
8860 			}
8861 		}
8862 		vlan->hd_tbl_status = true;
8863 	}
8864 
8865 	return 0;
8866 }
8867 
8868 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8869 				      bool is_write_tbl)
8870 {
8871 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8872 	struct hclge_dev *hdev = vport->back;
8873 
8874 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8875 		if (vlan->vlan_id == vlan_id) {
8876 			if (is_write_tbl && vlan->hd_tbl_status)
8877 				hclge_set_vlan_filter_hw(hdev,
8878 							 htons(ETH_P_8021Q),
8879 							 vport->vport_id,
8880 							 vlan_id,
8881 							 true);
8882 
8883 			list_del(&vlan->node);
8884 			kfree(vlan);
8885 			break;
8886 		}
8887 	}
8888 }
8889 
8890 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8891 {
8892 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8893 	struct hclge_dev *hdev = vport->back;
8894 
8895 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8896 		if (vlan->hd_tbl_status)
8897 			hclge_set_vlan_filter_hw(hdev,
8898 						 htons(ETH_P_8021Q),
8899 						 vport->vport_id,
8900 						 vlan->vlan_id,
8901 						 true);
8902 
8903 		vlan->hd_tbl_status = false;
8904 		if (is_del_list) {
8905 			list_del(&vlan->node);
8906 			kfree(vlan);
8907 		}
8908 	}
8909 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
8910 }
8911 
8912 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8913 {
8914 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8915 	struct hclge_vport *vport;
8916 	int i;
8917 
8918 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8919 		vport = &hdev->vport[i];
8920 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8921 			list_del(&vlan->node);
8922 			kfree(vlan);
8923 		}
8924 	}
8925 }
8926 
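/* Restore this vport's VLAN filter entries after a reset: when a port
 * based VLAN is configured, only that VLAN is reprogrammed; otherwise the
 * entries in the software VLAN list are written back to hardware.
 */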
8927 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8928 {
8929 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8930 	struct hclge_dev *hdev = vport->back;
8931 	u16 vlan_proto;
8932 	u16 vlan_id;
8933 	u16 state;
8934 	int ret;
8935 
8936 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8937 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8938 	state = vport->port_base_vlan_cfg.state;
8939 
8940 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8941 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8942 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8943 					 vport->vport_id, vlan_id,
8944 					 false);
8945 		return;
8946 	}
8947 
8948 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8949 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8950 					       vport->vport_id,
8951 					       vlan->vlan_id, false);
8952 		if (ret)
8953 			break;
8954 		vlan->hd_tbl_status = true;
8955 	}
8956 }
8957 
8958 /* For global reset and IMP reset, hardware will clear the MAC table,
8959  * so we change the MAC address state from ACTIVE to TO_ADD, then the
8960  * entries can be restored in the service task after the reset completes.
8961  * Furthermore, MAC addresses with state TO_DEL or DEL_FAIL do not need
8962  * to be restored after reset, so just remove these MAC nodes from mac_list.
8963  */
8964 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8965 {
8966 	struct hclge_mac_node *mac_node, *tmp;
8967 
8968 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8969 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
8970 			mac_node->state = HCLGE_MAC_TO_ADD;
8971 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8972 			list_del(&mac_node->node);
8973 			kfree(mac_node);
8974 		}
8975 	}
8976 }
8977 
8978 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8979 {
8980 	spin_lock_bh(&vport->mac_list_lock);
8981 
8982 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8983 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8984 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8985 
8986 	spin_unlock_bh(&vport->mac_list_lock);
8987 }
8988 
8989 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8990 {
8991 	struct hclge_vport *vport = &hdev->vport[0];
8992 	struct hnae3_handle *handle = &vport->nic;
8993 
8994 	hclge_restore_mac_table_common(vport);
8995 	hclge_restore_vport_vlan_table(vport);
8996 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8997 
8998 	hclge_restore_fd_entries(handle);
8999 }
9000 
9001 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9002 {
9003 	struct hclge_vport *vport = hclge_get_vport(handle);
9004 
9005 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9006 		vport->rxvlan_cfg.strip_tag1_en = false;
9007 		vport->rxvlan_cfg.strip_tag2_en = enable;
9008 	} else {
9009 		vport->rxvlan_cfg.strip_tag1_en = enable;
9010 		vport->rxvlan_cfg.strip_tag2_en = true;
9011 	}
9012 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9013 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9014 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9015 
9016 	return hclge_set_vlan_rx_offload_cfg(vport);
9017 }
9018 
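/* Switch the hardware filter entries when the port based VLAN state
 * changes: on enable, drop the per-VLAN entries and install the new port
 * based VLAN; on disable, remove the old port based VLAN and restore the
 * per-VLAN entries from the software list.
 */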
9019 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9020 					    u16 port_base_vlan_state,
9021 					    struct hclge_vlan_info *new_info,
9022 					    struct hclge_vlan_info *old_info)
9023 {
9024 	struct hclge_dev *hdev = vport->back;
9025 	int ret;
9026 
9027 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9028 		hclge_rm_vport_all_vlan_table(vport, false);
9029 		return hclge_set_vlan_filter_hw(hdev,
9030 						 htons(new_info->vlan_proto),
9031 						 vport->vport_id,
9032 						 new_info->vlan_tag,
9033 						 false);
9034 	}
9035 
9036 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9037 				       vport->vport_id, old_info->vlan_tag,
9038 				       true);
9039 	if (ret)
9040 		return ret;
9041 
9042 	return hclge_add_vport_all_vlan_table(vport);
9043 }
9044 
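/* Apply a new port based VLAN configuration to a vport: reprogram the
 * TX/RX VLAN offload, update the hardware filter entries and record the
 * new state and VLAN information.
 */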
9045 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9046 				    struct hclge_vlan_info *vlan_info)
9047 {
9048 	struct hnae3_handle *nic = &vport->nic;
9049 	struct hclge_vlan_info *old_vlan_info;
9050 	struct hclge_dev *hdev = vport->back;
9051 	int ret;
9052 
9053 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9054 
9055 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9056 	if (ret)
9057 		return ret;
9058 
9059 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9060 		/* add new VLAN tag */
9061 		ret = hclge_set_vlan_filter_hw(hdev,
9062 					       htons(vlan_info->vlan_proto),
9063 					       vport->vport_id,
9064 					       vlan_info->vlan_tag,
9065 					       false);
9066 		if (ret)
9067 			return ret;
9068 
9069 		/* remove old VLAN tag */
9070 		ret = hclge_set_vlan_filter_hw(hdev,
9071 					       htons(old_vlan_info->vlan_proto),
9072 					       vport->vport_id,
9073 					       old_vlan_info->vlan_tag,
9074 					       true);
9075 		if (ret)
9076 			return ret;
9077 
9078 		goto update;
9079 	}
9080 
9081 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9082 					       old_vlan_info);
9083 	if (ret)
9084 		return ret;
9085 
9086 	/* update state only when disabling/enabling port based VLAN */
9087 	vport->port_base_vlan_cfg.state = state;
9088 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9089 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9090 	else
9091 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9092 
9093 update:
9094 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9095 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9096 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9097 
9098 	return 0;
9099 }
9100 
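/* Work out how the requested VLAN changes the port based VLAN
 * configuration: enable, disable, modify or no change.
 */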
9101 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9102 					  enum hnae3_port_base_vlan_state state,
9103 					  u16 vlan)
9104 {
9105 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9106 		if (!vlan)
9107 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9108 		else
9109 			return HNAE3_PORT_BASE_VLAN_ENABLE;
9110 	} else {
9111 		if (!vlan)
9112 			return HNAE3_PORT_BASE_VLAN_DISABLE;
9113 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9114 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9115 		else
9116 			return HNAE3_PORT_BASE_VLAN_MODIFY;
9117 	}
9118 }
9119 
9120 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9121 				    u16 vlan, u8 qos, __be16 proto)
9122 {
9123 	struct hclge_vport *vport = hclge_get_vport(handle);
9124 	struct hclge_dev *hdev = vport->back;
9125 	struct hclge_vlan_info vlan_info;
9126 	u16 state;
9127 	int ret;
9128 
9129 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9130 		return -EOPNOTSUPP;
9131 
9132 	vport = hclge_get_vf_vport(hdev, vfid);
9133 	if (!vport)
9134 		return -EINVAL;
9135 
9136 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
9137 	if (vlan > VLAN_N_VID - 1 || qos > 7)
9138 		return -EINVAL;
9139 	if (proto != htons(ETH_P_8021Q))
9140 		return -EPROTONOSUPPORT;
9141 
9142 	state = hclge_get_port_base_vlan_state(vport,
9143 					       vport->port_base_vlan_cfg.state,
9144 					       vlan);
9145 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9146 		return 0;
9147 
9148 	vlan_info.vlan_tag = vlan;
9149 	vlan_info.qos = qos;
9150 	vlan_info.vlan_proto = ntohs(proto);
9151 
9152 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9153 		return hclge_update_port_base_vlan_cfg(vport, state,
9154 						       &vlan_info);
9155 	} else {
9156 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9157 							vport->vport_id, state,
9158 							vlan, qos,
9159 							ntohs(proto));
9160 		return ret;
9161 	}
9162 }
9163 
9164 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9165 {
9166 	struct hclge_vlan_info *vlan_info;
9167 	struct hclge_vport *vport;
9168 	int ret;
9169 	int vf;
9170 
9171 	/* clear the port based VLAN for all VFs */
9172 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9173 		vport = &hdev->vport[vf];
9174 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9175 
9176 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9177 					       vport->vport_id,
9178 					       vlan_info->vlan_tag, true);
9179 		if (ret)
9180 			dev_err(&hdev->pdev->dev,
9181 				"failed to clear vf vlan for vf%d, ret = %d\n",
9182 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9183 	}
9184 }
9185 
9186 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9187 			  u16 vlan_id, bool is_kill)
9188 {
9189 	struct hclge_vport *vport = hclge_get_vport(handle);
9190 	struct hclge_dev *hdev = vport->back;
9191 	bool writen_to_tbl = false;
9192 	int ret = 0;
9193 
9194 	/* When the device is resetting or the reset has failed, the firmware
9195 	 * is unable to handle the mailbox. Just record the VLAN id, and remove
9196 	 * it after the reset has finished.
9197 	 */
9198 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9199 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9200 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9201 		return -EBUSY;
9202 	}
9203 
9204 	/* When port based VLAN is enabled, we use it as the VLAN filter
9205 	 * entry. In this case, we don't update the VLAN filter table when the
9206 	 * user adds a new VLAN or removes an existing one; we just update the
9207 	 * vport VLAN list. The VLAN ids in that list are not written to the
9208 	 * VLAN filter table until port based VLAN is disabled.
9209 	 */
9210 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9211 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9212 					       vlan_id, is_kill);
9213 		writen_to_tbl = true;
9214 	}
9215 
9216 	if (!ret) {
9217 		if (is_kill)
9218 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9219 		else
9220 			hclge_add_vport_vlan_table(vport, vlan_id,
9221 						   writen_to_tbl);
9222 	} else if (is_kill) {
9223 		/* when removing the hw vlan filter failed, record the vlan
9224 		 * id, and try to remove it from hw later, to be consistent
9225 		 * with the stack
9226 		 */
9227 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9228 	}
9229 	return ret;
9230 }
9231 
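/* Retry the VLAN deletions recorded in vlan_del_fail_bmap (deferred
 * because of a reset or a previous failure), handling at most
 * HCLGE_MAX_SYNC_COUNT entries per invocation.
 */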
9232 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9233 {
9234 #define HCLGE_MAX_SYNC_COUNT	60
9235 
9236 	int i, ret, sync_cnt = 0;
9237 	u16 vlan_id;
9238 
9239 	/* start from vport 1 for PF is always alive */
9240 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9241 		struct hclge_vport *vport = &hdev->vport[i];
9242 
9243 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9244 					 VLAN_N_VID);
9245 		while (vlan_id != VLAN_N_VID) {
9246 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9247 						       vport->vport_id, vlan_id,
9248 						       true);
9249 			if (ret && ret != -EINVAL)
9250 				return;
9251 
9252 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9253 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9254 
9255 			sync_cnt++;
9256 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9257 				return;
9258 
9259 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9260 						 VLAN_N_VID);
9261 		}
9262 	}
9263 }
9264 
9265 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9266 {
9267 	struct hclge_config_max_frm_size_cmd *req;
9268 	struct hclge_desc desc;
9269 
9270 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9271 
9272 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9273 	req->max_frm_size = cpu_to_le16(new_mps);
9274 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9275 
9276 	return hclge_cmd_send(&hdev->hw, &desc, 1);
9277 }
9278 
9279 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9280 {
9281 	struct hclge_vport *vport = hclge_get_vport(handle);
9282 
9283 	return hclge_set_vport_mtu(vport, new_mtu);
9284 }
9285 
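/* Translate the requested MTU into a maximum frame size and apply it. For
 * a VF only the software limit is updated (it must fit within the PF's
 * mps); for the PF the MAC and the buffer allocation are reconfigured
 * while the client is stopped.
 */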
9286 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9287 {
9288 	struct hclge_dev *hdev = vport->back;
9289 	int i, max_frm_size, ret;
9290 
9291 	/* HW supports 2 layers of VLAN tags */
9292 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9293 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9294 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
9295 		return -EINVAL;
9296 
9297 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9298 	mutex_lock(&hdev->vport_lock);
9299 	/* VF's mps must fit within hdev->mps */
9300 	if (vport->vport_id && max_frm_size > hdev->mps) {
9301 		mutex_unlock(&hdev->vport_lock);
9302 		return -EINVAL;
9303 	} else if (vport->vport_id) {
9304 		vport->mps = max_frm_size;
9305 		mutex_unlock(&hdev->vport_lock);
9306 		return 0;
9307 	}
9308 
9309 	/* PF's mps must not be less than any VF's mps */
9310 	for (i = 1; i < hdev->num_alloc_vport; i++)
9311 		if (max_frm_size < hdev->vport[i].mps) {
9312 			mutex_unlock(&hdev->vport_lock);
9313 			return -EINVAL;
9314 		}
9315 
9316 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9317 
9318 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
9319 	if (ret) {
9320 		dev_err(&hdev->pdev->dev,
9321 			"Change mtu fail, ret =%d\n", ret);
9322 		goto out;
9323 	}
9324 
9325 	hdev->mps = max_frm_size;
9326 	vport->mps = max_frm_size;
9327 
9328 	ret = hclge_buffer_alloc(hdev);
9329 	if (ret)
9330 		dev_err(&hdev->pdev->dev,
9331 			"Allocate buffer fail, ret =%d\n", ret);
9332 
9333 out:
9334 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9335 	mutex_unlock(&hdev->vport_lock);
9336 	return ret;
9337 }
9338 
9339 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9340 				    bool enable)
9341 {
9342 	struct hclge_reset_tqp_queue_cmd *req;
9343 	struct hclge_desc desc;
9344 	int ret;
9345 
9346 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9347 
9348 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9349 	req->tqp_id = cpu_to_le16(queue_id);
9350 	if (enable)
9351 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9352 
9353 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9354 	if (ret) {
9355 		dev_err(&hdev->pdev->dev,
9356 			"Send tqp reset cmd error, status =%d\n", ret);
9357 		return ret;
9358 	}
9359 
9360 	return 0;
9361 }
9362 
9363 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9364 {
9365 	struct hclge_reset_tqp_queue_cmd *req;
9366 	struct hclge_desc desc;
9367 	int ret;
9368 
9369 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9370 
9371 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9372 	req->tqp_id = cpu_to_le16(queue_id);
9373 
9374 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9375 	if (ret) {
9376 		dev_err(&hdev->pdev->dev,
9377 			"Get reset status error, status =%d\n", ret);
9378 		return ret;
9379 	}
9380 
9381 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9382 }
9383 
9384 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9385 {
9386 	struct hnae3_queue *queue;
9387 	struct hclge_tqp *tqp;
9388 
9389 	queue = handle->kinfo.tqp[queue_id];
9390 	tqp = container_of(queue, struct hclge_tqp, q);
9391 
9392 	return tqp->index;
9393 }
9394 
9395 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
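/* Reset a single TQP: disable the queue, request the hardware reset, poll
 * the reset status and finally deassert the soft reset.
 */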
9396 {
9397 	struct hclge_vport *vport = hclge_get_vport(handle);
9398 	struct hclge_dev *hdev = vport->back;
9399 	int reset_try_times = 0;
9400 	int reset_status;
9401 	u16 queue_gid;
9402 	int ret;
9403 
9404 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9405 
9406 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9407 	if (ret) {
9408 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9409 		return ret;
9410 	}
9411 
9412 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9413 	if (ret) {
9414 		dev_err(&hdev->pdev->dev,
9415 			"Send reset tqp cmd fail, ret = %d\n", ret);
9416 		return ret;
9417 	}
9418 
9419 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9420 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9421 		if (reset_status)
9422 			break;
9423 
9424 		/* Wait for tqp hw reset */
9425 		usleep_range(1000, 1200);
9426 	}
9427 
9428 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9429 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9430 		return -ETIME;
9431 	}
9432 
9433 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9434 	if (ret)
9435 		dev_err(&hdev->pdev->dev,
9436 			"Deassert the soft reset fail, ret = %d\n", ret);
9437 
9438 	return ret;
9439 }
9440 
9441 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9442 {
9443 	struct hclge_dev *hdev = vport->back;
9444 	int reset_try_times = 0;
9445 	int reset_status;
9446 	u16 queue_gid;
9447 	int ret;
9448 
9449 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9450 
9451 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9452 	if (ret) {
9453 		dev_warn(&hdev->pdev->dev,
9454 			 "Send reset tqp cmd fail, ret = %d\n", ret);
9455 		return;
9456 	}
9457 
9458 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9459 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9460 		if (reset_status)
9461 			break;
9462 
9463 		/* Wait for tqp hw reset */
9464 		usleep_range(1000, 1200);
9465 	}
9466 
9467 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9468 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9469 		return;
9470 	}
9471 
9472 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9473 	if (ret)
9474 		dev_warn(&hdev->pdev->dev,
9475 			 "Deassert the soft reset fail, ret = %d\n", ret);
9476 }
9477 
9478 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9479 {
9480 	struct hclge_vport *vport = hclge_get_vport(handle);
9481 	struct hclge_dev *hdev = vport->back;
9482 
9483 	return hdev->fw_version;
9484 }
9485 
9486 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9487 {
9488 	struct phy_device *phydev = hdev->hw.mac.phydev;
9489 
9490 	if (!phydev)
9491 		return;
9492 
9493 	phy_set_asym_pause(phydev, rx_en, tx_en);
9494 }
9495 
9496 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9497 {
9498 	int ret;
9499 
9500 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9501 		return 0;
9502 
9503 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9504 	if (ret)
9505 		dev_err(&hdev->pdev->dev,
9506 			"configure pauseparam error, ret = %d.\n", ret);
9507 
9508 	return ret;
9509 }
9510 
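/* Resolve the pause settings from the local and link partner autoneg
 * advertisements and program the MAC pause configuration accordingly.
 */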
9511 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9512 {
9513 	struct phy_device *phydev = hdev->hw.mac.phydev;
9514 	u16 remote_advertising = 0;
9515 	u16 local_advertising;
9516 	u32 rx_pause, tx_pause;
9517 	u8 flowctl;
9518 
9519 	if (!phydev->link || !phydev->autoneg)
9520 		return 0;
9521 
9522 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9523 
9524 	if (phydev->pause)
9525 		remote_advertising = LPA_PAUSE_CAP;
9526 
9527 	if (phydev->asym_pause)
9528 		remote_advertising |= LPA_PAUSE_ASYM;
9529 
9530 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9531 					   remote_advertising);
9532 	tx_pause = flowctl & FLOW_CTRL_TX;
9533 	rx_pause = flowctl & FLOW_CTRL_RX;
9534 
9535 	if (phydev->duplex == HCLGE_MAC_HALF) {
9536 		tx_pause = 0;
9537 		rx_pause = 0;
9538 	}
9539 
9540 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9541 }
9542 
9543 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9544 				 u32 *rx_en, u32 *tx_en)
9545 {
9546 	struct hclge_vport *vport = hclge_get_vport(handle);
9547 	struct hclge_dev *hdev = vport->back;
9548 	struct phy_device *phydev = hdev->hw.mac.phydev;
9549 
9550 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9551 
9552 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9553 		*rx_en = 0;
9554 		*tx_en = 0;
9555 		return;
9556 	}
9557 
9558 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9559 		*rx_en = 1;
9560 		*tx_en = 0;
9561 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9562 		*tx_en = 1;
9563 		*rx_en = 0;
9564 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9565 		*rx_en = 1;
9566 		*tx_en = 1;
9567 	} else {
9568 		*rx_en = 0;
9569 		*tx_en = 0;
9570 	}
9571 }
9572 
9573 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9574 					 u32 rx_en, u32 tx_en)
9575 {
9576 	if (rx_en && tx_en)
9577 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
9578 	else if (rx_en && !tx_en)
9579 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9580 	else if (!rx_en && tx_en)
9581 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9582 	else
9583 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
9584 
9585 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9586 }
9587 
9588 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9589 				u32 rx_en, u32 tx_en)
9590 {
9591 	struct hclge_vport *vport = hclge_get_vport(handle);
9592 	struct hclge_dev *hdev = vport->back;
9593 	struct phy_device *phydev = hdev->hw.mac.phydev;
9594 	u32 fc_autoneg;
9595 
9596 	if (phydev) {
9597 		fc_autoneg = hclge_get_autoneg(handle);
9598 		if (auto_neg != fc_autoneg) {
9599 			dev_info(&hdev->pdev->dev,
9600 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9601 			return -EOPNOTSUPP;
9602 		}
9603 	}
9604 
9605 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9606 		dev_info(&hdev->pdev->dev,
9607 			 "Priority flow control enabled. Cannot set link flow control.\n");
9608 		return -EOPNOTSUPP;
9609 	}
9610 
9611 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9612 
9613 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9614 
9615 	if (!auto_neg)
9616 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9617 
9618 	if (phydev)
9619 		return phy_start_aneg(phydev);
9620 
9621 	return -EOPNOTSUPP;
9622 }
9623 
9624 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9625 					  u8 *auto_neg, u32 *speed, u8 *duplex)
9626 {
9627 	struct hclge_vport *vport = hclge_get_vport(handle);
9628 	struct hclge_dev *hdev = vport->back;
9629 
9630 	if (speed)
9631 		*speed = hdev->hw.mac.speed;
9632 	if (duplex)
9633 		*duplex = hdev->hw.mac.duplex;
9634 	if (auto_neg)
9635 		*auto_neg = hdev->hw.mac.autoneg;
9636 }
9637 
9638 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9639 				 u8 *module_type)
9640 {
9641 	struct hclge_vport *vport = hclge_get_vport(handle);
9642 	struct hclge_dev *hdev = vport->back;
9643 
9644 	/* When the nic is down, the service task is not running and does not
9645 	 * update the port information every second. Query the port information
9646 	 * before returning the media type to ensure it is correct.
9647 	 */
9648 	hclge_update_port_info(hdev);
9649 
9650 	if (media_type)
9651 		*media_type = hdev->hw.mac.media_type;
9652 
9653 	if (module_type)
9654 		*module_type = hdev->hw.mac.module_type;
9655 }
9656 
9657 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9658 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
9659 {
9660 	struct hclge_vport *vport = hclge_get_vport(handle);
9661 	struct hclge_dev *hdev = vport->back;
9662 	struct phy_device *phydev = hdev->hw.mac.phydev;
9663 	int mdix_ctrl, mdix, is_resolved;
9664 	unsigned int retval;
9665 
9666 	if (!phydev) {
9667 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9668 		*tp_mdix = ETH_TP_MDI_INVALID;
9669 		return;
9670 	}
9671 
9672 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9673 
9674 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9675 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9676 				    HCLGE_PHY_MDIX_CTRL_S);
9677 
9678 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9679 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9680 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9681 
9682 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9683 
9684 	switch (mdix_ctrl) {
9685 	case 0x0:
9686 		*tp_mdix_ctrl = ETH_TP_MDI;
9687 		break;
9688 	case 0x1:
9689 		*tp_mdix_ctrl = ETH_TP_MDI_X;
9690 		break;
9691 	case 0x3:
9692 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9693 		break;
9694 	default:
9695 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9696 		break;
9697 	}
9698 
9699 	if (!is_resolved)
9700 		*tp_mdix = ETH_TP_MDI_INVALID;
9701 	else if (mdix)
9702 		*tp_mdix = ETH_TP_MDI_X;
9703 	else
9704 		*tp_mdix = ETH_TP_MDI;
9705 }
9706 
9707 static void hclge_info_show(struct hclge_dev *hdev)
9708 {
9709 	struct device *dev = &hdev->pdev->dev;
9710 
9711 	dev_info(dev, "PF info begin:\n");
9712 
9713 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9714 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9715 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9716 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9717 	dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9718 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9719 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9720 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9721 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9722 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9723 	dev_info(dev, "This is %s PF\n",
9724 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9725 	dev_info(dev, "DCB %s\n",
9726 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9727 	dev_info(dev, "MQPRIO %s\n",
9728 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9729 
9730 	dev_info(dev, "PF info end.\n");
9731 }
9732 
9733 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9734 					  struct hclge_vport *vport)
9735 {
9736 	struct hnae3_client *client = vport->nic.client;
9737 	struct hclge_dev *hdev = ae_dev->priv;
9738 	int rst_cnt = hdev->rst_stats.reset_cnt;
9739 	int ret;
9740 
9741 	ret = client->ops->init_instance(&vport->nic);
9742 	if (ret)
9743 		return ret;
9744 
9745 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9746 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9747 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9748 		ret = -EBUSY;
9749 		goto init_nic_err;
9750 	}
9751 
9752 	/* Enable nic hw error interrupts */
9753 	ret = hclge_config_nic_hw_error(hdev, true);
9754 	if (ret) {
9755 		dev_err(&ae_dev->pdev->dev,
9756 			"fail(%d) to enable hw error interrupts\n", ret);
9757 		goto init_nic_err;
9758 	}
9759 
9760 	hnae3_set_client_init_flag(client, ae_dev, 1);
9761 
9762 	if (netif_msg_drv(&hdev->vport->nic))
9763 		hclge_info_show(hdev);
9764 
9765 	return ret;
9766 
9767 init_nic_err:
9768 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9769 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9770 		msleep(HCLGE_WAIT_RESET_DONE);
9771 
9772 	client->ops->uninit_instance(&vport->nic, 0);
9773 
9774 	return ret;
9775 }
9776 
9777 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9778 					   struct hclge_vport *vport)
9779 {
9780 	struct hclge_dev *hdev = ae_dev->priv;
9781 	struct hnae3_client *client;
9782 	int rst_cnt;
9783 	int ret;
9784 
9785 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9786 	    !hdev->nic_client)
9787 		return 0;
9788 
9789 	client = hdev->roce_client;
9790 	ret = hclge_init_roce_base_info(vport);
9791 	if (ret)
9792 		return ret;
9793 
9794 	rst_cnt = hdev->rst_stats.reset_cnt;
9795 	ret = client->ops->init_instance(&vport->roce);
9796 	if (ret)
9797 		return ret;
9798 
9799 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9800 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9801 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9802 		ret = -EBUSY;
9803 		goto init_roce_err;
9804 	}
9805 
9806 	/* Enable roce ras interrupts */
9807 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9808 	if (ret) {
9809 		dev_err(&ae_dev->pdev->dev,
9810 			"fail(%d) to enable roce ras interrupts\n", ret);
9811 		goto init_roce_err;
9812 	}
9813 
9814 	hnae3_set_client_init_flag(client, ae_dev, 1);
9815 
9816 	return 0;
9817 
9818 init_roce_err:
9819 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9820 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9821 		msleep(HCLGE_WAIT_RESET_DONE);
9822 
9823 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9824 
9825 	return ret;
9826 }
9827 
9828 static int hclge_init_client_instance(struct hnae3_client *client,
9829 				      struct hnae3_ae_dev *ae_dev)
9830 {
9831 	struct hclge_dev *hdev = ae_dev->priv;
9832 	struct hclge_vport *vport;
9833 	int i, ret;
9834 
9835 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9836 		vport = &hdev->vport[i];
9837 
9838 		switch (client->type) {
9839 		case HNAE3_CLIENT_KNIC:
9840 			hdev->nic_client = client;
9841 			vport->nic.client = client;
9842 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9843 			if (ret)
9844 				goto clear_nic;
9845 
9846 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9847 			if (ret)
9848 				goto clear_roce;
9849 
9850 			break;
9851 		case HNAE3_CLIENT_ROCE:
9852 			if (hnae3_dev_roce_supported(hdev)) {
9853 				hdev->roce_client = client;
9854 				vport->roce.client = client;
9855 			}
9856 
9857 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9858 			if (ret)
9859 				goto clear_roce;
9860 
9861 			break;
9862 		default:
9863 			return -EINVAL;
9864 		}
9865 	}
9866 
9867 	return 0;
9868 
9869 clear_nic:
9870 	hdev->nic_client = NULL;
9871 	vport->nic.client = NULL;
9872 	return ret;
9873 clear_roce:
9874 	hdev->roce_client = NULL;
9875 	vport->roce.client = NULL;
9876 	return ret;
9877 }
9878 
9879 static void hclge_uninit_client_instance(struct hnae3_client *client,
9880 					 struct hnae3_ae_dev *ae_dev)
9881 {
9882 	struct hclge_dev *hdev = ae_dev->priv;
9883 	struct hclge_vport *vport;
9884 	int i;
9885 
9886 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9887 		vport = &hdev->vport[i];
9888 		if (hdev->roce_client) {
9889 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9890 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9891 				msleep(HCLGE_WAIT_RESET_DONE);
9892 
9893 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9894 								0);
9895 			hdev->roce_client = NULL;
9896 			vport->roce.client = NULL;
9897 		}
9898 		if (client->type == HNAE3_CLIENT_ROCE)
9899 			return;
9900 		if (hdev->nic_client && client->ops->uninit_instance) {
9901 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9902 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9903 				msleep(HCLGE_WAIT_RESET_DONE);
9904 
9905 			client->ops->uninit_instance(&vport->nic, 0);
9906 			hdev->nic_client = NULL;
9907 			vport->nic.client = NULL;
9908 		}
9909 	}
9910 }
9911 
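/* Map the optional device memory BAR (BAR 4) with write combining.
 * Devices that do not expose this BAR simply skip the mapping.
 */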
9912 static int hclge_dev_mem_map(struct hclge_dev *hdev)
9913 {
9914 #define HCLGE_MEM_BAR		4
9915 
9916 	struct pci_dev *pdev = hdev->pdev;
9917 	struct hclge_hw *hw = &hdev->hw;
9918 
9919 	/* for device does not have device memory, return directly */
9920 	/* if the device does not have device memory, return directly */
9921 		return 0;
9922 
9923 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
9924 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
9925 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
9926 	if (!hw->mem_base) {
9927 		dev_err(&pdev->dev, "failed to map device memory\n");
9928 		return -EFAULT;
9929 	}
9930 
9931 	return 0;
9932 }
9933 
9934 static int hclge_pci_init(struct hclge_dev *hdev)
9935 {
9936 	struct pci_dev *pdev = hdev->pdev;
9937 	struct hclge_hw *hw;
9938 	int ret;
9939 
9940 	ret = pci_enable_device(pdev);
9941 	if (ret) {
9942 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9943 		return ret;
9944 	}
9945 
9946 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9947 	if (ret) {
9948 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9949 		if (ret) {
9950 			dev_err(&pdev->dev,
9951 				"can't set consistent PCI DMA");
9952 			goto err_disable_device;
9953 		}
9954 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9955 	}
9956 
9957 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9958 	if (ret) {
9959 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9960 		goto err_disable_device;
9961 	}
9962 
9963 	pci_set_master(pdev);
9964 	hw = &hdev->hw;
9965 	hw->io_base = pcim_iomap(pdev, 2, 0);
9966 	if (!hw->io_base) {
9967 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9968 		ret = -ENOMEM;
9969 		goto err_clr_master;
9970 	}
9971 
9972 	ret = hclge_dev_mem_map(hdev);
9973 	if (ret)
9974 		goto err_unmap_io_base;
9975 
9976 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9977 
9978 	return 0;
9979 
9980 err_unmap_io_base:
9981 	pcim_iounmap(pdev, hdev->hw.io_base);
9982 err_clr_master:
9983 	pci_clear_master(pdev);
9984 	pci_release_regions(pdev);
9985 err_disable_device:
9986 	pci_disable_device(pdev);
9987 
9988 	return ret;
9989 }
9990 
9991 static void hclge_pci_uninit(struct hclge_dev *hdev)
9992 {
9993 	struct pci_dev *pdev = hdev->pdev;
9994 
9995 	if (hdev->hw.mem_base)
9996 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
9997 
9998 	pcim_iounmap(pdev, hdev->hw.io_base);
9999 	pci_free_irq_vectors(pdev);
10000 	pci_clear_master(pdev);
10001 	pci_release_mem_regions(pdev);
10002 	pci_disable_device(pdev);
10003 }
10004 
10005 static void hclge_state_init(struct hclge_dev *hdev)
10006 {
10007 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10008 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10009 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10010 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10011 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10012 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10013 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10014 }
10015 
10016 static void hclge_state_uninit(struct hclge_dev *hdev)
10017 {
10018 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10019 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10020 
10021 	if (hdev->reset_timer.function)
10022 		del_timer_sync(&hdev->reset_timer);
10023 	if (hdev->service_task.work.func)
10024 		cancel_delayed_work_sync(&hdev->service_task);
10025 }
10026 
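/* Prepare for an FLR: take the reset semaphore, run the reset preparation
 * (retrying if it fails or a reset is pending), then disable the misc
 * vector and mark the command queue disabled before the FLR proceeds.
 */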
10027 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10028 {
10029 #define HCLGE_FLR_RETRY_WAIT_MS	500
10030 #define HCLGE_FLR_RETRY_CNT	5
10031 
10032 	struct hclge_dev *hdev = ae_dev->priv;
10033 	int retry_cnt = 0;
10034 	int ret;
10035 
10036 retry:
10037 	down(&hdev->reset_sem);
10038 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10039 	hdev->reset_type = HNAE3_FLR_RESET;
10040 	ret = hclge_reset_prepare(hdev);
10041 	if (ret || hdev->reset_pending) {
10042 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10043 			ret);
10044 		if (hdev->reset_pending ||
10045 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10046 			dev_err(&hdev->pdev->dev,
10047 				"reset_pending:0x%lx, retry_cnt:%d\n",
10048 				hdev->reset_pending, retry_cnt);
10049 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10050 			up(&hdev->reset_sem);
10051 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
10052 			goto retry;
10053 		}
10054 	}
10055 
10056 	/* disable misc vector before FLR done */
10057 	hclge_enable_vector(&hdev->misc_vector, false);
10058 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10059 	hdev->rst_stats.flr_rst_cnt++;
10060 }
10061 
10062 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10063 {
10064 	struct hclge_dev *hdev = ae_dev->priv;
10065 	int ret;
10066 
10067 	hclge_enable_vector(&hdev->misc_vector, true);
10068 
10069 	ret = hclge_reset_rebuild(hdev);
10070 	if (ret)
10071 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10072 
10073 	hdev->reset_type = HNAE3_NONE_RESET;
10074 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10075 	up(&hdev->reset_sem);
10076 }
10077 
10078 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10079 {
10080 	u16 i;
10081 
10082 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10083 		struct hclge_vport *vport = &hdev->vport[i];
10084 		int ret;
10085 
10086 		/* Send cmd to clear VF's FUNC_RST_ING */
10087 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10088 		if (ret)
10089 			dev_warn(&hdev->pdev->dev,
10090 				 "clear vf(%u) rst failed %d!\n",
10091 				 vport->vport_id, ret);
10092 	}
10093 }
10094 
10095 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10096 {
10097 	struct pci_dev *pdev = ae_dev->pdev;
10098 	struct hclge_dev *hdev;
10099 	int ret;
10100 
10101 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10102 	if (!hdev)
10103 		return -ENOMEM;
10104 
10105 	hdev->pdev = pdev;
10106 	hdev->ae_dev = ae_dev;
10107 	hdev->reset_type = HNAE3_NONE_RESET;
10108 	hdev->reset_level = HNAE3_FUNC_RESET;
10109 	ae_dev->priv = hdev;
10110 
10111 	/* HW supports 2 layers of VLAN tags */
10112 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10113 
10114 	mutex_init(&hdev->vport_lock);
10115 	spin_lock_init(&hdev->fd_rule_lock);
10116 	sema_init(&hdev->reset_sem, 1);
10117 
10118 	ret = hclge_pci_init(hdev);
10119 	if (ret)
10120 		goto out;
10121 
10122 	/* Initialize the firmware command queue */
10123 	ret = hclge_cmd_queue_init(hdev);
10124 	if (ret)
10125 		goto err_pci_uninit;
10126 
10127 	/* Initialize the firmware command interface */
10128 	ret = hclge_cmd_init(hdev);
10129 	if (ret)
10130 		goto err_cmd_uninit;
10131 
10132 	ret = hclge_get_cap(hdev);
10133 	if (ret)
10134 		goto err_cmd_uninit;
10135 
10136 	ret = hclge_query_dev_specs(hdev);
10137 	if (ret) {
10138 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10139 			ret);
10140 		goto err_cmd_uninit;
10141 	}
10142 
10143 	ret = hclge_configure(hdev);
10144 	if (ret) {
10145 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10146 		goto err_cmd_uninit;
10147 	}
10148 
10149 	ret = hclge_init_msi(hdev);
10150 	if (ret) {
10151 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10152 		goto err_cmd_uninit;
10153 	}
10154 
10155 	ret = hclge_misc_irq_init(hdev);
10156 	if (ret)
10157 		goto err_msi_uninit;
10158 
10159 	ret = hclge_alloc_tqps(hdev);
10160 	if (ret) {
10161 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10162 		goto err_msi_irq_uninit;
10163 	}
10164 
10165 	ret = hclge_alloc_vport(hdev);
10166 	if (ret)
10167 		goto err_msi_irq_uninit;
10168 
10169 	ret = hclge_map_tqp(hdev);
10170 	if (ret)
10171 		goto err_msi_irq_uninit;
10172 
10173 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10174 		ret = hclge_mac_mdio_config(hdev);
10175 		if (ret)
10176 			goto err_msi_irq_uninit;
10177 	}
10178 
10179 	ret = hclge_init_umv_space(hdev);
10180 	if (ret)
10181 		goto err_mdiobus_unreg;
10182 
10183 	ret = hclge_mac_init(hdev);
10184 	if (ret) {
10185 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10186 		goto err_mdiobus_unreg;
10187 	}
10188 
10189 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10190 	if (ret) {
10191 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10192 		goto err_mdiobus_unreg;
10193 	}
10194 
10195 	ret = hclge_config_gro(hdev, true);
10196 	if (ret)
10197 		goto err_mdiobus_unreg;
10198 
10199 	ret = hclge_init_vlan_config(hdev);
10200 	if (ret) {
10201 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10202 		goto err_mdiobus_unreg;
10203 	}
10204 
10205 	ret = hclge_tm_schd_init(hdev);
10206 	if (ret) {
10207 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10208 		goto err_mdiobus_unreg;
10209 	}
10210 
10211 	hclge_rss_init_cfg(hdev);
10212 	ret = hclge_rss_init_hw(hdev);
10213 	if (ret) {
10214 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10215 		goto err_mdiobus_unreg;
10216 	}
10217 
10218 	ret = init_mgr_tbl(hdev);
10219 	if (ret) {
10220 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10221 		goto err_mdiobus_unreg;
10222 	}
10223 
10224 	ret = hclge_init_fd_config(hdev);
10225 	if (ret) {
10226 		dev_err(&pdev->dev,
10227 			"fd table init fail, ret=%d\n", ret);
10228 		goto err_mdiobus_unreg;
10229 	}
10230 
10231 	INIT_KFIFO(hdev->mac_tnl_log);
10232 
10233 	hclge_dcb_ops_set(hdev);
10234 
10235 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10236 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10237 
10238 	/* Set up affinity after the service timer setup, because add_timer_on
10239 	 * is called in the affinity notify callback.
10240 	 */
10241 	hclge_misc_affinity_setup(hdev);
10242 
10243 	hclge_clear_all_event_cause(hdev);
10244 	hclge_clear_resetting_state(hdev);
10245 
10246 	/* Log and clear the hw errors that have already occurred */
10247 	hclge_handle_all_hns_hw_errors(ae_dev);
10248 
10249 	/* Request a delayed reset for error recovery: an immediate global
10250 	 * reset on this PF would affect the pending initialization of other PFs
10251 	 */
10252 	if (ae_dev->hw_err_reset_req) {
10253 		enum hnae3_reset_type reset_level;
10254 
10255 		reset_level = hclge_get_reset_level(ae_dev,
10256 						    &ae_dev->hw_err_reset_req);
10257 		hclge_set_def_reset_request(ae_dev, reset_level);
10258 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10259 	}
10260 
10261 	/* Enable MISC vector(vector0) */
10262 	hclge_enable_vector(&hdev->misc_vector, true);
10263 
10264 	hclge_state_init(hdev);
10265 	hdev->last_reset_time = jiffies;
10266 
10267 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10268 		 HCLGE_DRIVER_NAME);
10269 
10270 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10271 
10272 	return 0;
10273 
10274 err_mdiobus_unreg:
10275 	if (hdev->hw.mac.phydev)
10276 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
10277 err_msi_irq_uninit:
10278 	hclge_misc_irq_uninit(hdev);
10279 err_msi_uninit:
10280 	pci_free_irq_vectors(pdev);
10281 err_cmd_uninit:
10282 	hclge_cmd_uninit(hdev);
10283 err_pci_uninit:
10284 	pcim_iounmap(pdev, hdev->hw.io_base);
10285 	pci_clear_master(pdev);
10286 	pci_release_regions(pdev);
10287 	pci_disable_device(pdev);
10288 out:
10289 	mutex_destroy(&hdev->vport_lock);
10290 	return ret;
10291 }
10292 
10293 static void hclge_stats_clear(struct hclge_dev *hdev)
10294 {
10295 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10296 }
10297 
10298 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10299 {
10300 	return hclge_config_switch_param(hdev, vf, enable,
10301 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10302 }
10303 
10304 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10305 {
10306 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10307 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
10308 					  enable, vf);
10309 }
10310 
10311 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10312 {
10313 	int ret;
10314 
10315 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10316 	if (ret) {
10317 		dev_err(&hdev->pdev->dev,
10318 			"Set vf %d mac spoof check %s failed, ret=%d\n",
10319 			vf, enable ? "on" : "off", ret);
10320 		return ret;
10321 	}
10322 
10323 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10324 	if (ret)
10325 		dev_err(&hdev->pdev->dev,
10326 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
10327 			vf, enable ? "on" : "off", ret);
10328 
10329 	return ret;
10330 }
10331 
10332 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10333 				 bool enable)
10334 {
10335 	struct hclge_vport *vport = hclge_get_vport(handle);
10336 	struct hclge_dev *hdev = vport->back;
10337 	u32 new_spoofchk = enable ? 1 : 0;
10338 	int ret;
10339 
10340 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10341 		return -EOPNOTSUPP;
10342 
10343 	vport = hclge_get_vf_vport(hdev, vf);
10344 	if (!vport)
10345 		return -EINVAL;
10346 
10347 	if (vport->vf_info.spoofchk == new_spoofchk)
10348 		return 0;
10349 
10350 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10351 		dev_warn(&hdev->pdev->dev,
10352 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10353 			 vf);
10354 	else if (enable && hclge_is_umv_space_full(vport, true))
10355 		dev_warn(&hdev->pdev->dev,
10356 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10357 			 vf);
10358 
10359 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10360 	if (ret)
10361 		return ret;
10362 
10363 	vport->vf_info.spoofchk = new_spoofchk;
10364 	return 0;
10365 }
10366 
10367 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10368 {
10369 	struct hclge_vport *vport = hdev->vport;
10370 	int ret;
10371 	int i;
10372 
10373 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10374 		return 0;
10375 
10376 	/* resume the vf spoof check state after reset */
10377 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10378 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10379 					       vport->vf_info.spoofchk);
10380 		if (ret)
10381 			return ret;
10382 
10383 		vport++;
10384 	}
10385 
10386 	return 0;
10387 }
10388 
10389 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10390 {
10391 	struct hclge_vport *vport = hclge_get_vport(handle);
10392 	struct hclge_dev *hdev = vport->back;
10393 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10394 	u32 new_trusted = enable ? 1 : 0;
10395 	bool en_bc_pmc;
10396 	int ret;
10397 
10398 	vport = hclge_get_vf_vport(hdev, vf);
10399 	if (!vport)
10400 		return -EINVAL;
10401 
10402 	if (vport->vf_info.trusted == new_trusted)
10403 		return 0;
10404 
10405 	/* Disable promisc mode for VF if it is not trusted any more. */
10406 	if (!enable && vport->vf_info.promisc_enable) {
10407 		en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10408 		ret = hclge_set_vport_promisc_mode(vport, false, false,
10409 						   en_bc_pmc);
10410 		if (ret)
10411 			return ret;
10412 		vport->vf_info.promisc_enable = 0;
10413 		hclge_inform_vf_promisc_info(vport);
10414 	}
10415 
10416 	vport->vf_info.trusted = new_trusted;
10417 
10418 	return 0;
10419 }
10420 
10421 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10422 {
10423 	int ret;
10424 	int vf;
10425 
10426 	/* reset vf rate to default value */
10427 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10428 		struct hclge_vport *vport = &hdev->vport[vf];
10429 
10430 		vport->vf_info.max_tx_rate = 0;
10431 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10432 		if (ret)
10433 			dev_err(&hdev->pdev->dev,
10434 				"vf%d failed to reset to default, ret=%d\n",
10435 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10436 	}
10437 }
10438 
10439 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10440 				     int min_tx_rate, int max_tx_rate)
10441 {
10442 	if (min_tx_rate != 0 ||
10443 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10444 		dev_err(&hdev->pdev->dev,
10445 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10446 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10447 		return -EINVAL;
10448 	}
10449 
10450 	return 0;
10451 }
10452 
10453 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10454 			     int min_tx_rate, int max_tx_rate, bool force)
10455 {
10456 	struct hclge_vport *vport = hclge_get_vport(handle);
10457 	struct hclge_dev *hdev = vport->back;
10458 	int ret;
10459 
10460 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10461 	if (ret)
10462 		return ret;
10463 
10464 	vport = hclge_get_vf_vport(hdev, vf);
10465 	if (!vport)
10466 		return -EINVAL;
10467 
10468 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10469 		return 0;
10470 
10471 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10472 	if (ret)
10473 		return ret;
10474 
10475 	vport->vf_info.max_tx_rate = max_tx_rate;
10476 
10477 	return 0;
10478 }
10479 
10480 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10481 {
10482 	struct hnae3_handle *handle = &hdev->vport->nic;
10483 	struct hclge_vport *vport;
10484 	int ret;
10485 	int vf;
10486 
10487 	/* resume the vf max_tx_rate after reset */
10488 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10489 		vport = hclge_get_vf_vport(hdev, vf);
10490 		if (!vport)
10491 			return -EINVAL;
10492 
10493 		/* zero means max rate; after reset, the firmware has already set
10494 		 * it to max rate, so just continue.
10495 		 */
10496 		if (!vport->vf_info.max_tx_rate)
10497 			continue;
10498 
10499 		ret = hclge_set_vf_rate(handle, vf, 0,
10500 					vport->vf_info.max_tx_rate, true);
10501 		if (ret) {
10502 			dev_err(&hdev->pdev->dev,
10503 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
10504 				vf, vport->vf_info.max_tx_rate, ret);
10505 			return ret;
10506 		}
10507 	}
10508 
10509 	return 0;
10510 }
10511 
10512 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10513 {
10514 	struct hclge_vport *vport = hdev->vport;
10515 	int i;
10516 
10517 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10518 		hclge_vport_stop(vport);
10519 		vport++;
10520 	}
10521 }
10522 
10523 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10524 {
10525 	struct hclge_dev *hdev = ae_dev->priv;
10526 	struct pci_dev *pdev = ae_dev->pdev;
10527 	int ret;
10528 
10529 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10530 
10531 	hclge_stats_clear(hdev);
10532 	/* NOTE: a PF reset does not need to clear or restore the PF and VF
10533 	 * table entries, so the tables in memory should not be cleaned here.
10534 	 */
10535 	if (hdev->reset_type == HNAE3_IMP_RESET ||
10536 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
10537 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10538 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10539 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10540 		hclge_reset_umv_space(hdev);
10541 	}
10542 
10543 	ret = hclge_cmd_init(hdev);
10544 	if (ret) {
10545 		dev_err(&pdev->dev, "Cmd queue init failed\n");
10546 		return ret;
10547 	}
10548 
10549 	ret = hclge_map_tqp(hdev);
10550 	if (ret) {
10551 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10552 		return ret;
10553 	}
10554 
10555 	ret = hclge_mac_init(hdev);
10556 	if (ret) {
10557 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10558 		return ret;
10559 	}
10560 
10561 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10562 	if (ret) {
10563 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10564 		return ret;
10565 	}
10566 
10567 	ret = hclge_config_gro(hdev, true);
10568 	if (ret)
10569 		return ret;
10570 
10571 	ret = hclge_init_vlan_config(hdev);
10572 	if (ret) {
10573 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10574 		return ret;
10575 	}
10576 
10577 	ret = hclge_tm_init_hw(hdev, true);
10578 	if (ret) {
10579 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10580 		return ret;
10581 	}
10582 
10583 	ret = hclge_rss_init_hw(hdev);
10584 	if (ret) {
10585 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10586 		return ret;
10587 	}
10588 
10589 	ret = init_mgr_tbl(hdev);
10590 	if (ret) {
10591 		dev_err(&pdev->dev,
10592 			"failed to reinit manager table, ret = %d\n", ret);
10593 		return ret;
10594 	}
10595 
10596 	ret = hclge_init_fd_config(hdev);
10597 	if (ret) {
10598 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10599 		return ret;
10600 	}
10601 
10602 	/* Log and clear the hw errors that have already occurred */
10603 	hclge_handle_all_hns_hw_errors(ae_dev);
10604 
10605 	/* Re-enable the hw error interrupts because
10606 	 * the interrupts get disabled on global reset.
10607 	 */
10608 	ret = hclge_config_nic_hw_error(hdev, true);
10609 	if (ret) {
10610 		dev_err(&pdev->dev,
10611 			"fail(%d) to re-enable NIC hw error interrupts\n",
10612 			ret);
10613 		return ret;
10614 	}
10615 
10616 	if (hdev->roce_client) {
10617 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
10618 		if (ret) {
10619 			dev_err(&pdev->dev,
10620 				"fail(%d) to re-enable roce ras interrupts\n",
10621 				ret);
10622 			return ret;
10623 		}
10624 	}
10625 
10626 	hclge_reset_vport_state(hdev);
10627 	ret = hclge_reset_vport_spoofchk(hdev);
10628 	if (ret)
10629 		return ret;
10630 
10631 	ret = hclge_resume_vf_rate(hdev);
10632 	if (ret)
10633 		return ret;
10634 
10635 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10636 		 HCLGE_DRIVER_NAME);
10637 
10638 	return 0;
10639 }
10640 
10641 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10642 {
10643 	struct hclge_dev *hdev = ae_dev->priv;
10644 	struct hclge_mac *mac = &hdev->hw.mac;
10645 
10646 	hclge_reset_vf_rate(hdev);
10647 	hclge_clear_vf_vlan(hdev);
10648 	hclge_misc_affinity_teardown(hdev);
10649 	hclge_state_uninit(hdev);
10650 	hclge_uninit_mac_table(hdev);
10651 
10652 	if (mac->phydev)
10653 		mdiobus_unregister(mac->mdio_bus);
10654 
10655 	/* Disable MISC vector(vector0) */
10656 	hclge_enable_vector(&hdev->misc_vector, false);
10657 	synchronize_irq(hdev->misc_vector.vector_irq);
10658 
10659 	/* Disable all hw interrupts */
10660 	hclge_config_mac_tnl_int(hdev, false);
10661 	hclge_config_nic_hw_error(hdev, false);
10662 	hclge_config_rocee_ras_interrupt(hdev, false);
10663 
10664 	hclge_cmd_uninit(hdev);
10665 	hclge_misc_irq_uninit(hdev);
10666 	hclge_pci_uninit(hdev);
10667 	mutex_destroy(&hdev->vport_lock);
10668 	hclge_uninit_vport_vlan_table(hdev);
10669 	ae_dev->priv = NULL;
10670 }
10671 
10672 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10673 {
10674 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10675 	struct hclge_vport *vport = hclge_get_vport(handle);
10676 	struct hclge_dev *hdev = vport->back;
10677 
10678 	return min_t(u32, hdev->rss_size_max,
10679 		     vport->alloc_tqps / kinfo->num_tc);
10680 }
10681 
10682 static void hclge_get_channels(struct hnae3_handle *handle,
10683 			       struct ethtool_channels *ch)
10684 {
10685 	ch->max_combined = hclge_get_max_channels(handle);
10686 	ch->other_count = 1;
10687 	ch->max_other = 1;
10688 	ch->combined_count = handle->kinfo.rss_size;
10689 }
10690 
10691 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10692 					u16 *alloc_tqps, u16 *max_rss_size)
10693 {
10694 	struct hclge_vport *vport = hclge_get_vport(handle);
10695 	struct hclge_dev *hdev = vport->back;
10696 
10697 	*alloc_tqps = vport->alloc_tqps;
10698 	*max_rss_size = hdev->rss_size_max;
10699 }
10700 
10701 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10702 			      bool rxfh_configured)
10703 {
10704 	struct hclge_vport *vport = hclge_get_vport(handle);
10705 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10706 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10707 	struct hclge_dev *hdev = vport->back;
10708 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10709 	u16 cur_rss_size = kinfo->rss_size;
10710 	u16 cur_tqps = kinfo->num_tqps;
10711 	u16 tc_valid[HCLGE_MAX_TC_NUM];
10712 	u16 roundup_size;
10713 	u32 *rss_indir;
10714 	unsigned int i;
10715 	int ret;
10716 
10717 	kinfo->req_rss_size = new_tqps_num;
10718 
10719 	ret = hclge_tm_vport_map_update(hdev);
10720 	if (ret) {
10721 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10722 		return ret;
10723 	}
10724 
10725 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
10726 	roundup_size = ilog2(roundup_size);
10727 	/* Set the RSS TC mode according to the new RSS size */
10728 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10729 		tc_valid[i] = 0;
10730 
10731 		if (!(hdev->hw_tc_map & BIT(i)))
10732 			continue;
10733 
10734 		tc_valid[i] = 1;
10735 		tc_size[i] = roundup_size;
10736 		tc_offset[i] = kinfo->rss_size * i;
10737 	}
10738 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10739 	if (ret)
10740 		return ret;
10741 
10742 	/* RSS indirection table has been configured by the user */
10743 	if (rxfh_configured)
10744 		goto out;
10745 
10746 	/* Reinitialize the RSS indirection table according to the new RSS size */
10747 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10748 	if (!rss_indir)
10749 		return -ENOMEM;
10750 
10751 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10752 		rss_indir[i] = i % kinfo->rss_size;
10753 
10754 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10755 	if (ret)
10756 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10757 			ret);
10758 
10759 	kfree(rss_indir);
10760 
10761 out:
10762 	if (!ret)
10763 		dev_info(&hdev->pdev->dev,
10764 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10765 			 cur_rss_size, kinfo->rss_size,
10766 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10767 
10768 	return ret;
10769 }
10770 
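/* Query the firmware for the number of 32-bit and 64-bit registers
 * available in the register dump.
 */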
10771 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10772 			      u32 *regs_num_64_bit)
10773 {
10774 	struct hclge_desc desc;
10775 	u32 total_num;
10776 	int ret;
10777 
10778 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10779 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10780 	if (ret) {
10781 		dev_err(&hdev->pdev->dev,
10782 			"Query register number cmd failed, ret = %d.\n", ret);
10783 		return ret;
10784 	}
10785 
10786 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10787 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10788 
10789 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10790 	if (!total_num)
10791 		return -EINVAL;
10792 
10793 	return 0;
10794 }
10795 
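/* Read @regs_num 32-bit register values into @data using one chained
 * command. The first descriptor reserves HCLGE_32_BIT_DESC_NODATA_LEN
 * words for the command header; the remaining descriptors carry register
 * data only.
 */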
10796 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10797 				 void *data)
10798 {
10799 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10800 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10801 
10802 	struct hclge_desc *desc;
10803 	u32 *reg_val = data;
10804 	__le32 *desc_data;
10805 	int nodata_num;
10806 	int cmd_num;
10807 	int i, k, n;
10808 	int ret;
10809 
10810 	if (regs_num == 0)
10811 		return 0;
10812 
10813 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10814 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10815 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10816 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10817 	if (!desc)
10818 		return -ENOMEM;
10819 
10820 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10821 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10822 	if (ret) {
10823 		dev_err(&hdev->pdev->dev,
10824 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10825 		kfree(desc);
10826 		return ret;
10827 	}
10828 
10829 	for (i = 0; i < cmd_num; i++) {
10830 		if (i == 0) {
10831 			desc_data = (__le32 *)(&desc[i].data[0]);
10832 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10833 		} else {
10834 			desc_data = (__le32 *)(&desc[i]);
10835 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10836 		}
10837 		for (k = 0; k < n; k++) {
10838 			*reg_val++ = le32_to_cpu(*desc_data++);
10839 
10840 			regs_num--;
10841 			if (!regs_num)
10842 				break;
10843 		}
10844 	}
10845 
10846 	kfree(desc);
10847 	return 0;
10848 }
10849 
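/* Read @regs_num 64-bit register values into @data, using the same scheme
 * as the 32-bit variant but with HCLGE_64_BIT_DESC_NODATA_LEN header words
 * in the first descriptor.
 */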
10850 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10851 				 void *data)
10852 {
10853 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10854 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10855 
10856 	struct hclge_desc *desc;
10857 	u64 *reg_val = data;
10858 	__le64 *desc_data;
10859 	int nodata_len;
10860 	int cmd_num;
10861 	int i, k, n;
10862 	int ret;
10863 
10864 	if (regs_num == 0)
10865 		return 0;
10866 
10867 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10868 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10869 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10870 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10871 	if (!desc)
10872 		return -ENOMEM;
10873 
10874 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10875 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10876 	if (ret) {
10877 		dev_err(&hdev->pdev->dev,
10878 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10879 		kfree(desc);
10880 		return ret;
10881 	}
10882 
10883 	for (i = 0; i < cmd_num; i++) {
10884 		if (i == 0) {
10885 			desc_data = (__le64 *)(&desc[i].data[0]);
10886 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10887 		} else {
10888 			desc_data = (__le64 *)(&desc[i]);
10889 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10890 		}
10891 		for (k = 0; k < n; k++) {
10892 			*reg_val++ = le64_to_cpu(*desc_data++);
10893 
10894 			regs_num--;
10895 			if (!regs_num)
10896 				break;
10897 		}
10898 	}
10899 
10900 	kfree(desc);
10901 	return 0;
10902 }
10903 
10904 #define MAX_SEPARATE_NUM	4
10905 #define SEPARATOR_VALUE		0xFDFCFBFA
10906 #define REG_NUM_PER_LINE	4
10907 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10908 #define REG_SEPARATOR_LINE	1
10909 #define REG_NUM_REMAIN_MASK	3
10910 #define BD_LIST_MAX_NUM		30
10911 
10912 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10913 {
10914 	int i;
10915 
10916 	/* initialize all command BDs except the last one */
10917 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10918 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10919 					   true);
10920 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10921 	}
10922 
10923 	/* initialize the last command BD */
10924 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10925 
10926 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10927 }
10928 
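/* Fill @bd_num_list with the BD count of each DFX register type, using the
 * per-type offsets in hclge_dfx_bd_offset_list to locate the counts in the
 * queried descriptors.
 */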
10929 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10930 				    int *bd_num_list,
10931 				    u32 type_num)
10932 {
10933 	u32 entries_per_desc, desc_index, index, offset, i;
10934 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10935 	int ret;
10936 
10937 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10938 	if (ret) {
10939 		dev_err(&hdev->pdev->dev,
10940 			"Get dfx bd num fail, status is %d.\n", ret);
10941 		return ret;
10942 	}
10943 
10944 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10945 	for (i = 0; i < type_num; i++) {
10946 		offset = hclge_dfx_bd_offset_list[i];
10947 		index = offset % entries_per_desc;
10948 		desc_index = offset / entries_per_desc;
10949 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10950 	}
10951 
10952 	return ret;
10953 }
10954 
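/* Send a chained read command of @bd_num descriptors for one DFX register
 * type; every descriptor except the last carries the NEXT flag.
 */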
10955 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10956 				  struct hclge_desc *desc_src, int bd_num,
10957 				  enum hclge_opcode_type cmd)
10958 {
10959 	struct hclge_desc *desc = desc_src;
10960 	int i, ret;
10961 
10962 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10963 	for (i = 0; i < bd_num - 1; i++) {
10964 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10965 		desc++;
10966 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10967 	}
10968 
10969 	desc = desc_src;
10970 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10971 	if (ret)
10972 		dev_err(&hdev->pdev->dev,
10973 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10974 			cmd, ret);
10975 
10976 	return ret;
10977 }
10978 
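/* Copy the register words from @desc_src into @data and append
 * SEPARATOR_VALUE padding to align the output to REG_NUM_PER_LINE words.
 * Return the number of u32 words written.
 */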
10979 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10980 				    void *data)
10981 {
10982 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10983 	struct hclge_desc *desc = desc_src;
10984 	u32 *reg = data;
10985 
10986 	entries_per_desc = ARRAY_SIZE(desc->data);
10987 	reg_num = entries_per_desc * bd_num;
10988 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10989 	for (i = 0; i < reg_num; i++) {
10990 		index = i % entries_per_desc;
10991 		desc_index = i / entries_per_desc;
10992 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10993 	}
10994 	for (i = 0; i < separator_num; i++)
10995 		*reg++ = SEPARATOR_VALUE;
10996 
10997 	return reg_num + separator_num;
10998 }
10999 
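/* Compute the buffer length needed for the DFX register dump: each type's
 * BD data rounded up to whole REG_LEN_PER_LINE lines, leaving room for the
 * separator padding.
 */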
11000 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11001 {
11002 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11003 	int data_len_per_desc, bd_num, i;
11004 	int bd_num_list[BD_LIST_MAX_NUM];
11005 	u32 data_len;
11006 	int ret;
11007 
11008 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11009 	if (ret) {
11010 		dev_err(&hdev->pdev->dev,
11011 			"Get dfx reg bd num fail, status is %d.\n", ret);
11012 		return ret;
11013 	}
11014 
11015 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
11016 	*len = 0;
11017 	for (i = 0; i < dfx_reg_type_num; i++) {
11018 		bd_num = bd_num_list[i];
11019 		data_len = data_len_per_desc * bd_num;
11020 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11021 	}
11022 
11023 	return ret;
11024 }
11025 
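/* Dump all DFX register types into @data, reusing a single descriptor
 * buffer sized for the largest per-type BD count.
 */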
11026 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11027 {
11028 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11029 	int bd_num, bd_num_max, buf_len, i;
11030 	int bd_num_list[BD_LIST_MAX_NUM];
11031 	struct hclge_desc *desc_src;
11032 	u32 *reg = data;
11033 	int ret;
11034 
11035 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11036 	if (ret) {
11037 		dev_err(&hdev->pdev->dev,
11038 			"Get dfx reg bd num fail, status is %d.\n", ret);
11039 		return ret;
11040 	}
11041 
11042 	bd_num_max = bd_num_list[0];
11043 	for (i = 1; i < dfx_reg_type_num; i++)
11044 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11045 
11046 	buf_len = sizeof(*desc_src) * bd_num_max;
11047 	desc_src = kzalloc(buf_len, GFP_KERNEL);
11048 	if (!desc_src)
11049 		return -ENOMEM;
11050 
11051 	for (i = 0; i < dfx_reg_type_num; i++) {
11052 		bd_num = bd_num_list[i];
11053 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11054 					     hclge_dfx_reg_opcode_list[i]);
11055 		if (ret) {
11056 			dev_err(&hdev->pdev->dev,
11057 				"Get dfx reg fail, status is %d.\n", ret);
11058 			break;
11059 		}
11060 
11061 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11062 	}
11063 
11064 	kfree(desc_src);
11065 	return ret;
11066 }
11067 
11068 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11069 			      struct hnae3_knic_private_info *kinfo)
11070 {
11071 #define HCLGE_RING_REG_OFFSET		0x200
11072 #define HCLGE_RING_INT_REG_OFFSET	0x4
11073 
11074 	int i, j, reg_num, separator_num;
11075 	int data_num_sum;
11076 	u32 *reg = data;
11077 
11078 	/* fetch per-PF register values from the PF PCIe register space */
11079 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11080 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11081 	for (i = 0; i < reg_num; i++)
11082 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11083 	for (i = 0; i < separator_num; i++)
11084 		*reg++ = SEPARATOR_VALUE;
11085 	data_num_sum = reg_num + separator_num;
11086 
11087 	reg_num = ARRAY_SIZE(common_reg_addr_list);
11088 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11089 	for (i = 0; i < reg_num; i++)
11090 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11091 	for (i = 0; i < separator_num; i++)
11092 		*reg++ = SEPARATOR_VALUE;
11093 	data_num_sum += reg_num + separator_num;
11094 
11095 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
11096 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11097 	for (j = 0; j < kinfo->num_tqps; j++) {
11098 		for (i = 0; i < reg_num; i++)
11099 			*reg++ = hclge_read_dev(&hdev->hw,
11100 						ring_reg_addr_list[i] +
11101 						HCLGE_RING_REG_OFFSET * j);
11102 		for (i = 0; i < separator_num; i++)
11103 			*reg++ = SEPARATOR_VALUE;
11104 	}
11105 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11106 
11107 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11108 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11109 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
11110 		for (i = 0; i < reg_num; i++)
11111 			*reg++ = hclge_read_dev(&hdev->hw,
11112 						tqp_intr_reg_addr_list[i] +
11113 						HCLGE_RING_INT_REG_OFFSET * j);
11114 		for (i = 0; i < separator_num; i++)
11115 			*reg++ = SEPARATOR_VALUE;
11116 	}
11117 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11118 
11119 	return data_num_sum;
11120 }
11121 
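/* Return the total length of the ethtool register dump: the directly read
 * cmdq/common/ring/interrupt blocks, the firmware-queried 32-bit and 64-bit
 * registers (each followed by a separator line) and the DFX registers.
 */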
11122 static int hclge_get_regs_len(struct hnae3_handle *handle)
11123 {
11124 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11125 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11126 	struct hclge_vport *vport = hclge_get_vport(handle);
11127 	struct hclge_dev *hdev = vport->back;
11128 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11129 	int regs_lines_32_bit, regs_lines_64_bit;
11130 	int ret;
11131 
11132 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11133 	if (ret) {
11134 		dev_err(&hdev->pdev->dev,
11135 			"Get register number failed, ret = %d.\n", ret);
11136 		return ret;
11137 	}
11138 
11139 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11140 	if (ret) {
11141 		dev_err(&hdev->pdev->dev,
11142 			"Get dfx reg len failed, ret = %d.\n", ret);
11143 		return ret;
11144 	}
11145 
11146 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11147 		REG_SEPARATOR_LINE;
11148 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11149 		REG_SEPARATOR_LINE;
11150 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11151 		REG_SEPARATOR_LINE;
11152 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11153 		REG_SEPARATOR_LINE;
11154 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11155 		REG_SEPARATOR_LINE;
11156 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11157 		REG_SEPARATOR_LINE;
11158 
11159 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11160 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11161 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11162 }
11163 
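/* Fill the ethtool register dump: directly read PF registers first, then
 * the 32-bit and 64-bit firmware registers (each padded with separator
 * words), and finally the DFX registers.
 */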
11164 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11165 			   void *data)
11166 {
11167 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11168 	struct hclge_vport *vport = hclge_get_vport(handle);
11169 	struct hclge_dev *hdev = vport->back;
11170 	u32 regs_num_32_bit, regs_num_64_bit;
11171 	int i, reg_num, separator_num, ret;
11172 	u32 *reg = data;
11173 
11174 	*version = hdev->fw_version;
11175 
11176 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11177 	if (ret) {
11178 		dev_err(&hdev->pdev->dev,
11179 			"Get register number failed, ret = %d.\n", ret);
11180 		return;
11181 	}
11182 
11183 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11184 
11185 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11186 	if (ret) {
11187 		dev_err(&hdev->pdev->dev,
11188 			"Get 32 bit register failed, ret = %d.\n", ret);
11189 		return;
11190 	}
11191 	reg_num = regs_num_32_bit;
11192 	reg += reg_num;
11193 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11194 	for (i = 0; i < separator_num; i++)
11195 		*reg++ = SEPARATOR_VALUE;
11196 
11197 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11198 	if (ret) {
11199 		dev_err(&hdev->pdev->dev,
11200 			"Get 64 bit register failed, ret = %d.\n", ret);
11201 		return;
11202 	}
11203 	reg_num = regs_num_64_bit * 2;
11204 	reg += reg_num;
11205 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11206 	for (i = 0; i < separator_num; i++)
11207 		*reg++ = SEPARATOR_VALUE;
11208 
11209 	ret = hclge_get_dfx_reg(hdev, reg);
11210 	if (ret)
11211 		dev_err(&hdev->pdev->dev,
11212 			"Get dfx register failed, ret = %d.\n", ret);
11213 }
11214 
11215 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11216 {
11217 	struct hclge_set_led_state_cmd *req;
11218 	struct hclge_desc desc;
11219 	int ret;
11220 
11221 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11222 
11223 	req = (struct hclge_set_led_state_cmd *)desc.data;
11224 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11225 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11226 
11227 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11228 	if (ret)
11229 		dev_err(&hdev->pdev->dev,
11230 			"Send set led state cmd error, ret = %d\n", ret);
11231 
11232 	return ret;
11233 }
11234 
11235 enum hclge_led_status {
11236 	HCLGE_LED_OFF,
11237 	HCLGE_LED_ON,
11238 	HCLGE_LED_NO_CHANGE = 0xFF,
11239 };
11240 
11241 static int hclge_set_led_id(struct hnae3_handle *handle,
11242 			    enum ethtool_phys_id_state status)
11243 {
11244 	struct hclge_vport *vport = hclge_get_vport(handle);
11245 	struct hclge_dev *hdev = vport->back;
11246 
11247 	switch (status) {
11248 	case ETHTOOL_ID_ACTIVE:
11249 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
11250 	case ETHTOOL_ID_INACTIVE:
11251 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11252 	default:
11253 		return -EINVAL;
11254 	}
11255 }
11256 
11257 static void hclge_get_link_mode(struct hnae3_handle *handle,
11258 				unsigned long *supported,
11259 				unsigned long *advertising)
11260 {
11261 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11262 	struct hclge_vport *vport = hclge_get_vport(handle);
11263 	struct hclge_dev *hdev = vport->back;
11264 	unsigned int idx = 0;
11265 
11266 	for (; idx < size; idx++) {
11267 		supported[idx] = hdev->hw.mac.supported[idx];
11268 		advertising[idx] = hdev->hw.mac.advertising[idx];
11269 	}
11270 }
11271 
11272 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11273 {
11274 	struct hclge_vport *vport = hclge_get_vport(handle);
11275 	struct hclge_dev *hdev = vport->back;
11276 
11277 	return hclge_config_gro(hdev, enable);
11278 }
11279 
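/* Re-apply the PF promiscuous mode when the promiscuous overflow flags of
 * vport 0 have changed: combine the netdev flags with the overflow flags
 * and keep the VLAN filter state in sync.
 */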
11280 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11281 {
11282 	struct hclge_vport *vport = &hdev->vport[0];
11283 	struct hnae3_handle *handle = &vport->nic;
11284 	u8 tmp_flags;
11285 	int ret;
11286 
11287 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11288 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11289 		vport->last_promisc_flags = vport->overflow_promisc_flags;
11290 	}
11291 
11292 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11293 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11294 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11295 					     tmp_flags & HNAE3_MPE);
11296 		if (!ret) {
11297 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11298 			hclge_enable_vlan_filter(handle,
11299 						 tmp_flags & HNAE3_VLAN_FLTR);
11300 		}
11301 	}
11302 }
11303 
11304 static bool hclge_module_existed(struct hclge_dev *hdev)
11305 {
11306 	struct hclge_desc desc;
11307 	u32 existed;
11308 	int ret;
11309 
11310 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11311 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11312 	if (ret) {
11313 		dev_err(&hdev->pdev->dev,
11314 			"failed to get SFP exist state, ret = %d\n", ret);
11315 		return false;
11316 	}
11317 
11318 	existed = le32_to_cpu(desc.data[0]);
11319 
11320 	return existed != 0;
11321 }
11322 
11323 /* need 6 BDs (140 bytes in total) in one read.
11324  * Return the number of bytes actually read; 0 means the read failed.
11325  */
11326 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11327 				     u32 len, u8 *data)
11328 {
11329 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11330 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11331 	u16 read_len;
11332 	u16 copy_len;
11333 	int ret;
11334 	int i;
11335 
11336 	/* setup all 6 bds to read module eeprom info. */
11337 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11338 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11339 					   true);
11340 
11341 		/* bd0~bd4 need next flag */
11342 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11343 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11344 	}
11345 
11346 	/* set up bd0; this bd contains the offset and read length. */
11347 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11348 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11349 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11350 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
11351 
11352 	ret = hclge_cmd_send(&hdev->hw, desc, i);
11353 	if (ret) {
11354 		dev_err(&hdev->pdev->dev,
11355 			"failed to get SFP eeprom info, ret = %d\n", ret);
11356 		return 0;
11357 	}
11358 
11359 	/* copy sfp info from bd0 to out buffer. */
11360 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11361 	memcpy(data, sfp_info_bd0->data, copy_len);
11362 	read_len = copy_len;
11363 
11364 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
11365 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11366 		if (read_len >= len)
11367 			return read_len;
11368 
11369 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11370 		memcpy(data + read_len, desc[i].data, copy_len);
11371 		read_len += copy_len;
11372 	}
11373 
11374 	return read_len;
11375 }
11376 
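/* Read @len bytes of the plugged module's EEPROM into @data, starting at
 * @offset. Only supported on fiber ports; the EEPROM is read in chunks of
 * at most HCLGE_SFP_INFO_MAX_LEN bytes per command.
 */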
11377 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11378 				   u32 len, u8 *data)
11379 {
11380 	struct hclge_vport *vport = hclge_get_vport(handle);
11381 	struct hclge_dev *hdev = vport->back;
11382 	u32 read_len = 0;
11383 	u16 data_len;
11384 
11385 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11386 		return -EOPNOTSUPP;
11387 
11388 	if (!hclge_module_existed(hdev))
11389 		return -ENXIO;
11390 
11391 	while (read_len < len) {
11392 		data_len = hclge_get_sfp_eeprom_info(hdev,
11393 						     offset + read_len,
11394 						     len - read_len,
11395 						     data + read_len);
11396 		if (!data_len)
11397 			return -EIO;
11398 
11399 		read_len += data_len;
11400 	}
11401 
11402 	return 0;
11403 }
11404 
11405 static const struct hnae3_ae_ops hclge_ops = {
11406 	.init_ae_dev = hclge_init_ae_dev,
11407 	.uninit_ae_dev = hclge_uninit_ae_dev,
11408 	.flr_prepare = hclge_flr_prepare,
11409 	.flr_done = hclge_flr_done,
11410 	.init_client_instance = hclge_init_client_instance,
11411 	.uninit_client_instance = hclge_uninit_client_instance,
11412 	.map_ring_to_vector = hclge_map_ring_to_vector,
11413 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11414 	.get_vector = hclge_get_vector,
11415 	.put_vector = hclge_put_vector,
11416 	.set_promisc_mode = hclge_set_promisc_mode,
11417 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
11418 	.set_loopback = hclge_set_loopback,
11419 	.start = hclge_ae_start,
11420 	.stop = hclge_ae_stop,
11421 	.client_start = hclge_client_start,
11422 	.client_stop = hclge_client_stop,
11423 	.get_status = hclge_get_status,
11424 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
11425 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11426 	.get_media_type = hclge_get_media_type,
11427 	.check_port_speed = hclge_check_port_speed,
11428 	.get_fec = hclge_get_fec,
11429 	.set_fec = hclge_set_fec,
11430 	.get_rss_key_size = hclge_get_rss_key_size,
11431 	.get_rss_indir_size = hclge_get_rss_indir_size,
11432 	.get_rss = hclge_get_rss,
11433 	.set_rss = hclge_set_rss,
11434 	.set_rss_tuple = hclge_set_rss_tuple,
11435 	.get_rss_tuple = hclge_get_rss_tuple,
11436 	.get_tc_size = hclge_get_tc_size,
11437 	.get_mac_addr = hclge_get_mac_addr,
11438 	.set_mac_addr = hclge_set_mac_addr,
11439 	.do_ioctl = hclge_do_ioctl,
11440 	.add_uc_addr = hclge_add_uc_addr,
11441 	.rm_uc_addr = hclge_rm_uc_addr,
11442 	.add_mc_addr = hclge_add_mc_addr,
11443 	.rm_mc_addr = hclge_rm_mc_addr,
11444 	.set_autoneg = hclge_set_autoneg,
11445 	.get_autoneg = hclge_get_autoneg,
11446 	.restart_autoneg = hclge_restart_autoneg,
11447 	.halt_autoneg = hclge_halt_autoneg,
11448 	.get_pauseparam = hclge_get_pauseparam,
11449 	.set_pauseparam = hclge_set_pauseparam,
11450 	.set_mtu = hclge_set_mtu,
11451 	.reset_queue = hclge_reset_tqp,
11452 	.get_stats = hclge_get_stats,
11453 	.get_mac_stats = hclge_get_mac_stat,
11454 	.update_stats = hclge_update_stats,
11455 	.get_strings = hclge_get_strings,
11456 	.get_sset_count = hclge_get_sset_count,
11457 	.get_fw_version = hclge_get_fw_version,
11458 	.get_mdix_mode = hclge_get_mdix_mode,
11459 	.enable_vlan_filter = hclge_enable_vlan_filter,
11460 	.set_vlan_filter = hclge_set_vlan_filter,
11461 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11462 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11463 	.reset_event = hclge_reset_event,
11464 	.get_reset_level = hclge_get_reset_level,
11465 	.set_default_reset_request = hclge_set_def_reset_request,
11466 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11467 	.set_channels = hclge_set_channels,
11468 	.get_channels = hclge_get_channels,
11469 	.get_regs_len = hclge_get_regs_len,
11470 	.get_regs = hclge_get_regs,
11471 	.set_led_id = hclge_set_led_id,
11472 	.get_link_mode = hclge_get_link_mode,
11473 	.add_fd_entry = hclge_add_fd_entry,
11474 	.del_fd_entry = hclge_del_fd_entry,
11475 	.del_all_fd_entries = hclge_del_all_fd_entries,
11476 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11477 	.get_fd_rule_info = hclge_get_fd_rule_info,
11478 	.get_fd_all_rules = hclge_get_all_rules,
11479 	.enable_fd = hclge_enable_fd,
11480 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
11481 	.dbg_run_cmd = hclge_dbg_run_cmd,
11482 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
11483 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
11484 	.ae_dev_resetting = hclge_ae_dev_resetting,
11485 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11486 	.set_gro_en = hclge_gro_en,
11487 	.get_global_queue_id = hclge_covert_handle_qid_global,
11488 	.set_timer_task = hclge_set_timer_task,
11489 	.mac_connect_phy = hclge_mac_connect_phy,
11490 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
11491 	.get_vf_config = hclge_get_vf_config,
11492 	.set_vf_link_state = hclge_set_vf_link_state,
11493 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
11494 	.set_vf_trust = hclge_set_vf_trust,
11495 	.set_vf_rate = hclge_set_vf_rate,
11496 	.set_vf_mac = hclge_set_vf_mac,
11497 	.get_module_eeprom = hclge_get_module_eeprom,
11498 	.get_cmdq_stat = hclge_get_cmdq_stat,
11499 };
11500 
11501 static struct hnae3_ae_algo ae_algo = {
11502 	.ops = &hclge_ops,
11503 	.pdev_id_table = ae_algo_pci_tbl,
11504 };
11505 
11506 static int hclge_init(void)
11507 {
11508 	pr_info("%s is initializing\n", HCLGE_NAME);
11509 
11510 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11511 	if (!hclge_wq) {
11512 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11513 		return -ENOMEM;
11514 	}
11515 
11516 	hnae3_register_ae_algo(&ae_algo);
11517 
11518 	return 0;
11519 }
11520 
11521 static void hclge_exit(void)
11522 {
11523 	hnae3_unregister_ae_algo(&ae_algo);
11524 	destroy_workqueue(hclge_wq);
11525 }
11526 module_init(hclge_init);
11527 module_exit(hclge_exit);
11528 
11529 MODULE_LICENSE("GPL");
11530 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11531 MODULE_DESCRIPTION("HCLGE Driver");
11532 MODULE_VERSION(HCLGE_MOD_VERSION);
11533