xref: /linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision e83332842a46c091992ad06145b5c1b65a08ab05)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 
27 #define HCLGE_NAME			"hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 
31 #define HCLGE_BUF_SIZE_UNIT	256U
32 #define HCLGE_BUF_MUL_BY	2
33 #define HCLGE_BUF_DIV_BY	2
34 #define NEED_RESERVE_TC_NUM	2
35 #define BUF_MAX_PERCENT		100
36 #define BUF_RESERVE_PERCENT	90
37 
38 #define HCLGE_RESET_MAX_FAIL_CNT	5
39 #define HCLGE_RESET_SYNC_TIME		100
40 #define HCLGE_PF_RESET_SYNC_TIME	20
41 #define HCLGE_PF_RESET_SYNC_CNT		1500
42 
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56 
57 #define HCLGE_LINK_STATUS_MS	10
58 
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67 						   unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69 
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
405 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
409 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
418 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
421 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
424 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
427 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
428 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 	{ INNER_DST_IP, 32, KEY_OPT_IP,
430 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
433 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 	  offsetof(struct hclge_fd_rule, tuples.src_port),
437 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
439 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
440 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
442 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
445 
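/* Legacy MAC statistics path: issue a fixed burst of HCLGE_MAC_CMD_NUM
 * descriptors with HCLGE_OPC_STATS_MAC and accumulate the returned
 * 64-bit counters into hdev->mac_stats in field order.
 */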
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449 
450 	u64 *data = (u64 *)(&hdev->mac_stats);
451 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452 	__le64 *desc_data;
453 	int i, k, n;
454 	int ret;
455 
456 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458 	if (ret) {
459 		dev_err(&hdev->pdev->dev,
460 			"Get MAC pkt stats fail, status = %d.\n", ret);
461 
462 		return ret;
463 	}
464 
465 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 		/* for special opcode 0032, only the first desc has the head */
467 		if (unlikely(i == 0)) {
468 			desc_data = (__le64 *)(&desc[i].data[0]);
469 			n = HCLGE_RD_FIRST_STATS_NUM;
470 		} else {
471 			desc_data = (__le64 *)(&desc[i]);
472 			n = HCLGE_RD_OTHER_STATS_NUM;
473 		}
474 
475 		for (k = 0; k < n; k++) {
476 			*data += le64_to_cpu(*desc_data);
477 			data++;
478 			desc_data++;
479 		}
480 	}
481 
482 	return 0;
483 }
484 
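/* Newer MAC statistics path: the caller supplies the descriptor count
 * (derived from the register number reported by firmware), so every
 * counter exposed by HCLGE_OPC_STATS_MAC_ALL can be collected.
 */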
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487 	u64 *data = (u64 *)(&hdev->mac_stats);
488 	struct hclge_desc *desc;
489 	__le64 *desc_data;
490 	u16 i, k, n;
491 	int ret;
492 
493 	/* This may be called inside atomic sections,
494 	 * so GFP_ATOMIC is more suitable here
495 	 */
496 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497 	if (!desc)
498 		return -ENOMEM;
499 
500 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502 	if (ret) {
503 		kfree(desc);
504 		return ret;
505 	}
506 
507 	for (i = 0; i < desc_num; i++) {
508 		/* for special opcode 0034, only the first desc has the head */
509 		if (i == 0) {
510 			desc_data = (__le64 *)(&desc[i].data[0]);
511 			n = HCLGE_RD_FIRST_STATS_NUM;
512 		} else {
513 			desc_data = (__le64 *)(&desc[i]);
514 			n = HCLGE_RD_OTHER_STATS_NUM;
515 		}
516 
517 		for (k = 0; k < n; k++) {
518 			*data += le64_to_cpu(*desc_data);
519 			data++;
520 			desc_data++;
521 		}
522 	}
523 
524 	kfree(desc);
525 
526 	return 0;
527 }
528 
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531 	struct hclge_desc desc;
532 	__le32 *desc_data;
533 	u32 reg_num;
534 	int ret;
535 
536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538 	if (ret)
539 		return ret;
540 
541 	desc_data = (__le32 *)(&desc.data[0]);
542 	reg_num = le32_to_cpu(*desc_data);
543 
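	/* i.e. desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4); for example,
	 * a reported reg_num of 21 maps to 6 descriptors.
	 */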
544 	*desc_num = 1 + ((reg_num - 3) >> 2) +
545 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
546 
547 	return 0;
548 }
549 
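/* Query the MAC register number first to choose the statistics path:
 * firmware that does not recognise the query returns -EOPNOTSUPP and
 * the legacy fixed-size read is used instead.
 */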
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552 	u32 desc_num;
553 	int ret;
554 
555 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 	/* The firmware supports the new statistics acquisition method */
557 	if (!ret)
558 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
559 	else if (ret == -EOPNOTSUPP)
560 		ret = hclge_mac_update_stats_defective(hdev);
561 	else
562 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
563 
564 	return ret;
565 }
566 
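/* Accumulate the per-queue RX and TX packet counters: one descriptor is
 * sent per TQP, with the queue index in data[0] and the counter
 * returned in data[1].
 */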
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
568 {
569 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570 	struct hclge_vport *vport = hclge_get_vport(handle);
571 	struct hclge_dev *hdev = vport->back;
572 	struct hnae3_queue *queue;
573 	struct hclge_desc desc[1];
574 	struct hclge_tqp *tqp;
575 	int ret, i;
576 
577 	for (i = 0; i < kinfo->num_tqps; i++) {
578 		queue = handle->kinfo.tqp[i];
579 		tqp = container_of(queue, struct hclge_tqp, q);
580 		/* command : HCLGE_OPC_QUERY_RX_STATS */
581 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
582 					   true);
583 
584 		desc[0].data[0] = cpu_to_le32(tqp->index);
585 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
586 		if (ret) {
587 			dev_err(&hdev->pdev->dev,
588 				"Query tqp stat fail, status = %d, queue = %d\n",
589 				ret, i);
590 			return ret;
591 		}
592 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593 			le32_to_cpu(desc[0].data[1]);
594 	}
595 
596 	for (i = 0; i < kinfo->num_tqps; i++) {
597 		queue = handle->kinfo.tqp[i];
598 		tqp = container_of(queue, struct hclge_tqp, q);
599 		/* command : HCLGE_OPC_QUERY_TX_STATS */
600 		hclge_cmd_setup_basic_desc(&desc[0],
601 					   HCLGE_OPC_QUERY_TX_STATS,
602 					   true);
603 
604 		desc[0].data[0] = cpu_to_le32(tqp->index);
605 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
606 		if (ret) {
607 			dev_err(&hdev->pdev->dev,
608 				"Query tqp stat fail, status = %d, queue = %d\n",
609 				ret, i);
610 			return ret;
611 		}
612 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613 			le32_to_cpu(desc[0].data[1]);
614 	}
615 
616 	return 0;
617 }
618 
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	struct hclge_tqp *tqp;
623 	u64 *buff = data;
624 	int i;
625 
626 	for (i = 0; i < kinfo->num_tqps; i++) {
627 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
629 	}
630 
631 	for (i = 0; i < kinfo->num_tqps; i++) {
632 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
634 	}
635 
636 	return buff;
637 }
638 
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
640 {
641 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
642 
643 	/* each tqp has one TX queue and one RX queue */
644 	return kinfo->num_tqps * (2);
645 }
646 
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
648 {
649 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
650 	u8 *buff = data;
651 	int i;
652 
653 	for (i = 0; i < kinfo->num_tqps; i++) {
654 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655 			struct hclge_tqp, q);
656 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
657 			 tqp->index);
658 		buff = buff + ETH_GSTRING_LEN;
659 	}
660 
661 	for (i = 0; i < kinfo->num_tqps; i++) {
662 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663 			struct hclge_tqp, q);
664 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
665 			 tqp->index);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return buff;
670 }
671 
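/* Copy the counters described by a hclge_comm_stats_str table from a
 * stats structure into the ethtool data buffer using the byte offsets
 * recorded in the table; hclge_get_stats() chains this with
 * hclge_tqps_get_stats() to fill the whole buffer.
 */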
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673 				 const struct hclge_comm_stats_str strs[],
674 				 int size, u64 *data)
675 {
676 	u64 *buf = data;
677 	u32 i;
678 
679 	for (i = 0; i < size; i++)
680 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
681 
682 	return buf + size;
683 }
684 
685 static u8 *hclge_comm_get_strings(u32 stringset,
686 				  const struct hclge_comm_stats_str strs[],
687 				  int size, u8 *data)
688 {
689 	char *buff = (char *)data;
690 	u32 i;
691 
692 	if (stringset != ETH_SS_STATS)
693 		return buff;
694 
695 	for (i = 0; i < size; i++) {
696 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697 		buff = buff + ETH_GSTRING_LEN;
698 	}
699 
700 	return (u8 *)buff;
701 }
702 
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
704 {
705 	struct hnae3_handle *handle;
706 	int status;
707 
708 	handle = &hdev->vport[0].nic;
709 	if (handle->client) {
710 		status = hclge_tqps_update_stats(handle);
711 		if (status) {
712 			dev_err(&hdev->pdev->dev,
713 				"Update TQPS stats fail, status = %d.\n",
714 				status);
715 		}
716 	}
717 
718 	status = hclge_mac_update_stats(hdev);
719 	if (status)
720 		dev_err(&hdev->pdev->dev,
721 			"Update MAC stats fail, status = %d.\n", status);
722 }
723 
724 static void hclge_update_stats(struct hnae3_handle *handle,
725 			       struct net_device_stats *net_stats)
726 {
727 	struct hclge_vport *vport = hclge_get_vport(handle);
728 	struct hclge_dev *hdev = vport->back;
729 	int status;
730 
731 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
732 		return;
733 
734 	status = hclge_mac_update_stats(hdev);
735 	if (status)
736 		dev_err(&hdev->pdev->dev,
737 			"Update MAC stats fail, status = %d.\n",
738 			status);
739 
740 	status = hclge_tqps_update_stats(handle);
741 	if (status)
742 		dev_err(&hdev->pdev->dev,
743 			"Update TQPS stats fail, status = %d.\n",
744 			status);
745 
746 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
747 }
748 
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
750 {
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752 		HNAE3_SUPPORT_PHY_LOOPBACK |\
753 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
755 
756 	struct hclge_vport *vport = hclge_get_vport(handle);
757 	struct hclge_dev *hdev = vport->back;
758 	int count = 0;
759 
760 	/* Loopback test support rules:
761 	 * mac: only supported in GE mode
762 	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
763 	 * phy: only supported when a phy device exists on the board
764 	 */
765 	if (stringset == ETH_SS_TEST) {
766 		/* clear loopback bit flags at first */
767 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
772 			count += 1;
773 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
774 		}
775 
776 		count += 2;
777 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
779 
780 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781 		     hdev->hw.mac.phydev->drv->set_loopback) ||
782 		    hnae3_dev_phy_imp_supported(hdev)) {
783 			count += 1;
784 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
785 		}
786 	} else if (stringset == ETH_SS_STATS) {
787 		count = ARRAY_SIZE(g_mac_stats_string) +
788 			hclge_tqps_get_sset_count(handle, stringset);
789 	}
790 
791 	return count;
792 }
793 
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
795 			      u8 *data)
796 {
797 	u8 *p = (char *)data;
798 	int size;
799 
800 	if (stringset == ETH_SS_STATS) {
801 		size = ARRAY_SIZE(g_mac_stats_string);
802 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
803 					   size, p);
804 		p = hclge_tqps_get_strings(handle, p);
805 	} else if (stringset == ETH_SS_TEST) {
806 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
808 			       ETH_GSTRING_LEN);
809 			p += ETH_GSTRING_LEN;
810 		}
811 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
813 			       ETH_GSTRING_LEN);
814 			p += ETH_GSTRING_LEN;
815 		}
816 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
817 			memcpy(p,
818 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
819 			       ETH_GSTRING_LEN);
820 			p += ETH_GSTRING_LEN;
821 		}
822 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
824 			       ETH_GSTRING_LEN);
825 			p += ETH_GSTRING_LEN;
826 		}
827 	}
828 }
829 
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
831 {
832 	struct hclge_vport *vport = hclge_get_vport(handle);
833 	struct hclge_dev *hdev = vport->back;
834 	u64 *p;
835 
836 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837 				 ARRAY_SIZE(g_mac_stats_string), data);
838 	p = hclge_tqps_get_stats(handle, p);
839 }
840 
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842 			       struct hns3_mac_stats *mac_stats)
843 {
844 	struct hclge_vport *vport = hclge_get_vport(handle);
845 	struct hclge_dev *hdev = vport->back;
846 
847 	hclge_update_stats(handle, NULL);
848 
849 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
851 }
852 
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854 				   struct hclge_func_status_cmd *status)
855 {
856 #define HCLGE_MAC_ID_MASK	0xF
857 
858 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
859 		return -EINVAL;
860 
861 	/* record whether this pf is the main pf */
862 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
863 		hdev->flag |= HCLGE_FLAG_MAIN;
864 	else
865 		hdev->flag &= ~HCLGE_FLAG_MAIN;
866 
867 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
868 	return 0;
869 }
870 
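/* Poll the function status until firmware reports a pf state (at most
 * HCLGE_QUERY_MAX_CNT tries), then record whether this pf is the main
 * pf and extract its MAC id.
 */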
871 static int hclge_query_function_status(struct hclge_dev *hdev)
872 {
873 #define HCLGE_QUERY_MAX_CNT	5
874 
875 	struct hclge_func_status_cmd *req;
876 	struct hclge_desc desc;
877 	int timeout = 0;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881 	req = (struct hclge_func_status_cmd *)desc.data;
882 
883 	do {
884 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885 		if (ret) {
886 			dev_err(&hdev->pdev->dev,
887 				"query function status failed %d.\n", ret);
888 			return ret;
889 		}
890 
891 		/* check whether pf reset is done */
892 		if (req->pf_state)
893 			break;
894 		usleep_range(1000, 2000);
895 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
896 
897 	return hclge_parse_func_status(hdev, req);
898 }
899 
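/* Read the pf resource descriptor to learn the TQP count, packet/tx/dv
 * buffer sizes and the NIC/RoCE MSI-X vector budget; fewer NIC vectors
 * than HNAE3_MIN_VECTOR_NUM is treated as an error.
 */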
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
901 {
902 	struct hclge_pf_res_cmd *req;
903 	struct hclge_desc desc;
904 	int ret;
905 
906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
908 	if (ret) {
909 		dev_err(&hdev->pdev->dev,
910 			"query pf resource failed %d.\n", ret);
911 		return ret;
912 	}
913 
914 	req = (struct hclge_pf_res_cmd *)desc.data;
915 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916 			 le16_to_cpu(req->ext_tqp_num);
917 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
918 
919 	if (req->tx_buf_size)
920 		hdev->tx_buf_size =
921 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
922 	else
923 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
924 
925 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
926 
927 	if (req->dv_buf_size)
928 		hdev->dv_buf_size =
929 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
930 	else
931 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
932 
933 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
934 
935 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937 		dev_err(&hdev->pdev->dev,
938 			"only %u msi resources available, not enough for pf(min:2).\n",
939 			hdev->num_nic_msi);
940 		return -EINVAL;
941 	}
942 
943 	if (hnae3_dev_roce_supported(hdev)) {
944 		hdev->num_roce_msi =
945 			le16_to_cpu(req->pf_intr_vector_number_roce);
946 
947 		/* PF should have both NIC vectors and RoCE vectors;
948 		 * NIC vectors are queued before RoCE vectors.
949 		 */
950 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
951 	} else {
952 		hdev->num_msi = hdev->num_nic_msi;
953 	}
954 
955 	return 0;
956 }
957 
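/* Translate the speed code from the configuration parameters into an
 * HCLGE_MAC_SPEED_* value; unknown codes are rejected with -EINVAL.
 */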
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
959 {
960 	switch (speed_cmd) {
961 	case 6:
962 		*speed = HCLGE_MAC_SPEED_10M;
963 		break;
964 	case 7:
965 		*speed = HCLGE_MAC_SPEED_100M;
966 		break;
967 	case 0:
968 		*speed = HCLGE_MAC_SPEED_1G;
969 		break;
970 	case 1:
971 		*speed = HCLGE_MAC_SPEED_10G;
972 		break;
973 	case 2:
974 		*speed = HCLGE_MAC_SPEED_25G;
975 		break;
976 	case 3:
977 		*speed = HCLGE_MAC_SPEED_40G;
978 		break;
979 	case 4:
980 		*speed = HCLGE_MAC_SPEED_50G;
981 		break;
982 	case 5:
983 		*speed = HCLGE_MAC_SPEED_100G;
984 		break;
985 	case 8:
986 		*speed = HCLGE_MAC_SPEED_200G;
987 		break;
988 	default:
989 		return -EINVAL;
990 	}
991 
992 	return 0;
993 }
994 
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
996 {
997 	struct hclge_vport *vport = hclge_get_vport(handle);
998 	struct hclge_dev *hdev = vport->back;
999 	u32 speed_ability = hdev->hw.mac.speed_ability;
1000 	u32 speed_bit = 0;
1001 
1002 	switch (speed) {
1003 	case HCLGE_MAC_SPEED_10M:
1004 		speed_bit = HCLGE_SUPPORT_10M_BIT;
1005 		break;
1006 	case HCLGE_MAC_SPEED_100M:
1007 		speed_bit = HCLGE_SUPPORT_100M_BIT;
1008 		break;
1009 	case HCLGE_MAC_SPEED_1G:
1010 		speed_bit = HCLGE_SUPPORT_1G_BIT;
1011 		break;
1012 	case HCLGE_MAC_SPEED_10G:
1013 		speed_bit = HCLGE_SUPPORT_10G_BIT;
1014 		break;
1015 	case HCLGE_MAC_SPEED_25G:
1016 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1017 		break;
1018 	case HCLGE_MAC_SPEED_40G:
1019 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1020 		break;
1021 	case HCLGE_MAC_SPEED_50G:
1022 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1023 		break;
1024 	case HCLGE_MAC_SPEED_100G:
1025 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1026 		break;
1027 	case HCLGE_MAC_SPEED_200G:
1028 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1029 		break;
1030 	default:
1031 		return -EINVAL;
1032 	}
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
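/* Derive the supported FEC modes from the current MAC speed: BaseR for
 * 10G/40G, BaseR and RS for 25G/50G, RS only for 100G/200G, and no FEC
 * ability otherwise.
 */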
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
1207 	/* default to supporting all speeds for a GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
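/* Unpack the configuration parameters returned in two descriptors:
 * desc[0] carries the TC number, queue descriptor number, PHY address,
 * media type, RX buffer length, MAC address and default speed, while
 * desc[1] carries the NUMA node map, speed ability, VLAN filter
 * capability, UMV space and PF RSS size.
 */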
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define SPEED_ABILITY_EXT_SHIFT			8
1283 
1284 	struct hclge_cfg_param_cmd *req;
1285 	u64 mac_addr_tmp_high;
1286 	u16 speed_ability_ext;
1287 	u64 mac_addr_tmp;
1288 	unsigned int i;
1289 
1290 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1291 
1292 	/* get the configuration */
1293 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1294 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1295 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296 					    HCLGE_CFG_TQP_DESC_N_M,
1297 					    HCLGE_CFG_TQP_DESC_N_S);
1298 
1299 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300 					HCLGE_CFG_PHY_ADDR_M,
1301 					HCLGE_CFG_PHY_ADDR_S);
1302 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303 					  HCLGE_CFG_MEDIA_TP_M,
1304 					  HCLGE_CFG_MEDIA_TP_S);
1305 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306 					  HCLGE_CFG_RX_BUF_LEN_M,
1307 					  HCLGE_CFG_RX_BUF_LEN_S);
1308 	/* get mac_address */
1309 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1310 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1311 					    HCLGE_CFG_MAC_ADDR_H_M,
1312 					    HCLGE_CFG_MAC_ADDR_H_S);
1313 
1314 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1315 
1316 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1317 					     HCLGE_CFG_DEFAULT_SPEED_M,
1318 					     HCLGE_CFG_DEFAULT_SPEED_S);
1319 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1320 					       HCLGE_CFG_RSS_SIZE_M,
1321 					       HCLGE_CFG_RSS_SIZE_S);
1322 
1323 	for (i = 0; i < ETH_ALEN; i++)
1324 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1325 
1326 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1327 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1328 
1329 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1330 					     HCLGE_CFG_SPEED_ABILITY_M,
1331 					     HCLGE_CFG_SPEED_ABILITY_S);
1332 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1333 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1335 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1336 
1337 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1339 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1340 
1341 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1342 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1343 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1344 	if (!cfg->umv_space)
1345 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1346 
1347 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1348 					       HCLGE_CFG_PF_RSS_SIZE_M,
1349 					       HCLGE_CFG_PF_RSS_SIZE_S);
1350 
1351 	/* HCLGE_CFG_PF_RSS_SIZE_M stores the PF max rss size as a power
1352 	 * of 2 exponent instead of the value itself, which is more
1353 	 * flexible for future changes and expansions.
1354 	 * A PF field of 0 is not meaningful, so in that case the PF falls
1355 	 * back to the VF max rss size read from the HCLGE_CFG_RSS_SIZE
1356 	 * field, i.e. PF and VF share the same max rss size.
1357 	 */
1358 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1359 			       1U << cfg->pf_rss_size_max :
1360 			       cfg->vf_rss_size_max;
1361 }
1362 
1363 /* hclge_get_cfg: query the static parameters from flash
1364  * @hdev: pointer to struct hclge_dev
1365  * @hcfg: the config structure to be filled
1366  */
1367 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1368 {
1369 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1370 	struct hclge_cfg_param_cmd *req;
1371 	unsigned int i;
1372 	int ret;
1373 
1374 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1375 		u32 offset = 0;
1376 
1377 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1378 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1379 					   true);
1380 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1381 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1382 		/* the length must be in units of 4 bytes when sent to hardware */
1383 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1384 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1385 		req->offset = cpu_to_le32(offset);
1386 	}
1387 
1388 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1389 	if (ret) {
1390 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1391 		return ret;
1392 	}
1393 
1394 	hclge_parse_cfg(hcfg, desc);
1395 
1396 	return 0;
1397 }
1398 
1399 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1400 {
1401 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1402 
1403 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1404 
1405 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1406 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1407 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1408 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1409 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1410 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1411 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1412 }
1413 
1414 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1415 				  struct hclge_desc *desc)
1416 {
1417 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1418 	struct hclge_dev_specs_0_cmd *req0;
1419 	struct hclge_dev_specs_1_cmd *req1;
1420 
1421 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1422 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1423 
1424 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1425 	ae_dev->dev_specs.rss_ind_tbl_size =
1426 		le16_to_cpu(req0->rss_ind_tbl_size);
1427 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1428 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1429 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1430 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1431 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1432 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1433 }
1434 
1435 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1436 {
1437 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1438 
1439 	if (!dev_specs->max_non_tso_bd_num)
1440 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1441 	if (!dev_specs->rss_ind_tbl_size)
1442 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1443 	if (!dev_specs->rss_key_size)
1444 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1445 	if (!dev_specs->max_tm_rate)
1446 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1447 	if (!dev_specs->max_qset_num)
1448 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1449 	if (!dev_specs->max_int_gl)
1450 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1451 	if (!dev_specs->max_frm_size)
1452 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1453 }
1454 
1455 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1456 {
1457 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1458 	int ret;
1459 	int i;
1460 
1461 	/* set default specifications as devices lower than version V3 do not
1462 	 * support querying specifications from firmware.
1463 	 */
1464 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1465 		hclge_set_default_dev_specs(hdev);
1466 		return 0;
1467 	}
1468 
1469 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1470 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1471 					   true);
1472 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1473 	}
1474 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1475 
1476 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1477 	if (ret)
1478 		return ret;
1479 
1480 	hclge_parse_dev_specs(hdev, desc);
1481 	hclge_check_dev_specs(hdev);
1482 
1483 	return 0;
1484 }
1485 
1486 static int hclge_get_cap(struct hclge_dev *hdev)
1487 {
1488 	int ret;
1489 
1490 	ret = hclge_query_function_status(hdev);
1491 	if (ret) {
1492 		dev_err(&hdev->pdev->dev,
1493 			"query function status error %d.\n", ret);
1494 		return ret;
1495 	}
1496 
1497 	/* get pf resource */
1498 	return hclge_query_pf_resource(hdev);
1499 }
1500 
1501 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1502 {
1503 #define HCLGE_MIN_TX_DESC	64
1504 #define HCLGE_MIN_RX_DESC	64
1505 
1506 	if (!is_kdump_kernel())
1507 		return;
1508 
1509 	dev_info(&hdev->pdev->dev,
1510 		 "Running kdump kernel. Using minimal resources\n");
1511 
1512 	/* the minimal number of queue pairs equals the number of vports */
1513 	hdev->num_tqps = hdev->num_req_vfs + 1;
1514 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1515 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1516 }
1517 
1518 static int hclge_configure(struct hclge_dev *hdev)
1519 {
1520 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1521 	struct hclge_cfg cfg;
1522 	unsigned int i;
1523 	int ret;
1524 
1525 	ret = hclge_get_cfg(hdev, &cfg);
1526 	if (ret)
1527 		return ret;
1528 
1529 	hdev->base_tqp_pid = 0;
1530 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1531 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1532 	hdev->rx_buf_len = cfg.rx_buf_len;
1533 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1534 	hdev->hw.mac.media_type = cfg.media_type;
1535 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1536 	hdev->num_tx_desc = cfg.tqp_desc_num;
1537 	hdev->num_rx_desc = cfg.tqp_desc_num;
1538 	hdev->tm_info.num_pg = 1;
1539 	hdev->tc_max = cfg.tc_num;
1540 	hdev->tm_info.hw_pfc_map = 0;
1541 	hdev->wanted_umv_size = cfg.umv_space;
1542 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1543 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1544 
1545 	if (hnae3_dev_fd_supported(hdev)) {
1546 		hdev->fd_en = true;
1547 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1548 	}
1549 
1550 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1551 	if (ret) {
1552 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1553 			cfg.default_speed, ret);
1554 		return ret;
1555 	}
1556 
1557 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1558 
1559 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1560 
1561 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1562 	    (hdev->tc_max < 1)) {
1563 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1564 			 hdev->tc_max);
1565 		hdev->tc_max = 1;
1566 	}
1567 
1568 	/* Dev does not support DCB */
1569 	if (!hnae3_dev_dcb_supported(hdev)) {
1570 		hdev->tc_max = 1;
1571 		hdev->pfc_max = 0;
1572 	} else {
1573 		hdev->pfc_max = hdev->tc_max;
1574 	}
1575 
1576 	hdev->tm_info.num_tc = 1;
1577 
1578 	/* non-contiguous tc configuration is currently not supported */
1579 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1580 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1581 
1582 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1583 
1584 	hclge_init_kdump_kernel_config(hdev);
1585 
1586 	/* Set the initial affinity based on the pci function number */
1587 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1588 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1589 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1590 			&hdev->affinity_mask);
1591 
1592 	return ret;
1593 }
1594 
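/* Program the TSO MSS range (minimum and maximum segment size) into
 * hardware via HCLGE_OPC_TSO_GENERIC_CONFIG.
 */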
1595 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1596 			    u16 tso_mss_max)
1597 {
1598 	struct hclge_cfg_tso_status_cmd *req;
1599 	struct hclge_desc desc;
1600 
1601 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1602 
1603 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1604 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1605 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1606 
1607 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1608 }
1609 
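/* Enable or disable hardware GRO via HCLGE_OPC_GRO_GENERIC_CONFIG;
 * devices without GRO support return success without sending the
 * command.
 */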
1610 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1611 {
1612 	struct hclge_cfg_gro_status_cmd *req;
1613 	struct hclge_desc desc;
1614 	int ret;
1615 
1616 	if (!hnae3_dev_gro_supported(hdev))
1617 		return 0;
1618 
1619 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1620 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1621 
1622 	req->gro_en = en ? 1 : 0;
1623 
1624 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1625 	if (ret)
1626 		dev_err(&hdev->pdev->dev,
1627 			"GRO hardware config cmd failed, ret = %d\n", ret);
1628 
1629 	return ret;
1630 }
1631 
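/* Allocate the per-PF TQP array and compute each queue's register window.
 * Queues below HCLGE_TQP_MAX_SIZE_DEV_V2 sit at
 * io_base + HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE; higher queues
 * additionally skip HCLGE_TQP_EXT_REG_OFFSET and index from
 * (i - HCLGE_TQP_MAX_SIZE_DEV_V2), as the code below shows.
 */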
1632 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1633 {
1634 	struct hclge_tqp *tqp;
1635 	int i;
1636 
1637 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1638 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1639 	if (!hdev->htqp)
1640 		return -ENOMEM;
1641 
1642 	tqp = hdev->htqp;
1643 
1644 	for (i = 0; i < hdev->num_tqps; i++) {
1645 		tqp->dev = &hdev->pdev->dev;
1646 		tqp->index = i;
1647 
1648 		tqp->q.ae_algo = &ae_algo;
1649 		tqp->q.buf_size = hdev->rx_buf_len;
1650 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1651 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1652 
1653 		/* need an extended offset to configure queues >=
1654 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1655 		 */
1656 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1657 			tqp->q.io_base = hdev->hw.io_base +
1658 					 HCLGE_TQP_REG_OFFSET +
1659 					 i * HCLGE_TQP_REG_SIZE;
1660 		else
1661 			tqp->q.io_base = hdev->hw.io_base +
1662 					 HCLGE_TQP_REG_OFFSET +
1663 					 HCLGE_TQP_EXT_REG_OFFSET +
1664 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1665 					 HCLGE_TQP_REG_SIZE;
1666 
1667 		tqp++;
1668 	}
1669 
1670 	return 0;
1671 }
1672 
1673 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1674 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1675 {
1676 	struct hclge_tqp_map_cmd *req;
1677 	struct hclge_desc desc;
1678 	int ret;
1679 
1680 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1681 
1682 	req = (struct hclge_tqp_map_cmd *)desc.data;
1683 	req->tqp_id = cpu_to_le16(tqp_pid);
1684 	req->tqp_vf = func_id;
1685 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1686 	if (!is_pf)
1687 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1688 	req->tqp_vid = cpu_to_le16(tqp_vid);
1689 
1690 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1691 	if (ret)
1692 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1693 
1694 	return ret;
1695 }
1696 
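/* Hand out unassigned TQPs to a vport and derive its RSS size. A worked
 * example with assumed numbers: if the vport got 8 TQPs, num_tc is 4 and
 * pf_rss_size_max is 16, then rss_size = min(16, 8 / 4) = 2; it is then
 * capped again by (num_nic_msi - 1) / num_tc so that every queue can keep
 * its own interrupt vector.
 */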
1697 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1698 {
1699 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1700 	struct hclge_dev *hdev = vport->back;
1701 	int i, alloced;
1702 
1703 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1704 	     alloced < num_tqps; i++) {
1705 		if (!hdev->htqp[i].alloced) {
1706 			hdev->htqp[i].q.handle = &vport->nic;
1707 			hdev->htqp[i].q.tqp_index = alloced;
1708 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1709 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1710 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1711 			hdev->htqp[i].alloced = true;
1712 			alloced++;
1713 		}
1714 	}
1715 	vport->alloc_tqps = alloced;
1716 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1717 				vport->alloc_tqps / hdev->tm_info.num_tc);
1718 
1719 	/* ensure a one-to-one mapping between irq vector and queue by default */
1720 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1721 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1722 
1723 	return 0;
1724 }
1725 
1726 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1727 			    u16 num_tx_desc, u16 num_rx_desc)
1728 
1729 {
1730 	struct hnae3_handle *nic = &vport->nic;
1731 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1732 	struct hclge_dev *hdev = vport->back;
1733 	int ret;
1734 
1735 	kinfo->num_tx_desc = num_tx_desc;
1736 	kinfo->num_rx_desc = num_rx_desc;
1737 
1738 	kinfo->rx_buf_len = hdev->rx_buf_len;
1739 
1740 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1741 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1742 	if (!kinfo->tqp)
1743 		return -ENOMEM;
1744 
1745 	ret = hclge_assign_tqp(vport, num_tqps);
1746 	if (ret)
1747 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1748 
1749 	return ret;
1750 }
1751 
1752 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1753 				  struct hclge_vport *vport)
1754 {
1755 	struct hnae3_handle *nic = &vport->nic;
1756 	struct hnae3_knic_private_info *kinfo;
1757 	u16 i;
1758 
1759 	kinfo = &nic->kinfo;
1760 	for (i = 0; i < vport->alloc_tqps; i++) {
1761 		struct hclge_tqp *q =
1762 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1763 		bool is_pf;
1764 		int ret;
1765 
1766 		is_pf = !(vport->vport_id);
1767 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1768 					     i, is_pf);
1769 		if (ret)
1770 			return ret;
1771 	}
1772 
1773 	return 0;
1774 }
1775 
1776 static int hclge_map_tqp(struct hclge_dev *hdev)
1777 {
1778 	struct hclge_vport *vport = hdev->vport;
1779 	u16 i, num_vport;
1780 
1781 	num_vport = hdev->num_req_vfs + 1;
1782 	for (i = 0; i < num_vport; i++)	{
1783 		int ret;
1784 
1785 		ret = hclge_map_tqp_to_vport(hdev, vport);
1786 		if (ret)
1787 			return ret;
1788 
1789 		vport++;
1790 	}
1791 
1792 	return 0;
1793 }
1794 
1795 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1796 {
1797 	struct hnae3_handle *nic = &vport->nic;
1798 	struct hclge_dev *hdev = vport->back;
1799 	int ret;
1800 
1801 	nic->pdev = hdev->pdev;
1802 	nic->ae_algo = &ae_algo;
1803 	nic->numa_node_mask = hdev->numa_node_mask;
1804 
1805 	ret = hclge_knic_setup(vport, num_tqps,
1806 			       hdev->num_tx_desc, hdev->num_rx_desc);
1807 	if (ret)
1808 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1809 
1810 	return ret;
1811 }
1812 
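/* Create one vport for the PF itself plus one per requested VF and split
 * the TQPs between them. A worked example with assumed numbers: with
 * num_tqps = 10 and num_req_vfs = 3 there are 4 vports, so
 * tqp_per_vport = 10 / 4 = 2 and the main (PF) vport also takes the
 * remainder: 2 + 10 % 4 = 4.
 */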
1813 static int hclge_alloc_vport(struct hclge_dev *hdev)
1814 {
1815 	struct pci_dev *pdev = hdev->pdev;
1816 	struct hclge_vport *vport;
1817 	u32 tqp_main_vport;
1818 	u32 tqp_per_vport;
1819 	int num_vport, i;
1820 	int ret;
1821 
1822 	/* We need to alloc a vport for the main NIC of the PF */
1823 	num_vport = hdev->num_req_vfs + 1;
1824 
1825 	if (hdev->num_tqps < num_vport) {
1826 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1827 			hdev->num_tqps, num_vport);
1828 		return -EINVAL;
1829 	}
1830 
1831 	/* Alloc the same number of TQPs for every vport */
1832 	tqp_per_vport = hdev->num_tqps / num_vport;
1833 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1834 
1835 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1836 			     GFP_KERNEL);
1837 	if (!vport)
1838 		return -ENOMEM;
1839 
1840 	hdev->vport = vport;
1841 	hdev->num_alloc_vport = num_vport;
1842 
1843 	if (IS_ENABLED(CONFIG_PCI_IOV))
1844 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1845 
1846 	for (i = 0; i < num_vport; i++) {
1847 		vport->back = hdev;
1848 		vport->vport_id = i;
1849 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1850 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1851 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1852 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1853 		vport->req_vlan_fltr_en = true;
1854 		INIT_LIST_HEAD(&vport->vlan_list);
1855 		INIT_LIST_HEAD(&vport->uc_mac_list);
1856 		INIT_LIST_HEAD(&vport->mc_mac_list);
1857 		spin_lock_init(&vport->mac_list_lock);
1858 
1859 		if (i == 0)
1860 			ret = hclge_vport_setup(vport, tqp_main_vport);
1861 		else
1862 			ret = hclge_vport_setup(vport, tqp_per_vport);
1863 		if (ret) {
1864 			dev_err(&pdev->dev,
1865 				"vport setup failed for vport %d, %d\n",
1866 				i, ret);
1867 			return ret;
1868 		}
1869 
1870 		vport++;
1871 	}
1872 
1873 	return 0;
1874 }
1875 
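/* Program the per-TC TX buffer sizes. The firmware takes each size in
 * 128-byte units with an update-enable bit OR-ed in, e.g. (illustrative
 * value) a 0x8000-byte TX buffer is encoded as
 * (0x8000 >> HCLGE_BUF_SIZE_UNIT_SHIFT) | HCLGE_BUF_SIZE_UPDATE_EN_MSK
 * = 0x100 | 0x8000 = 0x8100.
 */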
1876 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1877 				    struct hclge_pkt_buf_alloc *buf_alloc)
1878 {
1879 /* TX buffer size is in units of 128 bytes */
1880 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1881 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1882 	struct hclge_tx_buff_alloc_cmd *req;
1883 	struct hclge_desc desc;
1884 	int ret;
1885 	u8 i;
1886 
1887 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1888 
1889 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1890 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1891 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1892 
1893 		req->tx_pkt_buff[i] =
1894 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1895 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1896 	}
1897 
1898 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1899 	if (ret)
1900 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1901 			ret);
1902 
1903 	return ret;
1904 }
1905 
1906 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1907 				 struct hclge_pkt_buf_alloc *buf_alloc)
1908 {
1909 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1910 
1911 	if (ret)
1912 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1913 
1914 	return ret;
1915 }
1916 
1917 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1918 {
1919 	unsigned int i;
1920 	u32 cnt = 0;
1921 
1922 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1923 		if (hdev->hw_tc_map & BIT(i))
1924 			cnt++;
1925 	return cnt;
1926 }
1927 
1928 /* Get the number of PFC-enabled TCs that have a private buffer */
1929 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1930 				  struct hclge_pkt_buf_alloc *buf_alloc)
1931 {
1932 	struct hclge_priv_buf *priv;
1933 	unsigned int i;
1934 	int cnt = 0;
1935 
1936 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1937 		priv = &buf_alloc->priv_buf[i];
1938 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1939 		    priv->enable)
1940 			cnt++;
1941 	}
1942 
1943 	return cnt;
1944 }
1945 
1946 /* Get the number of PFC-disabled TCs that have a private buffer */
1947 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1948 				     struct hclge_pkt_buf_alloc *buf_alloc)
1949 {
1950 	struct hclge_priv_buf *priv;
1951 	unsigned int i;
1952 	int cnt = 0;
1953 
1954 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1955 		priv = &buf_alloc->priv_buf[i];
1956 		if (hdev->hw_tc_map & BIT(i) &&
1957 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1958 		    priv->enable)
1959 			cnt++;
1960 	}
1961 
1962 	return cnt;
1963 }
1964 
1965 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1966 {
1967 	struct hclge_priv_buf *priv;
1968 	u32 rx_priv = 0;
1969 	int i;
1970 
1971 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1972 		priv = &buf_alloc->priv_buf[i];
1973 		if (priv->enable)
1974 			rx_priv += priv->buf_size;
1975 	}
1976 	return rx_priv;
1977 }
1978 
1979 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1980 {
1981 	u32 i, total_tx_size = 0;
1982 
1983 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1984 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1985 
1986 	return total_tx_size;
1987 }
1988 
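/* Check whether rx_all can hold every private buffer plus a sane shared
 * buffer, and fill in the shared-buffer watermarks if so. A worked example
 * with assumed numbers: mps = 1500 gives aligned_mps = 1536; on a
 * DCB-capable device with two TCs the minimum shared buffer is
 * max(2 * 1536 + dv_buf_size, 2 * 1536 + 1536) rounded up to
 * HCLGE_BUF_SIZE_UNIT, and rx_all must cover that plus every private
 * buffer already allocated.
 */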
1989 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1990 				struct hclge_pkt_buf_alloc *buf_alloc,
1991 				u32 rx_all)
1992 {
1993 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1994 	u32 tc_num = hclge_get_tc_num(hdev);
1995 	u32 shared_buf, aligned_mps;
1996 	u32 rx_priv;
1997 	int i;
1998 
1999 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2000 
2001 	if (hnae3_dev_dcb_supported(hdev))
2002 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2003 					hdev->dv_buf_size;
2004 	else
2005 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2006 					+ hdev->dv_buf_size;
2007 
2008 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2009 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2010 			     HCLGE_BUF_SIZE_UNIT);
2011 
2012 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2013 	if (rx_all < rx_priv + shared_std)
2014 		return false;
2015 
2016 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2017 	buf_alloc->s_buf.buf_size = shared_buf;
2018 	if (hnae3_dev_dcb_supported(hdev)) {
2019 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2020 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2021 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2022 				  HCLGE_BUF_SIZE_UNIT);
2023 	} else {
2024 		buf_alloc->s_buf.self.high = aligned_mps +
2025 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2026 		buf_alloc->s_buf.self.low = aligned_mps;
2027 	}
2028 
2029 	if (hnae3_dev_dcb_supported(hdev)) {
2030 		hi_thrd = shared_buf - hdev->dv_buf_size;
2031 
2032 		if (tc_num <= NEED_RESERVE_TC_NUM)
2033 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2034 					/ BUF_MAX_PERCENT;
2035 
2036 		if (tc_num)
2037 			hi_thrd = hi_thrd / tc_num;
2038 
2039 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2040 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2041 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2042 	} else {
2043 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2044 		lo_thrd = aligned_mps;
2045 	}
2046 
2047 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2048 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2049 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2050 	}
2051 
2052 	return true;
2053 }
2054 
2055 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2056 				struct hclge_pkt_buf_alloc *buf_alloc)
2057 {
2058 	u32 i, total_size;
2059 
2060 	total_size = hdev->pkt_buf_size;
2061 
2062 	/* alloc tx buffer for all enabled tc */
2063 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2065 
2066 		if (hdev->hw_tc_map & BIT(i)) {
2067 			if (total_size < hdev->tx_buf_size)
2068 				return -ENOMEM;
2069 
2070 			priv->tx_buf_size = hdev->tx_buf_size;
2071 		} else {
2072 			priv->tx_buf_size = 0;
2073 		}
2074 
2075 		total_size -= priv->tx_buf_size;
2076 	}
2077 
2078 	return 0;
2079 }
2080 
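/* Compute the private RX buffer and watermarks for every enabled TC, in
 * either the "max" or the reduced layout. With the same assumed
 * aligned_mps = 1536 and max = true, a PFC-enabled TC gets wl.low = 1536,
 * wl.high = roundup(1536 + 1536, 256) = 3072 and
 * buf_size = 3072 + dv_buf_size; a non-PFC TC gets wl.low = 0 and
 * wl.high = 2 * 1536 = 3072.
 */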
2081 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2082 				  struct hclge_pkt_buf_alloc *buf_alloc)
2083 {
2084 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2085 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2086 	unsigned int i;
2087 
2088 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2089 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2090 
2091 		priv->enable = 0;
2092 		priv->wl.low = 0;
2093 		priv->wl.high = 0;
2094 		priv->buf_size = 0;
2095 
2096 		if (!(hdev->hw_tc_map & BIT(i)))
2097 			continue;
2098 
2099 		priv->enable = 1;
2100 
2101 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2102 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2103 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2104 						HCLGE_BUF_SIZE_UNIT);
2105 		} else {
2106 			priv->wl.low = 0;
2107 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2108 					aligned_mps;
2109 		}
2110 
2111 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2112 	}
2113 
2114 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2115 }
2116 
2117 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2118 					  struct hclge_pkt_buf_alloc *buf_alloc)
2119 {
2120 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2121 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2122 	int i;
2123 
2124 	/* clear from the last TC first */
2125 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2126 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2127 		unsigned int mask = BIT((unsigned int)i);
2128 
2129 		if (hdev->hw_tc_map & mask &&
2130 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2131 			/* Clear the private buffer of a non-PFC TC */
2132 			priv->wl.low = 0;
2133 			priv->wl.high = 0;
2134 			priv->buf_size = 0;
2135 			priv->enable = 0;
2136 			no_pfc_priv_num--;
2137 		}
2138 
2139 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2140 		    no_pfc_priv_num == 0)
2141 			break;
2142 	}
2143 
2144 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2145 }
2146 
2147 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2148 					struct hclge_pkt_buf_alloc *buf_alloc)
2149 {
2150 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2151 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2152 	int i;
2153 
2154 	/* clear from the last TC first */
2155 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2156 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2157 		unsigned int mask = BIT((unsigned int)i);
2158 
2159 		if (hdev->hw_tc_map & mask &&
2160 		    hdev->tm_info.hw_pfc_map & mask) {
2161 			/* Reduce the number of PFC-enabled TCs with a private buffer */
2162 			priv->wl.low = 0;
2163 			priv->enable = 0;
2164 			priv->wl.high = 0;
2165 			priv->buf_size = 0;
2166 			pfc_priv_num--;
2167 		}
2168 
2169 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2170 		    pfc_priv_num == 0)
2171 			break;
2172 	}
2173 
2174 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2175 }
2176 
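/* Try to satisfy the whole RX demand with private buffers only (no shared
 * buffer). A worked example with assumed numbers: with mps = 1500 the
 * per-TC minimum is
 * min_rx_priv = dv_buf_size + 0x3C00 + 5 * 750 = dv_buf_size + 19110,
 * rounded up to HCLGE_BUF_SIZE_UNIT; the split only succeeds if the
 * per-TC share of the remaining packet buffer reaches that minimum.
 */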
2177 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2178 				       struct hclge_pkt_buf_alloc *buf_alloc)
2179 {
2180 #define COMPENSATE_BUFFER	0x3C00
2181 #define COMPENSATE_HALF_MPS_NUM	5
2182 #define PRIV_WL_GAP		0x1800
2183 
2184 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2185 	u32 tc_num = hclge_get_tc_num(hdev);
2186 	u32 half_mps = hdev->mps >> 1;
2187 	u32 min_rx_priv;
2188 	unsigned int i;
2189 
2190 	if (tc_num)
2191 		rx_priv = rx_priv / tc_num;
2192 
2193 	if (tc_num <= NEED_RESERVE_TC_NUM)
2194 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2195 
2196 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2197 			COMPENSATE_HALF_MPS_NUM * half_mps;
2198 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2199 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2200 	if (rx_priv < min_rx_priv)
2201 		return false;
2202 
2203 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2204 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2205 
2206 		priv->enable = 0;
2207 		priv->wl.low = 0;
2208 		priv->wl.high = 0;
2209 		priv->buf_size = 0;
2210 
2211 		if (!(hdev->hw_tc_map & BIT(i)))
2212 			continue;
2213 
2214 		priv->enable = 1;
2215 		priv->buf_size = rx_priv;
2216 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2217 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2218 	}
2219 
2220 	buf_alloc->s_buf.buf_size = 0;
2221 
2222 	return true;
2223 }
2224 
2225 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2226  * @hdev: pointer to struct hclge_dev
2227  * @buf_alloc: pointer to buffer calculation data
2228  * @return: 0: calculation successful, negative: fail
2229  */
2230 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2231 				struct hclge_pkt_buf_alloc *buf_alloc)
2232 {
2233 	/* When DCB is not supported, rx private buffer is not allocated. */
2234 	if (!hnae3_dev_dcb_supported(hdev)) {
2235 		u32 rx_all = hdev->pkt_buf_size;
2236 
2237 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2238 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2239 			return -ENOMEM;
2240 
2241 		return 0;
2242 	}
2243 
2244 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2245 		return 0;
2246 
2247 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2248 		return 0;
2249 
2250 	/* try to decrease the buffer size */
2251 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2252 		return 0;
2253 
2254 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2255 		return 0;
2256 
2257 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2258 		return 0;
2259 
2260 	return -ENOMEM;
2261 }
2262 
2263 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2264 				   struct hclge_pkt_buf_alloc *buf_alloc)
2265 {
2266 	struct hclge_rx_priv_buff_cmd *req;
2267 	struct hclge_desc desc;
2268 	int ret;
2269 	int i;
2270 
2271 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2272 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2273 
2274 	/* Alloc private buffer for each TC */
2275 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2276 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2277 
2278 		req->buf_num[i] =
2279 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2280 		req->buf_num[i] |=
2281 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2282 	}
2283 
2284 	req->shared_buf =
2285 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2286 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2287 
2288 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2289 	if (ret)
2290 		dev_err(&hdev->pdev->dev,
2291 			"rx private buffer alloc cmd failed %d\n", ret);
2292 
2293 	return ret;
2294 }
2295 
2296 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2297 				   struct hclge_pkt_buf_alloc *buf_alloc)
2298 {
2299 	struct hclge_rx_priv_wl_buf *req;
2300 	struct hclge_priv_buf *priv;
2301 	struct hclge_desc desc[2];
2302 	int i, j;
2303 	int ret;
2304 
2305 	for (i = 0; i < 2; i++) {
2306 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2307 					   false);
2308 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2309 
2310 		/* The first descriptor sets the NEXT bit to 1 */
2311 		if (i == 0)
2312 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2313 		else
2314 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2315 
2316 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2317 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2318 
2319 			priv = &buf_alloc->priv_buf[idx];
2320 			req->tc_wl[j].high =
2321 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2322 			req->tc_wl[j].high |=
2323 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2324 			req->tc_wl[j].low =
2325 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2326 			req->tc_wl[j].low |=
2327 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2328 		}
2329 	}
2330 
2331 	/* Send 2 descriptors at one time */
2332 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2333 	if (ret)
2334 		dev_err(&hdev->pdev->dev,
2335 			"rx private waterline config cmd failed %d\n",
2336 			ret);
2337 	return ret;
2338 }
2339 
2340 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2341 				    struct hclge_pkt_buf_alloc *buf_alloc)
2342 {
2343 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2344 	struct hclge_rx_com_thrd *req;
2345 	struct hclge_desc desc[2];
2346 	struct hclge_tc_thrd *tc;
2347 	int i, j;
2348 	int ret;
2349 
2350 	for (i = 0; i < 2; i++) {
2351 		hclge_cmd_setup_basic_desc(&desc[i],
2352 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2353 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2354 
2355 		/* The first descriptor sets the NEXT bit to 1 */
2356 		if (i == 0)
2357 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2358 		else
2359 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2360 
2361 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2362 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2363 
2364 			req->com_thrd[j].high =
2365 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2366 			req->com_thrd[j].high |=
2367 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2368 			req->com_thrd[j].low =
2369 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2370 			req->com_thrd[j].low |=
2371 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2372 		}
2373 	}
2374 
2375 	/* Send 2 descriptors at one time */
2376 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2377 	if (ret)
2378 		dev_err(&hdev->pdev->dev,
2379 			"common threshold config cmd failed %d\n", ret);
2380 	return ret;
2381 }
2382 
2383 static int hclge_common_wl_config(struct hclge_dev *hdev,
2384 				  struct hclge_pkt_buf_alloc *buf_alloc)
2385 {
2386 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2387 	struct hclge_rx_com_wl *req;
2388 	struct hclge_desc desc;
2389 	int ret;
2390 
2391 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2392 
2393 	req = (struct hclge_rx_com_wl *)desc.data;
2394 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2395 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2396 
2397 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2398 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2399 
2400 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2401 	if (ret)
2402 		dev_err(&hdev->pdev->dev,
2403 			"common waterline config cmd failed %d\n", ret);
2404 
2405 	return ret;
2406 }
2407 
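/* Top-level packet-buffer setup: calculate and program the TX buffers,
 * then the per-TC RX private buffers, and finally (on DCB-capable devices
 * only) the private waterlines and common thresholds before the common
 * waterline. Each step logs and aborts on failure.
 */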
2408 int hclge_buffer_alloc(struct hclge_dev *hdev)
2409 {
2410 	struct hclge_pkt_buf_alloc *pkt_buf;
2411 	int ret;
2412 
2413 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2414 	if (!pkt_buf)
2415 		return -ENOMEM;
2416 
2417 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2418 	if (ret) {
2419 		dev_err(&hdev->pdev->dev,
2420 			"could not calc tx buffer size for all TCs %d\n", ret);
2421 		goto out;
2422 	}
2423 
2424 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2425 	if (ret) {
2426 		dev_err(&hdev->pdev->dev,
2427 			"could not alloc tx buffers %d\n", ret);
2428 		goto out;
2429 	}
2430 
2431 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2432 	if (ret) {
2433 		dev_err(&hdev->pdev->dev,
2434 			"could not calc rx priv buffer size for all TCs %d\n",
2435 			ret);
2436 		goto out;
2437 	}
2438 
2439 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2440 	if (ret) {
2441 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2442 			ret);
2443 		goto out;
2444 	}
2445 
2446 	if (hnae3_dev_dcb_supported(hdev)) {
2447 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2448 		if (ret) {
2449 			dev_err(&hdev->pdev->dev,
2450 				"could not configure rx private waterline %d\n",
2451 				ret);
2452 			goto out;
2453 		}
2454 
2455 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2456 		if (ret) {
2457 			dev_err(&hdev->pdev->dev,
2458 				"could not configure common threshold %d\n",
2459 				ret);
2460 			goto out;
2461 		}
2462 	}
2463 
2464 	ret = hclge_common_wl_config(hdev, pkt_buf);
2465 	if (ret)
2466 		dev_err(&hdev->pdev->dev,
2467 			"could not configure common waterline %d\n", ret);
2468 
2469 out:
2470 	kfree(pkt_buf);
2471 	return ret;
2472 }
2473 
2474 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2475 {
2476 	struct hnae3_handle *roce = &vport->roce;
2477 	struct hnae3_handle *nic = &vport->nic;
2478 	struct hclge_dev *hdev = vport->back;
2479 
2480 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2481 
2482 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2483 		return -EINVAL;
2484 
2485 	roce->rinfo.base_vector = hdev->roce_base_vector;
2486 
2487 	roce->rinfo.netdev = nic->kinfo.netdev;
2488 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2489 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2490 
2491 	roce->pdev = nic->pdev;
2492 	roce->ae_algo = nic->ae_algo;
2493 	roce->numa_node_mask = nic->numa_node_mask;
2494 
2495 	return 0;
2496 }
2497 
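/* Allocate MSI/MSI-X vectors. The layout assumed by the rest of the
 * driver is: vector 0 serves the misc (mailbox/reset/error) interrupt,
 * the NIC queue vectors follow, and the RoCE vectors start at
 * base_msi_vector + num_nic_msi, as set up below.
 */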
2498 static int hclge_init_msi(struct hclge_dev *hdev)
2499 {
2500 	struct pci_dev *pdev = hdev->pdev;
2501 	int vectors;
2502 	int i;
2503 
2504 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2505 					hdev->num_msi,
2506 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2507 	if (vectors < 0) {
2508 		dev_err(&pdev->dev,
2509 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2510 			vectors);
2511 		return vectors;
2512 	}
2513 	if (vectors < hdev->num_msi)
2514 		dev_warn(&hdev->pdev->dev,
2515 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2516 			 hdev->num_msi, vectors);
2517 
2518 	hdev->num_msi = vectors;
2519 	hdev->num_msi_left = vectors;
2520 
2521 	hdev->base_msi_vector = pdev->irq;
2522 	hdev->roce_base_vector = hdev->base_msi_vector +
2523 				hdev->num_nic_msi;
2524 
2525 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2526 					   sizeof(u16), GFP_KERNEL);
2527 	if (!hdev->vector_status) {
2528 		pci_free_irq_vectors(pdev);
2529 		return -ENOMEM;
2530 	}
2531 
2532 	for (i = 0; i < hdev->num_msi; i++)
2533 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2534 
2535 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2536 					sizeof(int), GFP_KERNEL);
2537 	if (!hdev->vector_irq) {
2538 		pci_free_irq_vectors(pdev);
2539 		return -ENOMEM;
2540 	}
2541 
2542 	return 0;
2543 }
2544 
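/* Only 10M and 100M links may run half duplex; any faster speed is forced
 * to full duplex. For example (assuming HCLGE_MAC_HALF is the half-duplex
 * value defined alongside HCLGE_MAC_FULL),
 * hclge_check_speed_dup(HCLGE_MAC_HALF, HCLGE_MAC_SPEED_1G) returns
 * HCLGE_MAC_FULL.
 */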
2545 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2546 {
2547 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2548 		duplex = HCLGE_MAC_FULL;
2549 
2550 	return duplex;
2551 }
2552 
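/* Speed-to-field encoding used by the command below:
 *	1G -> 0, 10G -> 1, 25G -> 2, 40G -> 3, 50G -> 4, 100G -> 5,
 *	10M -> 6, 100M -> 7, 200G -> 8
 */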
2553 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2554 				      u8 duplex)
2555 {
2556 	struct hclge_config_mac_speed_dup_cmd *req;
2557 	struct hclge_desc desc;
2558 	int ret;
2559 
2560 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2561 
2562 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2563 
2564 	if (duplex)
2565 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2566 
2567 	switch (speed) {
2568 	case HCLGE_MAC_SPEED_10M:
2569 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 				HCLGE_CFG_SPEED_S, 6);
2571 		break;
2572 	case HCLGE_MAC_SPEED_100M:
2573 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 				HCLGE_CFG_SPEED_S, 7);
2575 		break;
2576 	case HCLGE_MAC_SPEED_1G:
2577 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578 				HCLGE_CFG_SPEED_S, 0);
2579 		break;
2580 	case HCLGE_MAC_SPEED_10G:
2581 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582 				HCLGE_CFG_SPEED_S, 1);
2583 		break;
2584 	case HCLGE_MAC_SPEED_25G:
2585 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 				HCLGE_CFG_SPEED_S, 2);
2587 		break;
2588 	case HCLGE_MAC_SPEED_40G:
2589 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 				HCLGE_CFG_SPEED_S, 3);
2591 		break;
2592 	case HCLGE_MAC_SPEED_50G:
2593 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 				HCLGE_CFG_SPEED_S, 4);
2595 		break;
2596 	case HCLGE_MAC_SPEED_100G:
2597 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598 				HCLGE_CFG_SPEED_S, 5);
2599 		break;
2600 	case HCLGE_MAC_SPEED_200G:
2601 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602 				HCLGE_CFG_SPEED_S, 8);
2603 		break;
2604 	default:
2605 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2606 		return -EINVAL;
2607 	}
2608 
2609 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2610 		      1);
2611 
2612 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2613 	if (ret) {
2614 		dev_err(&hdev->pdev->dev,
2615 			"mac speed/duplex config cmd failed %d.\n", ret);
2616 		return ret;
2617 	}
2618 
2619 	return 0;
2620 }
2621 
2622 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2623 {
2624 	struct hclge_mac *mac = &hdev->hw.mac;
2625 	int ret;
2626 
2627 	duplex = hclge_check_speed_dup(duplex, speed);
2628 	if (!mac->support_autoneg && mac->speed == speed &&
2629 	    mac->duplex == duplex)
2630 		return 0;
2631 
2632 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2633 	if (ret)
2634 		return ret;
2635 
2636 	hdev->hw.mac.speed = speed;
2637 	hdev->hw.mac.duplex = duplex;
2638 
2639 	return 0;
2640 }
2641 
2642 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2643 				     u8 duplex)
2644 {
2645 	struct hclge_vport *vport = hclge_get_vport(handle);
2646 	struct hclge_dev *hdev = vport->back;
2647 
2648 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2649 }
2650 
2651 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2652 {
2653 	struct hclge_config_auto_neg_cmd *req;
2654 	struct hclge_desc desc;
2655 	u32 flag = 0;
2656 	int ret;
2657 
2658 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2659 
2660 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2661 	if (enable)
2662 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2663 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2664 
2665 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2666 	if (ret)
2667 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2668 			ret);
2669 
2670 	return ret;
2671 }
2672 
2673 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2674 {
2675 	struct hclge_vport *vport = hclge_get_vport(handle);
2676 	struct hclge_dev *hdev = vport->back;
2677 
2678 	if (!hdev->hw.mac.support_autoneg) {
2679 		if (enable) {
2680 			dev_err(&hdev->pdev->dev,
2681 				"autoneg is not supported by current port\n");
2682 			return -EOPNOTSUPP;
2683 		} else {
2684 			return 0;
2685 		}
2686 	}
2687 
2688 	return hclge_set_autoneg_en(hdev, enable);
2689 }
2690 
2691 static int hclge_get_autoneg(struct hnae3_handle *handle)
2692 {
2693 	struct hclge_vport *vport = hclge_get_vport(handle);
2694 	struct hclge_dev *hdev = vport->back;
2695 	struct phy_device *phydev = hdev->hw.mac.phydev;
2696 
2697 	if (phydev)
2698 		return phydev->autoneg;
2699 
2700 	return hdev->hw.mac.autoneg;
2701 }
2702 
2703 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2704 {
2705 	struct hclge_vport *vport = hclge_get_vport(handle);
2706 	struct hclge_dev *hdev = vport->back;
2707 	int ret;
2708 
2709 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2710 
2711 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2712 	if (ret)
2713 		return ret;
2714 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2715 }
2716 
2717 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2718 {
2719 	struct hclge_vport *vport = hclge_get_vport(handle);
2720 	struct hclge_dev *hdev = vport->back;
2721 
2722 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2723 		return hclge_set_autoneg_en(hdev, !halt);
2724 
2725 	return 0;
2726 }
2727 
2728 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2729 {
2730 	struct hclge_config_fec_cmd *req;
2731 	struct hclge_desc desc;
2732 	int ret;
2733 
2734 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2735 
2736 	req = (struct hclge_config_fec_cmd *)desc.data;
2737 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2738 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2739 	if (fec_mode & BIT(HNAE3_FEC_RS))
2740 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2741 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2742 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2743 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2744 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2745 
2746 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2747 	if (ret)
2748 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2749 
2750 	return ret;
2751 }
2752 
2753 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2754 {
2755 	struct hclge_vport *vport = hclge_get_vport(handle);
2756 	struct hclge_dev *hdev = vport->back;
2757 	struct hclge_mac *mac = &hdev->hw.mac;
2758 	int ret;
2759 
2760 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2761 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2762 		return -EINVAL;
2763 	}
2764 
2765 	ret = hclge_set_fec_hw(hdev, fec_mode);
2766 	if (ret)
2767 		return ret;
2768 
2769 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2770 	return 0;
2771 }
2772 
2773 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2774 			  u8 *fec_mode)
2775 {
2776 	struct hclge_vport *vport = hclge_get_vport(handle);
2777 	struct hclge_dev *hdev = vport->back;
2778 	struct hclge_mac *mac = &hdev->hw.mac;
2779 
2780 	if (fec_ability)
2781 		*fec_ability = mac->fec_ability;
2782 	if (fec_mode)
2783 		*fec_mode = mac->fec_mode;
2784 }
2785 
2786 static int hclge_mac_init(struct hclge_dev *hdev)
2787 {
2788 	struct hclge_mac *mac = &hdev->hw.mac;
2789 	int ret;
2790 
2791 	hdev->support_sfp_query = true;
2792 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2793 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2794 					 hdev->hw.mac.duplex);
2795 	if (ret)
2796 		return ret;
2797 
2798 	if (hdev->hw.mac.support_autoneg) {
2799 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2800 		if (ret)
2801 			return ret;
2802 	}
2803 
2804 	mac->link = 0;
2805 
2806 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2807 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2808 		if (ret)
2809 			return ret;
2810 	}
2811 
2812 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2813 	if (ret) {
2814 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2815 		return ret;
2816 	}
2817 
2818 	ret = hclge_set_default_loopback(hdev);
2819 	if (ret)
2820 		return ret;
2821 
2822 	ret = hclge_buffer_alloc(hdev);
2823 	if (ret)
2824 		dev_err(&hdev->pdev->dev,
2825 			"allocate buffer fail, ret=%d\n", ret);
2826 
2827 	return ret;
2828 }
2829 
2830 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2831 {
2832 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2834 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835 				    hclge_wq, &hdev->service_task, 0);
2836 }
2837 
2838 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2839 {
2840 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2842 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843 				    hclge_wq, &hdev->service_task, 0);
2844 }
2845 
2846 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2847 {
2848 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2850 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2851 				    hclge_wq, &hdev->service_task, 0);
2852 }
2853 
2854 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2855 {
2856 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2857 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2858 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2859 				    hclge_wq, &hdev->service_task,
2860 				    delay_time);
2861 }
2862 
2863 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2864 {
2865 	struct hclge_link_status_cmd *req;
2866 	struct hclge_desc desc;
2867 	int ret;
2868 
2869 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2870 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2871 	if (ret) {
2872 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2873 			ret);
2874 		return ret;
2875 	}
2876 
2877 	req = (struct hclge_link_status_cmd *)desc.data;
2878 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2879 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2880 
2881 	return 0;
2882 }
2883 
2884 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2885 {
2886 	struct phy_device *phydev = hdev->hw.mac.phydev;
2887 
2888 	*link_status = HCLGE_LINK_STATUS_DOWN;
2889 
2890 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2891 		return 0;
2892 
2893 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2894 		return 0;
2895 
2896 	return hclge_get_mac_link_status(hdev, link_status);
2897 }
2898 
2899 static void hclge_push_link_status(struct hclge_dev *hdev)
2900 {
2901 	struct hclge_vport *vport;
2902 	int ret;
2903 	u16 i;
2904 
2905 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2906 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2907 
2908 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2909 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2910 			continue;
2911 
2912 		ret = hclge_push_vf_link_status(vport);
2913 		if (ret) {
2914 			dev_err(&hdev->pdev->dev,
2915 				"failed to push link status to vf%u, ret = %d\n",
2916 				i, ret);
2917 		}
2918 	}
2919 }
2920 
2921 static void hclge_update_link_status(struct hclge_dev *hdev)
2922 {
2923 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2924 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2925 	struct hnae3_client *rclient = hdev->roce_client;
2926 	struct hnae3_client *client = hdev->nic_client;
2927 	int state;
2928 	int ret;
2929 
2930 	if (!client)
2931 		return;
2932 
2933 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2934 		return;
2935 
2936 	ret = hclge_get_mac_phy_link(hdev, &state);
2937 	if (ret) {
2938 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2939 		return;
2940 	}
2941 
2942 	if (state != hdev->hw.mac.link) {
2943 		client->ops->link_status_change(handle, state);
2944 		hclge_config_mac_tnl_int(hdev, state);
2945 		if (rclient && rclient->ops->link_status_change)
2946 			rclient->ops->link_status_change(rhandle, state);
2947 
2948 		hdev->hw.mac.link = state;
2949 		hclge_push_link_status(hdev);
2950 	}
2951 
2952 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2953 }
2954 
2955 static void hclge_update_port_capability(struct hclge_dev *hdev,
2956 					 struct hclge_mac *mac)
2957 {
2958 	if (hnae3_dev_fec_supported(hdev))
2959 		/* update FEC ability based on the speed */
2960 		hclge_convert_setting_fec(mac);
2961 
2962 	/* firmware cannot identify the backplane type, so the media type
2963 	 * read from the configuration helps to handle it
2964 	 */
2965 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2966 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2967 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2968 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2969 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2970 
2971 	if (mac->support_autoneg) {
2972 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2973 		linkmode_copy(mac->advertising, mac->supported);
2974 	} else {
2975 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2976 				   mac->supported);
2977 		linkmode_zero(mac->advertising);
2978 	}
2979 }
2980 
2981 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2982 {
2983 	struct hclge_sfp_info_cmd *resp;
2984 	struct hclge_desc desc;
2985 	int ret;
2986 
2987 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2988 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2989 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2990 	if (ret == -EOPNOTSUPP) {
2991 		dev_warn(&hdev->pdev->dev,
2992 			 "IMP does not support getting SFP speed %d\n", ret);
2993 		return ret;
2994 	} else if (ret) {
2995 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2996 		return ret;
2997 	}
2998 
2999 	*speed = le32_to_cpu(resp->speed);
3000 
3001 	return 0;
3002 }
3003 
3004 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3005 {
3006 	struct hclge_sfp_info_cmd *resp;
3007 	struct hclge_desc desc;
3008 	int ret;
3009 
3010 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3011 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3012 
3013 	resp->query_type = QUERY_ACTIVE_SPEED;
3014 
3015 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3016 	if (ret == -EOPNOTSUPP) {
3017 		dev_warn(&hdev->pdev->dev,
3018 			 "IMP does not support getting SFP info %d\n", ret);
3019 		return ret;
3020 	} else if (ret) {
3021 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3022 		return ret;
3023 	}
3024 
3025 	/* In some cases, the MAC speed got from the IMP may be 0; it
3026 	 * shouldn't be written to mac->speed.
3027 	 */
3028 	if (!le32_to_cpu(resp->speed))
3029 		return 0;
3030 
3031 	mac->speed = le32_to_cpu(resp->speed);
3032 	/* if resp->speed_ability is 0, the firmware is an old version,
3033 	 * so do not update these params
3034 	 */
3035 	if (resp->speed_ability) {
3036 		mac->module_type = le32_to_cpu(resp->module_type);
3037 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3038 		mac->autoneg = resp->autoneg;
3039 		mac->support_autoneg = resp->autoneg_ability;
3040 		mac->speed_type = QUERY_ACTIVE_SPEED;
3041 		if (!resp->active_fec)
3042 			mac->fec_mode = 0;
3043 		else
3044 			mac->fec_mode = BIT(resp->active_fec);
3045 	} else {
3046 		mac->speed_type = QUERY_SFP_SPEED;
3047 	}
3048 
3049 	return 0;
3050 }
3051 
3052 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3053 					struct ethtool_link_ksettings *cmd)
3054 {
3055 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3056 	struct hclge_vport *vport = hclge_get_vport(handle);
3057 	struct hclge_phy_link_ksetting_0_cmd *req0;
3058 	struct hclge_phy_link_ksetting_1_cmd *req1;
3059 	u32 supported, advertising, lp_advertising;
3060 	struct hclge_dev *hdev = vport->back;
3061 	int ret;
3062 
3063 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3064 				   true);
3065 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3066 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3067 				   true);
3068 
3069 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3070 	if (ret) {
3071 		dev_err(&hdev->pdev->dev,
3072 			"failed to get phy link ksetting, ret = %d.\n", ret);
3073 		return ret;
3074 	}
3075 
3076 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3077 	cmd->base.autoneg = req0->autoneg;
3078 	cmd->base.speed = le32_to_cpu(req0->speed);
3079 	cmd->base.duplex = req0->duplex;
3080 	cmd->base.port = req0->port;
3081 	cmd->base.transceiver = req0->transceiver;
3082 	cmd->base.phy_address = req0->phy_address;
3083 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3084 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3085 	supported = le32_to_cpu(req0->supported);
3086 	advertising = le32_to_cpu(req0->advertising);
3087 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3088 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3089 						supported);
3090 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3091 						advertising);
3092 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3093 						lp_advertising);
3094 
3095 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3096 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3097 	cmd->base.master_slave_state = req1->master_slave_state;
3098 
3099 	return 0;
3100 }
3101 
3102 static int
3103 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3104 			     const struct ethtool_link_ksettings *cmd)
3105 {
3106 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3107 	struct hclge_vport *vport = hclge_get_vport(handle);
3108 	struct hclge_phy_link_ksetting_0_cmd *req0;
3109 	struct hclge_phy_link_ksetting_1_cmd *req1;
3110 	struct hclge_dev *hdev = vport->back;
3111 	u32 advertising;
3112 	int ret;
3113 
3114 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3115 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3116 	     (cmd->base.duplex != DUPLEX_HALF &&
3117 	      cmd->base.duplex != DUPLEX_FULL)))
3118 		return -EINVAL;
3119 
3120 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3121 				   false);
3122 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3123 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3124 				   false);
3125 
3126 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3127 	req0->autoneg = cmd->base.autoneg;
3128 	req0->speed = cpu_to_le32(cmd->base.speed);
3129 	req0->duplex = cmd->base.duplex;
3130 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3131 						cmd->link_modes.advertising);
3132 	req0->advertising = cpu_to_le32(advertising);
3133 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3134 
3135 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3136 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3137 
3138 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3139 	if (ret) {
3140 		dev_err(&hdev->pdev->dev,
3141 			"failed to set phy link ksettings, ret = %d.\n", ret);
3142 		return ret;
3143 	}
3144 
3145 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3146 	hdev->hw.mac.speed = cmd->base.speed;
3147 	hdev->hw.mac.duplex = cmd->base.duplex;
3148 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3149 
3150 	return 0;
3151 }
3152 
3153 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3154 {
3155 	struct ethtool_link_ksettings cmd;
3156 	int ret;
3157 
3158 	if (!hnae3_dev_phy_imp_supported(hdev))
3159 		return 0;
3160 
3161 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3162 	if (ret)
3163 		return ret;
3164 
3165 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3166 	hdev->hw.mac.speed = cmd.base.speed;
3167 	hdev->hw.mac.duplex = cmd.base.duplex;
3168 
3169 	return 0;
3170 }
3171 
3172 static int hclge_tp_port_init(struct hclge_dev *hdev)
3173 {
3174 	struct ethtool_link_ksettings cmd;
3175 
3176 	if (!hnae3_dev_phy_imp_supported(hdev))
3177 		return 0;
3178 
3179 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3180 	cmd.base.speed = hdev->hw.mac.speed;
3181 	cmd.base.duplex = hdev->hw.mac.duplex;
3182 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3183 
3184 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3185 }
3186 
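/* Refresh the port information. Copper ports are refreshed through the
 * PHY link ksettings (only when the PHY is managed by the IMP); non-copper
 * ports query the SFP/qSFP: full SFP info on V2+ firmware, speed only on
 * older firmware. An -EOPNOTSUPP answer disables further SFP queries.
 */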
3187 static int hclge_update_port_info(struct hclge_dev *hdev)
3188 {
3189 	struct hclge_mac *mac = &hdev->hw.mac;
3190 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3191 	int ret;
3192 
3193 	/* get the port info from SFP cmd if not copper port */
3194 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3195 		return hclge_update_tp_port_info(hdev);
3196 
3197 	/* if IMP does not support getting SFP/qSFP info, return directly */
3198 	if (!hdev->support_sfp_query)
3199 		return 0;
3200 
3201 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3202 		ret = hclge_get_sfp_info(hdev, mac);
3203 	else
3204 		ret = hclge_get_sfp_speed(hdev, &speed);
3205 
3206 	if (ret == -EOPNOTSUPP) {
3207 		hdev->support_sfp_query = false;
3208 		return ret;
3209 	} else if (ret) {
3210 		return ret;
3211 	}
3212 
3213 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3214 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3215 			hclge_update_port_capability(hdev, mac);
3216 			return 0;
3217 		}
3218 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3219 					       HCLGE_MAC_FULL);
3220 	} else {
3221 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3222 			return 0; /* do nothing if no SFP */
3223 
3224 		/* must configure full duplex for SFP */
3225 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3226 	}
3227 }
3228 
3229 static int hclge_get_status(struct hnae3_handle *handle)
3230 {
3231 	struct hclge_vport *vport = hclge_get_vport(handle);
3232 	struct hclge_dev *hdev = vport->back;
3233 
3234 	hclge_update_link_status(hdev);
3235 
3236 	return hdev->hw.mac.link;
3237 }
3238 
3239 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3240 {
3241 	if (!pci_num_vf(hdev->pdev)) {
3242 		dev_err(&hdev->pdev->dev,
3243 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3244 		return NULL;
3245 	}
3246 
3247 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3248 		dev_err(&hdev->pdev->dev,
3249 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3250 			vf, pci_num_vf(hdev->pdev));
3251 		return NULL;
3252 	}
3253 
3254 	/* VFs start from index 1 in the vport array */
3255 	vf += HCLGE_VF_VPORT_START_NUM;
3256 	return &hdev->vport[vf];
3257 }
3258 
3259 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3260 			       struct ifla_vf_info *ivf)
3261 {
3262 	struct hclge_vport *vport = hclge_get_vport(handle);
3263 	struct hclge_dev *hdev = vport->back;
3264 
3265 	vport = hclge_get_vf_vport(hdev, vf);
3266 	if (!vport)
3267 		return -EINVAL;
3268 
3269 	ivf->vf = vf;
3270 	ivf->linkstate = vport->vf_info.link_state;
3271 	ivf->spoofchk = vport->vf_info.spoofchk;
3272 	ivf->trusted = vport->vf_info.trusted;
3273 	ivf->min_tx_rate = 0;
3274 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3275 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3276 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3277 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3278 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3279 
3280 	return 0;
3281 }
3282 
3283 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3284 				   int link_state)
3285 {
3286 	struct hclge_vport *vport = hclge_get_vport(handle);
3287 	struct hclge_dev *hdev = vport->back;
3288 	int link_state_old;
3289 	int ret;
3290 
3291 	vport = hclge_get_vf_vport(hdev, vf);
3292 	if (!vport)
3293 		return -EINVAL;
3294 
3295 	link_state_old = vport->vf_info.link_state;
3296 	vport->vf_info.link_state = link_state;
3297 
3298 	ret = hclge_push_vf_link_status(vport);
3299 	if (ret) {
3300 		vport->vf_info.link_state = link_state_old;
3301 		dev_err(&hdev->pdev->dev,
3302 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3303 	}
3304 
3305 	return ret;
3306 }
3307 
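/* Decode the vector-0 interrupt source. Events are checked in priority
 * order: IMP reset, then global reset, then MSI-X/RAS hardware errors,
 * then mailbox (CMDQ RX); *clearval tells the caller which status bits
 * to clear for reset and mailbox events.
 */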
3308 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3309 {
3310 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3311 
3312 	/* fetch the events from their corresponding regs */
3313 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3314 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3315 	hw_err_src_reg = hclge_read_dev(&hdev->hw,
3316 					HCLGE_RAS_PF_OTHER_INT_STS_REG);
3317 
3318 	/* Assumption: if reset and mailbox events are reported together, we
3319 	 * only process the reset event in this pass and defer handling of the
3320 	 * mailbox events. Since we would not have cleared the RX CMDQ event
3321 	 * this time, the H/W will raise another interrupt just for the
3322 	 * mailbox.
3323 	 *
3324 	 * check for vector0 reset event sources
3325 	 */
3326 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3327 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3328 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3329 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3330 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3331 		hdev->rst_stats.imp_rst_cnt++;
3332 		return HCLGE_VECTOR0_EVENT_RST;
3333 	}
3334 
3335 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3336 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3337 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3338 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3339 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3340 		hdev->rst_stats.global_rst_cnt++;
3341 		return HCLGE_VECTOR0_EVENT_RST;
3342 	}
3343 
3344 	/* check for vector0 msix event and hardware error event source */
3345 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3346 	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3347 		return HCLGE_VECTOR0_EVENT_ERR;
3348 
3349 	/* check for vector0 mailbox(=CMDQ RX) event source */
3350 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3351 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3352 		*clearval = cmdq_src_reg;
3353 		return HCLGE_VECTOR0_EVENT_MBX;
3354 	}
3355 
3356 	/* print other vector0 event source */
3357 	dev_info(&hdev->pdev->dev,
3358 		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3359 		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3360 
3361 	return HCLGE_VECTOR0_EVENT_OTHER;
3362 }
3363 
3364 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3365 				    u32 regclr)
3366 {
3367 	switch (event_type) {
3368 	case HCLGE_VECTOR0_EVENT_RST:
3369 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3370 		break;
3371 	case HCLGE_VECTOR0_EVENT_MBX:
3372 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3373 		break;
3374 	default:
3375 		break;
3376 	}
3377 }
3378 
3379 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3380 {
3381 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3382 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3383 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3384 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3385 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3386 }
3387 
3388 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3389 {
3390 	writel(enable ? 1 : 0, vector->addr);
3391 }
3392 
3393 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3394 {
3395 	struct hclge_dev *hdev = data;
3396 	u32 clearval = 0;
3397 	u32 event_cause;
3398 
3399 	hclge_enable_vector(&hdev->misc_vector, false);
3400 	event_cause = hclge_check_event_cause(hdev, &clearval);
3401 
3402 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3403 	switch (event_cause) {
3404 	case HCLGE_VECTOR0_EVENT_ERR:
3405 		hclge_errhand_task_schedule(hdev);
3406 		break;
3407 	case HCLGE_VECTOR0_EVENT_RST:
3408 		hclge_reset_task_schedule(hdev);
3409 		break;
3410 	case HCLGE_VECTOR0_EVENT_MBX:
3411 		/* If we are here then,
3412 		 * 1. either we are not handling any mbx task and none is
3413 		 *    scheduled,
3414 		 *                        OR
3415 		 * 2. we are handling a mbx task but nothing more is
3416 		 *    scheduled.
3417 		 * In both cases, we should schedule the mbx task, as this
3418 		 * interrupt has reported more mbx messages.
3419 		 */
3420 		hclge_mbx_task_schedule(hdev);
3421 		break;
3422 	default:
3423 		dev_warn(&hdev->pdev->dev,
3424 			 "received unknown or unhandled event of vector0\n");
3425 		break;
3426 	}
3427 
3428 	hclge_clear_event_cause(hdev, event_cause, clearval);
3429 
3430 	/* Enable the interrupt if it was not caused by a reset or error event */
3431 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3432 	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3433 		hclge_enable_vector(&hdev->misc_vector, true);
3434 
3435 	return IRQ_HANDLED;
3436 }
3437 
3438 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3439 {
3440 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3441 		dev_warn(&hdev->pdev->dev,
3442 			 "vector(vector_id %d) has been freed.\n", vector_id);
3443 		return;
3444 	}
3445 
3446 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3447 	hdev->num_msi_left += 1;
3448 	hdev->num_msi_used -= 1;
3449 }
3450 
3451 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3452 {
3453 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3454 
3455 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3456 
3457 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3458 	hdev->vector_status[0] = 0;
3459 
3460 	hdev->num_msi_left -= 1;
3461 	hdev->num_msi_used += 1;
3462 }
3463 
3464 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3465 				      const cpumask_t *mask)
3466 {
3467 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3468 					      affinity_notify);
3469 
3470 	cpumask_copy(&hdev->affinity_mask, mask);
3471 }
3472 
3473 static void hclge_irq_affinity_release(struct kref *ref)
3474 {
3475 }
3476 
3477 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3478 {
3479 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3480 			      &hdev->affinity_mask);
3481 
3482 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3483 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3484 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3485 				  &hdev->affinity_notify);
3486 }
3487 
3488 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3489 {
3490 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3491 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3492 }
3493 
3494 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3495 {
3496 	int ret;
3497 
3498 	hclge_get_misc_vector(hdev);
3499 
3500 	/* this irq is freed explicitly in hclge_misc_irq_uninit() */
3501 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3502 		 HCLGE_NAME, pci_name(hdev->pdev));
3503 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3504 			  0, hdev->misc_vector.name, hdev);
3505 	if (ret) {
3506 		hclge_free_vector(hdev, 0);
3507 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3508 			hdev->misc_vector.vector_irq);
3509 	}
3510 
3511 	return ret;
3512 }
3513 
3514 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3515 {
3516 	free_irq(hdev->misc_vector.vector_irq, hdev);
3517 	hclge_free_vector(hdev, 0);
3518 }
3519 
3520 int hclge_notify_client(struct hclge_dev *hdev,
3521 			enum hnae3_reset_notify_type type)
3522 {
3523 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3524 	struct hnae3_client *client = hdev->nic_client;
3525 	int ret;
3526 
3527 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3528 		return 0;
3529 
3530 	if (!client->ops->reset_notify)
3531 		return -EOPNOTSUPP;
3532 
3533 	ret = client->ops->reset_notify(handle, type);
3534 	if (ret)
3535 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3536 			type, ret);
3537 
3538 	return ret;
3539 }
3540 
3541 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3542 				    enum hnae3_reset_notify_type type)
3543 {
3544 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3545 	struct hnae3_client *client = hdev->roce_client;
3546 	int ret;
3547 
3548 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3549 		return 0;
3550 
3551 	if (!client->ops->reset_notify)
3552 		return -EOPNOTSUPP;
3553 
3554 	ret = client->ops->reset_notify(handle, type);
3555 	if (ret)
3556 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)\n",
3557 			type, ret);
3558 
3559 	return ret;
3560 }
3561 
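/* Poll the reset status register matching the current reset type until
 * its busy bit clears; return -EBUSY if the bit is still set after
 * HCLGE_RESET_WAIT_CNT polls.
 */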
3562 static int hclge_reset_wait(struct hclge_dev *hdev)
3563 {
3564 #define HCLGE_RESET_WAIT_MS	100
3565 #define HCLGE_RESET_WAIT_CNT	350
3566 
3567 	u32 val, reg, reg_bit;
3568 	u32 cnt = 0;
3569 
3570 	switch (hdev->reset_type) {
3571 	case HNAE3_IMP_RESET:
3572 		reg = HCLGE_GLOBAL_RESET_REG;
3573 		reg_bit = HCLGE_IMP_RESET_BIT;
3574 		break;
3575 	case HNAE3_GLOBAL_RESET:
3576 		reg = HCLGE_GLOBAL_RESET_REG;
3577 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3578 		break;
3579 	case HNAE3_FUNC_RESET:
3580 		reg = HCLGE_FUN_RST_ING;
3581 		reg_bit = HCLGE_FUN_RST_ING_B;
3582 		break;
3583 	default:
3584 		dev_err(&hdev->pdev->dev,
3585 			"Wait for unsupported reset type: %d\n",
3586 			hdev->reset_type);
3587 		return -EINVAL;
3588 	}
3589 
3590 	val = hclge_read_dev(&hdev->hw, reg);
3591 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3592 		msleep(HCLGE_RESET_WAIT_MS);
3593 		val = hclge_read_dev(&hdev->hw, reg);
3594 		cnt++;
3595 	}
3596 
3597 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3598 		dev_warn(&hdev->pdev->dev,
3599 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3600 		return -EBUSY;
3601 	}
3602 
3603 	return 0;
3604 }
3605 
3606 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3607 {
3608 	struct hclge_vf_rst_cmd *req;
3609 	struct hclge_desc desc;
3610 
3611 	req = (struct hclge_vf_rst_cmd *)desc.data;
3612 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3613 	req->dest_vfid = func_id;
3614 
3615 	if (reset)
3616 		req->vf_rst = 0x1;
3617 
3618 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3619 }
3620 
3621 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3622 {
3623 	int i;
3624 
3625 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3626 		struct hclge_vport *vport = &hdev->vport[i];
3627 		int ret;
3628 
3629 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3630 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3631 		if (ret) {
3632 			dev_err(&hdev->pdev->dev,
3633 				"set vf(%u) rst failed %d!\n",
3634 				vport->vport_id, ret);
3635 			return ret;
3636 		}
3637 
3638 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3639 			continue;
3640 
3641 		/* Inform VF to process the reset.
3642 		 * hclge_inform_reset_assert_to_vf may fail if VF
3643 		 * driver is not loaded.
3644 		 */
3645 		ret = hclge_inform_reset_assert_to_vf(vport);
3646 		if (ret)
3647 			dev_warn(&hdev->pdev->dev,
3648 				 "inform reset to vf(%u) failed %d!\n",
3649 				 vport->vport_id, ret);
3650 	}
3651 
3652 	return 0;
3653 }
3654 
3655 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3656 {
3657 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3658 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3659 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3660 		return;
3661 
3662 	hclge_mbx_handler(hdev);
3663 
3664 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3665 }
3666 
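/* Wait for all VFs to report that they are ready for the function reset.
 * The mailbox task is serviced inside the loop so that VFs can bring
 * their netdevs down; warn and give up after HCLGE_PF_RESET_SYNC_CNT
 * polls.
 */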
3667 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3668 {
3669 	struct hclge_pf_rst_sync_cmd *req;
3670 	struct hclge_desc desc;
3671 	int cnt = 0;
3672 	int ret;
3673 
3674 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3675 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3676 
3677 	do {
3678 		/* the VF needs to down its netdev via mbx during PF or FLR reset */
3679 		hclge_mailbox_service_task(hdev);
3680 
3681 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3682 		/* to be compatible with old firmware, wait
3683 		 * 100 ms for the VF to stop IO
3684 		 */
3685 		if (ret == -EOPNOTSUPP) {
3686 			msleep(HCLGE_RESET_SYNC_TIME);
3687 			return;
3688 		} else if (ret) {
3689 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3690 				 ret);
3691 			return;
3692 		} else if (req->all_vf_ready) {
3693 			return;
3694 		}
3695 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3696 		hclge_cmd_reuse_desc(&desc, true);
3697 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3698 
3699 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3700 }
3701 
3702 void hclge_report_hw_error(struct hclge_dev *hdev,
3703 			   enum hnae3_hw_error_type type)
3704 {
3705 	struct hnae3_client *client = hdev->nic_client;
3706 
3707 	if (!client || !client->ops->process_hw_error ||
3708 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3709 		return;
3710 
3711 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3712 }
3713 
3714 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3715 {
3716 	u32 reg_val;
3717 
3718 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3719 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3720 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3721 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3722 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3723 	}
3724 
3725 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3726 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3727 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3728 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3729 	}
3730 }
3731 
3732 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3733 {
3734 	struct hclge_desc desc;
3735 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3736 	int ret;
3737 
3738 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3739 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3740 	req->fun_reset_vfid = func_id;
3741 
3742 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3743 	if (ret)
3744 		dev_err(&hdev->pdev->dev,
3745 			"send function reset cmd fail, status =%d\n", ret);
3746 
3747 	return ret;
3748 }
3749 
3750 static void hclge_do_reset(struct hclge_dev *hdev)
3751 {
3752 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3753 	struct pci_dev *pdev = hdev->pdev;
3754 	u32 val;
3755 
3756 	if (hclge_get_hw_reset_stat(handle)) {
3757 		dev_info(&pdev->dev, "hardware reset not finish\n");
3758 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3759 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3760 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3761 		return;
3762 	}
3763 
3764 	switch (hdev->reset_type) {
3765 	case HNAE3_GLOBAL_RESET:
3766 		dev_info(&pdev->dev, "global reset requested\n");
3767 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3768 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3769 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3770 		break;
3771 	case HNAE3_FUNC_RESET:
3772 		dev_info(&pdev->dev, "PF reset requested\n");
3773 		/* schedule again to check later */
3774 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3775 		hclge_reset_task_schedule(hdev);
3776 		break;
3777 	default:
3778 		dev_warn(&pdev->dev,
3779 			 "unsupported reset type: %d\n", hdev->reset_type);
3780 		break;
3781 	}
3782 }
3783 
3784 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3785 						   unsigned long *addr)
3786 {
3787 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3788 	struct hclge_dev *hdev = ae_dev->priv;
3789 
3790 	/* return the highest priority reset level amongst all */
3791 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3792 		rst_level = HNAE3_IMP_RESET;
3793 		clear_bit(HNAE3_IMP_RESET, addr);
3794 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3795 		clear_bit(HNAE3_FUNC_RESET, addr);
3796 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3797 		rst_level = HNAE3_GLOBAL_RESET;
3798 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3799 		clear_bit(HNAE3_FUNC_RESET, addr);
3800 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3801 		rst_level = HNAE3_FUNC_RESET;
3802 		clear_bit(HNAE3_FUNC_RESET, addr);
3803 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3804 		rst_level = HNAE3_FLR_RESET;
3805 		clear_bit(HNAE3_FLR_RESET, addr);
3806 	}
3807 
3808 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3809 	    rst_level < hdev->reset_type)
3810 		return HNAE3_NONE_RESET;
3811 
3812 	return rst_level;
3813 }
3814 
3815 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3816 {
3817 	u32 clearval = 0;
3818 
3819 	switch (hdev->reset_type) {
3820 	case HNAE3_IMP_RESET:
3821 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3822 		break;
3823 	case HNAE3_GLOBAL_RESET:
3824 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3825 		break;
3826 	default:
3827 		break;
3828 	}
3829 
3830 	if (!clearval)
3831 		return;
3832 
3833 	/* For revision 0x20, the reset interrupt source
3834 	 * can only be cleared after the hardware reset is done
3835 	 */
3836 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3837 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3838 				clearval);
3839 
3840 	hclge_enable_vector(&hdev->misc_vector, true);
3841 }
3842 
3843 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3844 {
3845 	u32 reg_val;
3846 
3847 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3848 	if (enable)
3849 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3850 	else
3851 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3852 
3853 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3854 }
3855 
3856 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3857 {
3858 	int ret;
3859 
3860 	ret = hclge_set_all_vf_rst(hdev, true);
3861 	if (ret)
3862 		return ret;
3863 
3864 	hclge_func_reset_sync_vf(hdev);
3865 
3866 	return 0;
3867 }
3868 
3869 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3870 {
3871 	u32 reg_val;
3872 	int ret = 0;
3873 
3874 	switch (hdev->reset_type) {
3875 	case HNAE3_FUNC_RESET:
3876 		ret = hclge_func_reset_notify_vf(hdev);
3877 		if (ret)
3878 			return ret;
3879 
3880 		ret = hclge_func_reset_cmd(hdev, 0);
3881 		if (ret) {
3882 			dev_err(&hdev->pdev->dev,
3883 				"asserting function reset fail %d!\n", ret);
3884 			return ret;
3885 		}
3886 
3887 		/* After performing the PF reset, it is not necessary to do any
3888 		 * mailbox handling or send any command to the firmware, because
3889 		 * any mailbox handling or firmware command is only valid after
3890 		 * hclge_cmd_init is called.
3891 		 */
3892 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3893 		hdev->rst_stats.pf_rst_cnt++;
3894 		break;
3895 	case HNAE3_FLR_RESET:
3896 		ret = hclge_func_reset_notify_vf(hdev);
3897 		if (ret)
3898 			return ret;
3899 		break;
3900 	case HNAE3_IMP_RESET:
3901 		hclge_handle_imp_error(hdev);
3902 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3903 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3904 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3905 		break;
3906 	default:
3907 		break;
3908 	}
3909 
3910 	/* inform hardware that preparatory work is done */
3911 	msleep(HCLGE_RESET_SYNC_TIME);
3912 	hclge_reset_handshake(hdev, true);
3913 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3914 
3915 	return ret;
3916 }
3917 
3918 static void hclge_show_rst_info(struct hclge_dev *hdev)
3919 {
3920 	char *buf;
3921 
3922 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3923 	if (!buf)
3924 		return;
3925 
3926 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3927 
3928 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3929 
3930 	kfree(buf);
3931 }
3932 
3933 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3934 {
3935 #define MAX_RESET_FAIL_CNT 5
3936 
3937 	if (hdev->reset_pending) {
3938 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3939 			 hdev->reset_pending);
3940 		return true;
3941 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3942 		   HCLGE_RESET_INT_M) {
3943 		dev_info(&hdev->pdev->dev,
3944 			 "reset failed because new reset interrupt\n");
3945 		hclge_clear_reset_cause(hdev);
3946 		return false;
3947 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3948 		hdev->rst_stats.reset_fail_cnt++;
3949 		set_bit(hdev->reset_type, &hdev->reset_pending);
3950 		dev_info(&hdev->pdev->dev,
3951 			 "re-schedule reset task(%u)\n",
3952 			 hdev->rst_stats.reset_fail_cnt);
3953 		return true;
3954 	}
3955 
3956 	hclge_clear_reset_cause(hdev);
3957 
3958 	/* recover the handshake status when the reset fails */
3959 	hclge_reset_handshake(hdev, true);
3960 
3961 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3962 
3963 	hclge_show_rst_info(hdev);
3964 
3965 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3966 
3967 	return false;
3968 }
3969 
3970 static void hclge_update_reset_level(struct hclge_dev *hdev)
3971 {
3972 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3973 	enum hnae3_reset_type reset_level;
3974 
3975 	/* reset requests will not be set during reset, so clear the
3976 	 * pending reset request to avoid an unnecessary reset caused
3977 	 * by the same reason.
3978 	 */
3979 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
3980 
3981 	/* if default_reset_request holds a higher level reset request,
3982 	 * it should be handled as soon as possible, since some errors
3983 	 * need this kind of reset to be fixed.
3984 	 */
3985 	reset_level = hclge_get_reset_level(ae_dev,
3986 					    &hdev->default_reset_request);
3987 	if (reset_level != HNAE3_NONE_RESET)
3988 		set_bit(reset_level, &hdev->reset_request);
3989 }
3990 
3991 static int hclge_set_rst_done(struct hclge_dev *hdev)
3992 {
3993 	struct hclge_pf_rst_done_cmd *req;
3994 	struct hclge_desc desc;
3995 	int ret;
3996 
3997 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3998 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3999 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4000 
4001 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4002 	/* To be compatible with the old firmware, which does not support
4003 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4004 	 * return success
4005 	 */
4006 	if (ret == -EOPNOTSUPP) {
4007 		dev_warn(&hdev->pdev->dev,
4008 			 "current firmware does not support command(0x%x)!\n",
4009 			 HCLGE_OPC_PF_RST_DONE);
4010 		return 0;
4011 	} else if (ret) {
4012 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4013 			ret);
4014 	}
4015 
4016 	return ret;
4017 }
4018 
4019 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4020 {
4021 	int ret = 0;
4022 
4023 	switch (hdev->reset_type) {
4024 	case HNAE3_FUNC_RESET:
4025 	case HNAE3_FLR_RESET:
4026 		ret = hclge_set_all_vf_rst(hdev, false);
4027 		break;
4028 	case HNAE3_GLOBAL_RESET:
4029 	case HNAE3_IMP_RESET:
4030 		ret = hclge_set_rst_done(hdev);
4031 		break;
4032 	default:
4033 		break;
4034 	}
4035 
4036 	/* clear the handshake status after re-initialization is done */
4037 	hclge_reset_handshake(hdev, false);
4038 
4039 	return ret;
4040 }
4041 
4042 static int hclge_reset_stack(struct hclge_dev *hdev)
4043 {
4044 	int ret;
4045 
4046 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4047 	if (ret)
4048 		return ret;
4049 
4050 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4051 	if (ret)
4052 		return ret;
4053 
4054 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4055 }
4056 
4057 static int hclge_reset_prepare(struct hclge_dev *hdev)
4058 {
4059 	int ret;
4060 
4061 	hdev->rst_stats.reset_cnt++;
4062 	/* perform reset of the stack & ae device for a client */
4063 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4064 	if (ret)
4065 		return ret;
4066 
4067 	rtnl_lock();
4068 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4069 	rtnl_unlock();
4070 	if (ret)
4071 		return ret;
4072 
4073 	return hclge_reset_prepare_wait(hdev);
4074 }
4075 
4076 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4077 {
4078 	int ret;
4079 
4080 	hdev->rst_stats.hw_reset_done_cnt++;
4081 
4082 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4083 	if (ret)
4084 		return ret;
4085 
4086 	rtnl_lock();
4087 	ret = hclge_reset_stack(hdev);
4088 	rtnl_unlock();
4089 	if (ret)
4090 		return ret;
4091 
4092 	hclge_clear_reset_cause(hdev);
4093 
4094 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4095 	/* ignore the RoCE notify error if the reset has already failed
4096 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4097 	 */
4098 	if (ret &&
4099 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4100 		return ret;
4101 
4102 	ret = hclge_reset_prepare_up(hdev);
4103 	if (ret)
4104 		return ret;
4105 
4106 	rtnl_lock();
4107 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4108 	rtnl_unlock();
4109 	if (ret)
4110 		return ret;
4111 
4112 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4113 	if (ret)
4114 		return ret;
4115 
4116 	hdev->last_reset_time = jiffies;
4117 	hdev->rst_stats.reset_fail_cnt = 0;
4118 	hdev->rst_stats.reset_done_cnt++;
4119 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4120 
4121 	hclge_update_reset_level(hdev);
4122 
4123 	return 0;
4124 }
4125 
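/* Top-level reset flow: prepare (notify clients and assert the reset),
 * wait for the hardware to finish, then rebuild the stack. On failure,
 * reschedule the reset task if the error handler asks for a retry.
 */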
4126 static void hclge_reset(struct hclge_dev *hdev)
4127 {
4128 	if (hclge_reset_prepare(hdev))
4129 		goto err_reset;
4130 
4131 	if (hclge_reset_wait(hdev))
4132 		goto err_reset;
4133 
4134 	if (hclge_reset_rebuild(hdev))
4135 		goto err_reset;
4136 
4137 	return;
4138 
4139 err_reset:
4140 	if (hclge_reset_err_handle(hdev))
4141 		hclge_reset_task_schedule(hdev);
4142 }
4143 
4144 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4145 {
4146 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4147 	struct hclge_dev *hdev = ae_dev->priv;
4148 
4149 	/* We may end up getting called here for one of two reasons:
4150 	 * 1. A recoverable error was conveyed through APEI and the only way
4151 	 *    to restore normalcy is to reset.
4152 	 * 2. A new reset request came from the stack due to a timeout.
4153 	 *
4154 	 * Check whether this is a new reset request and we are not here just
4155 	 * because the last reset attempt did not succeed and the watchdog hit
4156 	 * us again. We know that if the last reset request did not occur very
4157 	 * recently (watchdog timer = 5*HZ; check after a sufficiently large
4158 	 * time, say 4*5*HZ).
4159 	 * For a new request we reset the "reset level" to PF reset. If it is
4160 	 * a repeat of the most recent request, we want to throttle it, so it
4161 	 * is not allowed again within HCLGE_RESET_INTERVAL.
4162 	 */
4163 
4164 	if (time_before(jiffies, (hdev->last_reset_time +
4165 				  HCLGE_RESET_INTERVAL))) {
4166 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4167 		return;
4168 	}
4169 
4170 	if (hdev->default_reset_request) {
4171 		hdev->reset_level =
4172 			hclge_get_reset_level(ae_dev,
4173 					      &hdev->default_reset_request);
4174 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4175 		hdev->reset_level = HNAE3_FUNC_RESET;
4176 	}
4177 
4178 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4179 		 hdev->reset_level);
4180 
4181 	/* request reset & schedule reset task */
4182 	set_bit(hdev->reset_level, &hdev->reset_request);
4183 	hclge_reset_task_schedule(hdev);
4184 
4185 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4186 		hdev->reset_level++;
4187 }
4188 
4189 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4190 					enum hnae3_reset_type rst_type)
4191 {
4192 	struct hclge_dev *hdev = ae_dev->priv;
4193 
4194 	set_bit(rst_type, &hdev->default_reset_request);
4195 }
4196 
4197 static void hclge_reset_timer(struct timer_list *t)
4198 {
4199 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4200 
4201 	/* if default_reset_request has no value, it means that this reset
4202 	 * request has already been handled, so just return here
4203 	 */
4204 	if (!hdev->default_reset_request)
4205 		return;
4206 
4207 	dev_info(&hdev->pdev->dev,
4208 		 "triggering reset in reset timer\n");
4209 	hclge_reset_event(hdev->pdev, NULL);
4210 }
4211 
4212 static void hclge_reset_subtask(struct hclge_dev *hdev)
4213 {
4214 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4215 
4216 	/* check if there is any ongoing reset in the hardware. This status can
4217 	 * be checked from reset_pending. If there is, we need to wait for the
4218 	 * hardware to complete the reset.
4219 	 *    a. If we are able to figure out in reasonable time that the
4220 	 *       hardware has fully reset, we can proceed with the driver and
4221 	 *       client reset.
4222 	 *    b. else, we can come back later to check this status, so
4223 	 *       re-schedule now.
4224 	 */
4225 	hdev->last_reset_time = jiffies;
4226 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4227 	if (hdev->reset_type != HNAE3_NONE_RESET)
4228 		hclge_reset(hdev);
4229 
4230 	/* check if we got any *new* reset requests to be honored */
4231 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4232 	if (hdev->reset_type != HNAE3_NONE_RESET)
4233 		hclge_do_reset(hdev);
4234 
4235 	hdev->reset_type = HNAE3_NONE_RESET;
4236 }
4237 
4238 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4239 {
4240 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4241 	enum hnae3_reset_type reset_type;
4242 
4243 	if (ae_dev->hw_err_reset_req) {
4244 		reset_type = hclge_get_reset_level(ae_dev,
4245 						   &ae_dev->hw_err_reset_req);
4246 		hclge_set_def_reset_request(ae_dev, reset_type);
4247 	}
4248 
4249 	if (hdev->default_reset_request && ae_dev->ops->reset_event)
4250 		ae_dev->ops->reset_event(hdev->pdev, NULL);
4251 
4252 	/* enable interrupt after error handling complete */
4253 	hclge_enable_vector(&hdev->misc_vector, true);
4254 }
4255 
4256 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4257 {
4258 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4259 
4260 	ae_dev->hw_err_reset_req = 0;
4261 
4262 	if (hclge_find_error_source(hdev)) {
4263 		hclge_handle_error_info_log(ae_dev);
4264 		hclge_handle_mac_tnl(hdev);
4265 	}
4266 
4267 	hclge_handle_err_reset_request(hdev);
4268 }
4269 
4270 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4271 {
4272 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4273 	struct device *dev = &hdev->pdev->dev;
4274 	u32 msix_sts_reg;
4275 
4276 	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4277 	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4278 		if (hclge_handle_hw_msix_error
4279 				(hdev, &hdev->default_reset_request))
4280 			dev_info(dev, "received msix interrupt 0x%x\n",
4281 				 msix_sts_reg);
4282 	}
4283 
4284 	hclge_handle_hw_ras_error(ae_dev);
4285 
4286 	hclge_handle_err_reset_request(hdev);
4287 }
4288 
4289 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4290 {
4291 	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4292 		return;
4293 
4294 	if (hnae3_dev_ras_imp_supported(hdev))
4295 		hclge_handle_err_recovery(hdev);
4296 	else
4297 		hclge_misc_err_recovery(hdev);
4298 }
4299 
4300 static void hclge_reset_service_task(struct hclge_dev *hdev)
4301 {
4302 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4303 		return;
4304 
4305 	down(&hdev->reset_sem);
4306 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4307 
4308 	hclge_reset_subtask(hdev);
4309 
4310 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4311 	up(&hdev->reset_sem);
4312 }
4313 
4314 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4315 {
4316 	int i;
4317 
4318 	/* start from vport 1 since the PF (vport 0) is always alive */
4319 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4320 		struct hclge_vport *vport = &hdev->vport[i];
4321 
4322 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4323 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4324 
4325 		/* If the VF is not alive, set its mps to the default value */
4326 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4327 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4328 	}
4329 }
4330 
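/* Periodic housekeeping: link status, MAC table, promisc mode, FD table
 * and friends. The heavier work is throttled to roughly once per second
 * based on last_serv_processed.
 */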
4331 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4332 {
4333 	unsigned long delta = round_jiffies_relative(HZ);
4334 
4335 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4336 		return;
4337 
4338 	/* Always handle link status updating to make sure the link state
4339 	 * is updated when it is triggered by mbx.
4340 	 */
4341 	hclge_update_link_status(hdev);
4342 	hclge_sync_mac_table(hdev);
4343 	hclge_sync_promisc_mode(hdev);
4344 	hclge_sync_fd_table(hdev);
4345 
4346 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4347 		delta = jiffies - hdev->last_serv_processed;
4348 
4349 		if (delta < round_jiffies_relative(HZ)) {
4350 			delta = round_jiffies_relative(HZ) - delta;
4351 			goto out;
4352 		}
4353 	}
4354 
4355 	hdev->serv_processed_cnt++;
4356 	hclge_update_vport_alive(hdev);
4357 
4358 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4359 		hdev->last_serv_processed = jiffies;
4360 		goto out;
4361 	}
4362 
4363 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4364 		hclge_update_stats_for_all(hdev);
4365 
4366 	hclge_update_port_info(hdev);
4367 	hclge_sync_vlan_filter(hdev);
4368 
4369 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4370 		hclge_rfs_filter_expire(hdev);
4371 
4372 	hdev->last_serv_processed = jiffies;
4373 
4374 out:
4375 	hclge_task_schedule(hdev, delta);
4376 }
4377 
4378 static void hclge_service_task(struct work_struct *work)
4379 {
4380 	struct hclge_dev *hdev =
4381 		container_of(work, struct hclge_dev, service_task.work);
4382 
4383 	hclge_errhand_service_task(hdev);
4384 	hclge_reset_service_task(hdev);
4385 	hclge_mailbox_service_task(hdev);
4386 	hclge_periodic_service_task(hdev);
4387 
4388 	/* Handle error recovery, reset and mbx again in case periodical task
4389 	 * delays the handling by calling hclge_task_schedule() in
4390 	 * hclge_periodic_service_task().
4391 	 */
4392 	hclge_errhand_service_task(hdev);
4393 	hclge_reset_service_task(hdev);
4394 	hclge_mailbox_service_task(hdev);
4395 }
4396 
4397 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4398 {
4399 	/* VF handle has no client */
4400 	if (!handle->client)
4401 		return container_of(handle, struct hclge_vport, nic);
4402 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4403 		return container_of(handle, struct hclge_vport, roce);
4404 	else
4405 		return container_of(handle, struct hclge_vport, nic);
4406 }
4407 
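/* Fill vector_info for MSI-X entry @idx. The first
 * HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 ring vectors use the base register
 * region; later ones use the extended region with a high/low offset.
 */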
4408 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4409 				  struct hnae3_vector_info *vector_info)
4410 {
4411 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4412 
4413 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4414 
4415 	/* need an extended offset to configure vectors >= 64 */
4416 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4417 		vector_info->io_addr = hdev->hw.io_base +
4418 				HCLGE_VECTOR_REG_BASE +
4419 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4420 	else
4421 		vector_info->io_addr = hdev->hw.io_base +
4422 				HCLGE_VECTOR_EXT_REG_BASE +
4423 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4424 				HCLGE_VECTOR_REG_OFFSET_H +
4425 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4426 				HCLGE_VECTOR_REG_OFFSET;
4427 
4428 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4429 	hdev->vector_irq[idx] = vector_info->vector;
4430 }
4431 
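/* Allocate up to @vector_num MSI-X vectors for the handle by scanning
 * vector_status for unused slots; return the number actually allocated.
 */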
4432 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4433 			    struct hnae3_vector_info *vector_info)
4434 {
4435 	struct hclge_vport *vport = hclge_get_vport(handle);
4436 	struct hnae3_vector_info *vector = vector_info;
4437 	struct hclge_dev *hdev = vport->back;
4438 	int alloc = 0;
4439 	u16 i = 0;
4440 	u16 j;
4441 
4442 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4443 	vector_num = min(hdev->num_msi_left, vector_num);
4444 
4445 	for (j = 0; j < vector_num; j++) {
4446 		while (++i < hdev->num_nic_msi) {
4447 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4448 				hclge_get_vector_info(hdev, i, vector);
4449 				vector++;
4450 				alloc++;
4451 
4452 				break;
4453 			}
4454 		}
4455 	}
4456 	hdev->num_msi_left -= alloc;
4457 	hdev->num_msi_used += alloc;
4458 
4459 	return alloc;
4460 }
4461 
4462 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4463 {
4464 	int i;
4465 
4466 	for (i = 0; i < hdev->num_msi; i++)
4467 		if (vector == hdev->vector_irq[i])
4468 			return i;
4469 
4470 	return -EINVAL;
4471 }
4472 
4473 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4474 {
4475 	struct hclge_vport *vport = hclge_get_vport(handle);
4476 	struct hclge_dev *hdev = vport->back;
4477 	int vector_id;
4478 
4479 	vector_id = hclge_get_vector_index(hdev, vector);
4480 	if (vector_id < 0) {
4481 		dev_err(&hdev->pdev->dev,
4482 			"Get vector index fail. vector = %d\n", vector);
4483 		return vector_id;
4484 	}
4485 
4486 	hclge_free_vector(hdev, vector_id);
4487 
4488 	return 0;
4489 }
4490 
4491 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4492 {
4493 	return HCLGE_RSS_KEY_SIZE;
4494 }
4495 
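/* Program the RSS hash algorithm and hash key. The key is written in
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per
 * chunk.
 */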
4496 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4497 				  const u8 hfunc, const u8 *key)
4498 {
4499 	struct hclge_rss_config_cmd *req;
4500 	unsigned int key_offset = 0;
4501 	struct hclge_desc desc;
4502 	int key_counts;
4503 	int key_size;
4504 	int ret;
4505 
4506 	key_counts = HCLGE_RSS_KEY_SIZE;
4507 	req = (struct hclge_rss_config_cmd *)desc.data;
4508 
4509 	while (key_counts) {
4510 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4511 					   false);
4512 
4513 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4514 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4515 
4516 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4517 		memcpy(req->hash_key,
4518 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4519 
4520 		key_counts -= key_size;
4521 		key_offset++;
4522 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4523 		if (ret) {
4524 			dev_err(&hdev->pdev->dev,
4525 				"Configure RSS config fail, status = %d\n",
4526 				ret);
4527 			return ret;
4528 		}
4529 	}
4530 	return 0;
4531 }
4532 
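/* Program the RSS indirection table, HCLGE_RSS_CFG_TBL_SIZE entries per
 * command descriptor. Each queue id is split into a low byte (rss_qid_l)
 * and a single MSB packed into rss_qid_h.
 */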
4533 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4534 {
4535 	struct hclge_rss_indirection_table_cmd *req;
4536 	struct hclge_desc desc;
4537 	int rss_cfg_tbl_num;
4538 	u8 rss_msb_oft;
4539 	u8 rss_msb_val;
4540 	int ret;
4541 	u16 qid;
4542 	int i;
4543 	u32 j;
4544 
4545 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4546 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4547 			  HCLGE_RSS_CFG_TBL_SIZE;
4548 
4549 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4550 		hclge_cmd_setup_basic_desc
4551 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4552 
4553 		req->start_table_index =
4554 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4555 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4556 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4557 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4558 			req->rss_qid_l[j] = qid & 0xff;
4559 			rss_msb_oft =
4560 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4561 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4562 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4563 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4564 		}
4565 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4566 		if (ret) {
4567 			dev_err(&hdev->pdev->dev,
4568 				"Configure rss indir table fail,status = %d\n",
4569 				ret);
4570 			return ret;
4571 		}
4572 	}
4573 	return 0;
4574 }
4575 
4576 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4577 				 u16 *tc_size, u16 *tc_offset)
4578 {
4579 	struct hclge_rss_tc_mode_cmd *req;
4580 	struct hclge_desc desc;
4581 	int ret;
4582 	int i;
4583 
4584 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4585 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4586 
4587 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4588 		u16 mode = 0;
4589 
4590 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4591 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4592 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4593 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4594 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4595 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4596 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4597 
4598 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4599 	}
4600 
4601 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4602 	if (ret)
4603 		dev_err(&hdev->pdev->dev,
4604 			"Configure rss tc mode fail, status = %d\n", ret);
4605 
4606 	return ret;
4607 }
4608 
4609 static void hclge_get_rss_type(struct hclge_vport *vport)
4610 {
4611 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4612 	    vport->rss_tuple_sets.ipv4_udp_en ||
4613 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4614 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4615 	    vport->rss_tuple_sets.ipv6_udp_en ||
4616 	    vport->rss_tuple_sets.ipv6_sctp_en)
4617 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4618 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4619 		 vport->rss_tuple_sets.ipv6_fragment_en)
4620 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4621 	else
4622 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4623 }
4624 
4625 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4626 {
4627 	struct hclge_rss_input_tuple_cmd *req;
4628 	struct hclge_desc desc;
4629 	int ret;
4630 
4631 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4632 
4633 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4634 
4635 	/* Get the tuple cfg from pf */
4636 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4637 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4638 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4639 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4640 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4641 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4642 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4643 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4644 	hclge_get_rss_type(&hdev->vport[0]);
4645 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4646 	if (ret)
4647 		dev_err(&hdev->pdev->dev,
4648 			"Configure rss input fail, status = %d\n", ret);
4649 	return ret;
4650 }
4651 
4652 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4653 			 u8 *key, u8 *hfunc)
4654 {
4655 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4656 	struct hclge_vport *vport = hclge_get_vport(handle);
4657 	int i;
4658 
4659 	/* Get hash algorithm */
4660 	if (hfunc) {
4661 		switch (vport->rss_algo) {
4662 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4663 			*hfunc = ETH_RSS_HASH_TOP;
4664 			break;
4665 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4666 			*hfunc = ETH_RSS_HASH_XOR;
4667 			break;
4668 		default:
4669 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4670 			break;
4671 		}
4672 	}
4673 
4674 	/* Get the RSS Key requested by the user */
4675 	if (key)
4676 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4677 
4678 	/* Get indirect table */
4679 	if (indir)
4680 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4681 			indir[i] =  vport->rss_indirection_tbl[i];
4682 
4683 	return 0;
4684 }
4685 
4686 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4687 			 const  u8 *key, const  u8 hfunc)
4688 {
4689 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4690 	struct hclge_vport *vport = hclge_get_vport(handle);
4691 	struct hclge_dev *hdev = vport->back;
4692 	u8 hash_algo;
4693 	int ret, i;
4694 
4695 	/* Set the RSS Hash Key if specified by the user */
4696 	if (key) {
4697 		switch (hfunc) {
4698 		case ETH_RSS_HASH_TOP:
4699 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4700 			break;
4701 		case ETH_RSS_HASH_XOR:
4702 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4703 			break;
4704 		case ETH_RSS_HASH_NO_CHANGE:
4705 			hash_algo = vport->rss_algo;
4706 			break;
4707 		default:
4708 			return -EINVAL;
4709 		}
4710 
4711 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4712 		if (ret)
4713 			return ret;
4714 
4715 		/* Update the shadow RSS key with the user specified key */
4716 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4717 		vport->rss_algo = hash_algo;
4718 	}
4719 
4720 	/* Update the shadow RSS table with user specified qids */
4721 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4722 		vport->rss_indirection_tbl[i] = indir[i];
4723 
4724 	/* Update the hardware */
4725 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4726 }
4727 
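/* Translate the ethtool RXH_* flags in @nfc into the HCLGE per-tuple
 * hash bits; SCTP flows additionally set the V_TAG bit.
 */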
4728 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4729 {
4730 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4731 
4732 	if (nfc->data & RXH_L4_B_2_3)
4733 		hash_sets |= HCLGE_D_PORT_BIT;
4734 	else
4735 		hash_sets &= ~HCLGE_D_PORT_BIT;
4736 
4737 	if (nfc->data & RXH_IP_SRC)
4738 		hash_sets |= HCLGE_S_IP_BIT;
4739 	else
4740 		hash_sets &= ~HCLGE_S_IP_BIT;
4741 
4742 	if (nfc->data & RXH_IP_DST)
4743 		hash_sets |= HCLGE_D_IP_BIT;
4744 	else
4745 		hash_sets &= ~HCLGE_D_IP_BIT;
4746 
4747 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4748 		hash_sets |= HCLGE_V_TAG_BIT;
4749 
4750 	return hash_sets;
4751 }
4752 
4753 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4754 				    struct ethtool_rxnfc *nfc,
4755 				    struct hclge_rss_input_tuple_cmd *req)
4756 {
4757 	struct hclge_dev *hdev = vport->back;
4758 	u8 tuple_sets;
4759 
4760 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4761 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4762 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4763 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4764 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4765 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4766 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4767 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4768 
4769 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4770 	switch (nfc->flow_type) {
4771 	case TCP_V4_FLOW:
4772 		req->ipv4_tcp_en = tuple_sets;
4773 		break;
4774 	case TCP_V6_FLOW:
4775 		req->ipv6_tcp_en = tuple_sets;
4776 		break;
4777 	case UDP_V4_FLOW:
4778 		req->ipv4_udp_en = tuple_sets;
4779 		break;
4780 	case UDP_V6_FLOW:
4781 		req->ipv6_udp_en = tuple_sets;
4782 		break;
4783 	case SCTP_V4_FLOW:
4784 		req->ipv4_sctp_en = tuple_sets;
4785 		break;
4786 	case SCTP_V6_FLOW:
4787 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4788 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4789 			return -EINVAL;
4790 
4791 		req->ipv6_sctp_en = tuple_sets;
4792 		break;
4793 	case IPV4_FLOW:
4794 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4795 		break;
4796 	case IPV6_FLOW:
4797 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4798 		break;
4799 	default:
4800 		return -EINVAL;
4801 	}
4802 
4803 	return 0;
4804 }
4805 
4806 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4807 			       struct ethtool_rxnfc *nfc)
4808 {
4809 	struct hclge_vport *vport = hclge_get_vport(handle);
4810 	struct hclge_dev *hdev = vport->back;
4811 	struct hclge_rss_input_tuple_cmd *req;
4812 	struct hclge_desc desc;
4813 	int ret;
4814 
4815 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4816 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4817 		return -EINVAL;
4818 
4819 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4820 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4821 
4822 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4823 	if (ret) {
4824 		dev_err(&hdev->pdev->dev,
4825 			"failed to init rss tuple cmd, ret = %d\n", ret);
4826 		return ret;
4827 	}
4828 
4829 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4830 	if (ret) {
4831 		dev_err(&hdev->pdev->dev,
4832 			"Set rss tuple fail, status = %d\n", ret);
4833 		return ret;
4834 	}
4835 
4836 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4837 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4838 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4839 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4840 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4841 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4842 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4843 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4844 	hclge_get_rss_type(vport);
4845 	return 0;
4846 }
4847 
4848 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4849 				     u8 *tuple_sets)
4850 {
4851 	switch (flow_type) {
4852 	case TCP_V4_FLOW:
4853 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4854 		break;
4855 	case UDP_V4_FLOW:
4856 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4857 		break;
4858 	case TCP_V6_FLOW:
4859 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4860 		break;
4861 	case UDP_V6_FLOW:
4862 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4863 		break;
4864 	case SCTP_V4_FLOW:
4865 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4866 		break;
4867 	case SCTP_V6_FLOW:
4868 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4869 		break;
4870 	case IPV4_FLOW:
4871 	case IPV6_FLOW:
4872 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4873 		break;
4874 	default:
4875 		return -EINVAL;
4876 	}
4877 
4878 	return 0;
4879 }
4880 
4881 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4882 {
4883 	u64 tuple_data = 0;
4884 
4885 	if (tuple_sets & HCLGE_D_PORT_BIT)
4886 		tuple_data |= RXH_L4_B_2_3;
4887 	if (tuple_sets & HCLGE_S_PORT_BIT)
4888 		tuple_data |= RXH_L4_B_0_1;
4889 	if (tuple_sets & HCLGE_D_IP_BIT)
4890 		tuple_data |= RXH_IP_DST;
4891 	if (tuple_sets & HCLGE_S_IP_BIT)
4892 		tuple_data |= RXH_IP_SRC;
4893 
4894 	return tuple_data;
4895 }
4896 
4897 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4898 			       struct ethtool_rxnfc *nfc)
4899 {
4900 	struct hclge_vport *vport = hclge_get_vport(handle);
4901 	u8 tuple_sets;
4902 	int ret;
4903 
4904 	nfc->data = 0;
4905 
4906 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4907 	if (ret || !tuple_sets)
4908 		return ret;
4909 
4910 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4911 
4912 	return 0;
4913 }
4914 
4915 static int hclge_get_tc_size(struct hnae3_handle *handle)
4916 {
4917 	struct hclge_vport *vport = hclge_get_vport(handle);
4918 	struct hclge_dev *hdev = vport->back;
4919 
4920 	return hdev->pf_rss_size_max;
4921 }
4922 
4923 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4924 {
4925 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4926 	struct hclge_vport *vport = hdev->vport;
4927 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4928 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4929 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4930 	struct hnae3_tc_info *tc_info;
4931 	u16 roundup_size;
4932 	u16 rss_size;
4933 	int i;
4934 
4935 	tc_info = &vport->nic.kinfo.tc_info;
4936 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4937 		rss_size = tc_info->tqp_count[i];
4938 		tc_valid[i] = 0;
4939 
4940 		if (!(hdev->hw_tc_map & BIT(i)))
4941 			continue;
4942 
4943 		/* tc_size set to hardware is the log2 of rss_size rounded up
4944 		 * to a power of two; the actual queue size is limited by the
4945 		 * indirection table.
4946 		 */
4947 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4948 		    rss_size == 0) {
4949 			dev_err(&hdev->pdev->dev,
4950 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4951 				rss_size);
4952 			return -EINVAL;
4953 		}
4954 
4955 		roundup_size = roundup_pow_of_two(rss_size);
4956 		roundup_size = ilog2(roundup_size);
4957 
4958 		tc_valid[i] = 1;
4959 		tc_size[i] = roundup_size;
4960 		tc_offset[i] = tc_info->tqp_offset[i];
4961 	}
4962 
4963 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4964 }
4965 
4966 int hclge_rss_init_hw(struct hclge_dev *hdev)
4967 {
4968 	struct hclge_vport *vport = hdev->vport;
4969 	u16 *rss_indir = vport[0].rss_indirection_tbl;
4970 	u8 *key = vport[0].rss_hash_key;
4971 	u8 hfunc = vport[0].rss_algo;
4972 	int ret;
4973 
4974 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4975 	if (ret)
4976 		return ret;
4977 
4978 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4979 	if (ret)
4980 		return ret;
4981 
4982 	ret = hclge_set_rss_input_tuple(hdev);
4983 	if (ret)
4984 		return ret;
4985 
4986 	return hclge_init_rss_tc_mode(hdev);
4987 }
4988 
4989 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4990 {
4991 	struct hclge_vport *vport = &hdev->vport[0];
4992 	int i;
4993 
4994 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4995 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
4996 }
4997 
4998 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4999 {
5000 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5001 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5002 	struct hclge_vport *vport = &hdev->vport[0];
5003 	u16 *rss_ind_tbl;
5004 
5005 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5006 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5007 
5008 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5009 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5010 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5011 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5012 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5013 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5014 	vport->rss_tuple_sets.ipv6_sctp_en =
5015 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5016 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5017 		HCLGE_RSS_INPUT_TUPLE_SCTP;
5018 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5019 
5020 	vport->rss_algo = rss_algo;
5021 
5022 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5023 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
5024 	if (!rss_ind_tbl)
5025 		return -ENOMEM;
5026 
5027 	vport->rss_indirection_tbl = rss_ind_tbl;
5028 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5029 
5030 	hclge_rss_indir_init_cfg(hdev);
5031 
5032 	return 0;
5033 }
5034 
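/* Map (or unmap, when @en is false) the rings in @ring_chain to the
 * given vector. Entries are batched HCLGE_VECTOR_ELEMENTS_PER_CMD per
 * command descriptor, with any remainder flushed at the end.
 */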
5035 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5036 				int vector_id, bool en,
5037 				struct hnae3_ring_chain_node *ring_chain)
5038 {
5039 	struct hclge_dev *hdev = vport->back;
5040 	struct hnae3_ring_chain_node *node;
5041 	struct hclge_desc desc;
5042 	struct hclge_ctrl_vector_chain_cmd *req =
5043 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5044 	enum hclge_cmd_status status;
5045 	enum hclge_opcode_type op;
5046 	u16 tqp_type_and_id;
5047 	int i;
5048 
5049 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5050 	hclge_cmd_setup_basic_desc(&desc, op, false);
5051 	req->int_vector_id_l = hnae3_get_field(vector_id,
5052 					       HCLGE_VECTOR_ID_L_M,
5053 					       HCLGE_VECTOR_ID_L_S);
5054 	req->int_vector_id_h = hnae3_get_field(vector_id,
5055 					       HCLGE_VECTOR_ID_H_M,
5056 					       HCLGE_VECTOR_ID_H_S);
5057 
5058 	i = 0;
5059 	for (node = ring_chain; node; node = node->next) {
5060 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5061 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5062 				HCLGE_INT_TYPE_S,
5063 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5064 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5065 				HCLGE_TQP_ID_S, node->tqp_index);
5066 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5067 				HCLGE_INT_GL_IDX_S,
5068 				hnae3_get_field(node->int_gl_idx,
5069 						HNAE3_RING_GL_IDX_M,
5070 						HNAE3_RING_GL_IDX_S));
5071 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5072 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5073 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5074 			req->vfid = vport->vport_id;
5075 
5076 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5077 			if (status) {
5078 				dev_err(&hdev->pdev->dev,
5079 					"Map TQP fail, status is %d.\n",
5080 					status);
5081 				return -EIO;
5082 			}
5083 			i = 0;
5084 
5085 			hclge_cmd_setup_basic_desc(&desc,
5086 						   op,
5087 						   false);
5088 			req->int_vector_id_l =
5089 				hnae3_get_field(vector_id,
5090 						HCLGE_VECTOR_ID_L_M,
5091 						HCLGE_VECTOR_ID_L_S);
5092 			req->int_vector_id_h =
5093 				hnae3_get_field(vector_id,
5094 						HCLGE_VECTOR_ID_H_M,
5095 						HCLGE_VECTOR_ID_H_S);
5096 		}
5097 	}
5098 
5099 	if (i > 0) {
5100 		req->int_cause_num = i;
5101 		req->vfid = vport->vport_id;
5102 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5103 		if (status) {
5104 			dev_err(&hdev->pdev->dev,
5105 				"Map TQP fail, status is %d.\n", status);
5106 			return -EIO;
5107 		}
5108 	}
5109 
5110 	return 0;
5111 }
5112 
5113 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5114 				    struct hnae3_ring_chain_node *ring_chain)
5115 {
5116 	struct hclge_vport *vport = hclge_get_vport(handle);
5117 	struct hclge_dev *hdev = vport->back;
5118 	int vector_id;
5119 
5120 	vector_id = hclge_get_vector_index(hdev, vector);
5121 	if (vector_id < 0) {
5122 		dev_err(&hdev->pdev->dev,
5123 			"failed to get vector index. vector=%d\n", vector);
5124 		return vector_id;
5125 	}
5126 
5127 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5128 }
5129 
5130 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5131 				       struct hnae3_ring_chain_node *ring_chain)
5132 {
5133 	struct hclge_vport *vport = hclge_get_vport(handle);
5134 	struct hclge_dev *hdev = vport->back;
5135 	int vector_id, ret;
5136 
5137 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5138 		return 0;
5139 
5140 	vector_id = hclge_get_vector_index(hdev, vector);
5141 	if (vector_id < 0) {
5142 		dev_err(&handle->pdev->dev,
5143 			"Get vector index fail. ret =%d\n", vector_id);
5144 		return vector_id;
5145 	}
5146 
5147 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5148 	if (ret)
5149 		dev_err(&handle->pdev->dev,
5150 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5151 			vector_id, ret);
5152 
5153 	return ret;
5154 }
5155 
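/* Configure the unicast/multicast/broadcast promiscuous RX/TX bits for
 * a VF. Both the extended field and the legacy field (kept for
 * DEVICE_VERSION_V1/2 compatibility) are filled in.
 */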
5156 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5157 				      bool en_uc, bool en_mc, bool en_bc)
5158 {
5159 	struct hclge_vport *vport = &hdev->vport[vf_id];
5160 	struct hnae3_handle *handle = &vport->nic;
5161 	struct hclge_promisc_cfg_cmd *req;
5162 	struct hclge_desc desc;
5163 	bool uc_tx_en = en_uc;
5164 	u8 promisc_cfg = 0;
5165 	int ret;
5166 
5167 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5168 
5169 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5170 	req->vf_id = vf_id;
5171 
5172 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5173 		uc_tx_en = false;
5174 
5175 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5176 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5177 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5178 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5179 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5180 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5181 	req->extend_promisc = promisc_cfg;
5182 
5183 	/* to be compatible with DEVICE_VERSION_V1/2 */
5184 	promisc_cfg = 0;
5185 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5186 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5187 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5188 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5189 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5190 	req->promisc = promisc_cfg;
5191 
5192 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5193 	if (ret)
5194 		dev_err(&hdev->pdev->dev,
5195 			"failed to set vport %u promisc mode, ret = %d.\n",
5196 			vf_id, ret);
5197 
5198 	return ret;
5199 }
5200 
5201 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5202 				 bool en_mc_pmc, bool en_bc_pmc)
5203 {
5204 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5205 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5206 }
5207 
5208 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5209 				  bool en_mc_pmc)
5210 {
5211 	struct hclge_vport *vport = hclge_get_vport(handle);
5212 	struct hclge_dev *hdev = vport->back;
5213 	bool en_bc_pmc = true;
5214 
5215 	/* For devices whose version is below V2, if broadcast promisc is
5216 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
5217 	 * should be disabled until the user enables promisc mode.
5218 	 */
5219 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5220 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5221 
5222 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5223 					    en_bc_pmc);
5224 }
5225 
5226 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5227 {
5228 	struct hclge_vport *vport = hclge_get_vport(handle);
5229 
5230 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5231 }
5232 
5233 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5234 {
5235 	if (hlist_empty(&hdev->fd_rule_list))
5236 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5237 }
5238 
5239 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5240 {
5241 	if (!test_bit(location, hdev->fd_bmap)) {
5242 		set_bit(location, hdev->fd_bmap);
5243 		hdev->hclge_fd_rule_num++;
5244 	}
5245 }
5246 
5247 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5248 {
5249 	if (test_bit(location, hdev->fd_bmap)) {
5250 		clear_bit(location, hdev->fd_bmap);
5251 		hdev->hclge_fd_rule_num--;
5252 	}
5253 }
5254 
5255 static void hclge_fd_free_node(struct hclge_dev *hdev,
5256 			       struct hclge_fd_rule *rule)
5257 {
5258 	hlist_del(&rule->rule_node);
5259 	kfree(rule);
5260 	hclge_sync_fd_state(hdev);
5261 }
5262 
5263 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5264 				      struct hclge_fd_rule *old_rule,
5265 				      struct hclge_fd_rule *new_rule,
5266 				      enum HCLGE_FD_NODE_STATE state)
5267 {
5268 	switch (state) {
5269 	case HCLGE_FD_TO_ADD:
5270 	case HCLGE_FD_ACTIVE:
5271 		/* 1) if the new state is TO_ADD, just replace the old rule
5272 		 * with the same location, no matter its state, because the
5273 		 * new rule will be configured to the hardware.
5274 		 * 2) if the new state is ACTIVE, it means the new rule
5275 		 * has been configured to the hardware, so just replace
5276 		 * the old rule node with the same location.
5277 		 * 3) in either case no new node is added to the list, so it's
5278 		 * unnecessary to update the rule number and fd_bmap.
5279 		 */
5280 		new_rule->rule_node.next = old_rule->rule_node.next;
5281 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5282 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5283 		kfree(new_rule);
5284 		break;
5285 	case HCLGE_FD_DELETED:
5286 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5287 		hclge_fd_free_node(hdev, old_rule);
5288 		break;
5289 	case HCLGE_FD_TO_DEL:
5290 		/* if the new request is TO_DEL, and the old rule exists:
5291 		 * 1) if the state of the old rule is TO_DEL, nothing needs to
5292 		 * be done, because the rule is deleted by location, the other
5293 		 * rule content is unnecessary.
5294 		 * 2) if the state of the old rule is ACTIVE, change its state
5295 		 * to TO_DEL, so the rule will be deleted when the periodic
5296 		 * task is scheduled.
5297 		 * 3) if the state of the old rule is TO_ADD, the rule hasn't
5298 		 * been added to hardware yet, so just delete the rule node
5299 		 * from fd_rule_list directly.
5300 		 */
5301 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5302 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5303 			hclge_fd_free_node(hdev, old_rule);
5304 			return;
5305 		}
5306 		old_rule->state = HCLGE_FD_TO_DEL;
5307 		break;
5308 	}
5309 }
5310 
5311 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5312 						u16 location,
5313 						struct hclge_fd_rule **parent)
5314 {
5315 	struct hclge_fd_rule *rule;
5316 	struct hlist_node *node;
5317 
5318 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5319 		if (rule->location == location)
5320 			return rule;
5321 		else if (rule->location > location)
5322 			return NULL;
5323 		/* record the parent node, used to keep the nodes in
5324 		 * fd_rule_list in ascending order.
5325 		 */
5326 		*parent = rule;
5327 	}
5328 
5329 	return NULL;
5330 }
5331 
5332 /* insert fd rule node in ascending order according to rule->location */
5333 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5334 				      struct hclge_fd_rule *rule,
5335 				      struct hclge_fd_rule *parent)
5336 {
5337 	INIT_HLIST_NODE(&rule->rule_node);
5338 
5339 	if (parent)
5340 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5341 	else
5342 		hlist_add_head(&rule->rule_node, hlist);
5343 }
5344 
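/* Write the three user-def offset configurations (OL2/OL3/OL4) to the
 * hardware in one descriptor. Each layer is enabled only while its
 * ref_cnt is non-zero, i.e. while at least one rule uses that layer.
 */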
5345 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5346 				     struct hclge_fd_user_def_cfg *cfg)
5347 {
5348 	struct hclge_fd_user_def_cfg_cmd *req;
5349 	struct hclge_desc desc;
5350 	u16 data = 0;
5351 	int ret;
5352 
5353 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5354 
5355 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5356 
5357 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5358 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5359 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5360 	req->ol2_cfg = cpu_to_le16(data);
5361 
5362 	data = 0;
5363 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5364 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5365 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5366 	req->ol3_cfg = cpu_to_le16(data);
5367 
5368 	data = 0;
5369 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5370 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5371 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5372 	req->ol4_cfg = cpu_to_le16(data);
5373 
5374 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5375 	if (ret)
5376 		dev_err(&hdev->pdev->dev,
5377 			"failed to set fd user def data, ret= %d\n", ret);
5378 	return ret;
5379 }
5380 
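/* Push the user-def offset configuration to hardware when the
 * FD_USER_DEF_CHANGED flag is set; the flag is set again on failure so
 * the write is retried on a later sync. 'locked' indicates whether the
 * caller already holds fd_rule_lock.
 */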
5381 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5382 {
5383 	int ret;
5384 
5385 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5386 		return;
5387 
5388 	if (!locked)
5389 		spin_lock_bh(&hdev->fd_rule_lock);
5390 
5391 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5392 	if (ret)
5393 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5394 
5395 	if (!locked)
5396 		spin_unlock_bh(&hdev->fd_rule_lock);
5397 }
5398 
5399 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5400 					  struct hclge_fd_rule *rule)
5401 {
5402 	struct hlist_head *hlist = &hdev->fd_rule_list;
5403 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5404 	struct hclge_fd_user_def_info *info, *old_info;
5405 	struct hclge_fd_user_def_cfg *cfg;
5406 
5407 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5408 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5409 		return 0;
5410 
5411 	/* valid layers start from 1, so minus 1 to get the cfg index */
5412 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5413 	info = &rule->ep.user_def;
5414 
5415 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5416 		return 0;
5417 
5418 	if (cfg->ref_cnt > 1)
5419 		goto error;
5420 
5421 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5422 	if (fd_rule) {
5423 		old_info = &fd_rule->ep.user_def;
5424 		if (info->layer == old_info->layer)
5425 			return 0;
5426 	}
5427 
5428 error:
5429 	dev_err(&hdev->pdev->dev,
5430 		"No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5431 		info->layer + 1);
5432 	return -ENOSPC;
5433 }
5434 
5435 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5436 					 struct hclge_fd_rule *rule)
5437 {
5438 	struct hclge_fd_user_def_cfg *cfg;
5439 
5440 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5441 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5442 		return;
5443 
5444 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5445 	if (!cfg->ref_cnt) {
5446 		cfg->offset = rule->ep.user_def.offset;
5447 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5448 	}
5449 	cfg->ref_cnt++;
5450 }
5451 
5452 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5453 					 struct hclge_fd_rule *rule)
5454 {
5455 	struct hclge_fd_user_def_cfg *cfg;
5456 
5457 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5458 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5459 		return;
5460 
5461 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5462 	if (!cfg->ref_cnt)
5463 		return;
5464 
5465 	cfg->ref_cnt--;
5466 	if (!cfg->ref_cnt) {
5467 		cfg->offset = 0;
5468 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5469 	}
5470 }
5471 
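/* Update the sorted fd_rule_list for the given location: adjust the
 * user-def refcnt, then either hand an existing node over to
 * hclge_update_fd_rule_node(), or insert a new node and, for the TO_ADD
 * state, schedule the periodic task to push the rule to hardware.
 * Callers are expected to hold fd_rule_lock.
 */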
5472 static void hclge_update_fd_list(struct hclge_dev *hdev,
5473 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5474 				 struct hclge_fd_rule *new_rule)
5475 {
5476 	struct hlist_head *hlist = &hdev->fd_rule_list;
5477 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5478 
5479 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5480 	if (fd_rule) {
5481 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5482 		if (state == HCLGE_FD_ACTIVE)
5483 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5484 		hclge_sync_fd_user_def_cfg(hdev, true);
5485 
5486 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5487 		return;
5488 	}
5489 
5490 	/* it's unlikely to fail here, because we have checked that the
5491 	 * rule exists before.
5492 	 */
5493 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5494 		dev_warn(&hdev->pdev->dev,
5495 			 "failed to delete fd rule %u, it's inexistent\n",
5496 			 location);
5497 		return;
5498 	}
5499 
5500 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5501 	hclge_sync_fd_user_def_cfg(hdev, true);
5502 
5503 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5504 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5505 
5506 	if (state == HCLGE_FD_TO_ADD) {
5507 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5508 		hclge_task_schedule(hdev, 0);
5509 	}
5510 }
5511 
5512 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5513 {
5514 	struct hclge_get_fd_mode_cmd *req;
5515 	struct hclge_desc desc;
5516 	int ret;
5517 
5518 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5519 
5520 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5521 
5522 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5523 	if (ret) {
5524 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5525 		return ret;
5526 	}
5527 
5528 	*fd_mode = req->mode;
5529 
5530 	return ret;
5531 }
5532 
5533 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5534 				   u32 *stage1_entry_num,
5535 				   u32 *stage2_entry_num,
5536 				   u16 *stage1_counter_num,
5537 				   u16 *stage2_counter_num)
5538 {
5539 	struct hclge_get_fd_allocation_cmd *req;
5540 	struct hclge_desc desc;
5541 	int ret;
5542 
5543 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5544 
5545 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5546 
5547 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5548 	if (ret) {
5549 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5550 			ret);
5551 		return ret;
5552 	}
5553 
5554 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5555 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5556 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5557 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5558 
5559 	return ret;
5560 }
5561 
5562 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5563 				   enum HCLGE_FD_STAGE stage_num)
5564 {
5565 	struct hclge_set_fd_key_config_cmd *req;
5566 	struct hclge_fd_key_cfg *stage;
5567 	struct hclge_desc desc;
5568 	int ret;
5569 
5570 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5571 
5572 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5573 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5574 	req->stage = stage_num;
5575 	req->key_select = stage->key_sel;
5576 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5577 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5578 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5579 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5580 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5581 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5582 
5583 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5584 	if (ret)
5585 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5586 
5587 	return ret;
5588 }
5589 
5590 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5591 {
5592 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5593 
5594 	spin_lock_bh(&hdev->fd_rule_lock);
5595 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5596 	spin_unlock_bh(&hdev->fd_rule_lock);
5597 
5598 	hclge_fd_set_user_def_cmd(hdev, cfg);
5599 }
5600 
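/* Query the flow director mode and entry/counter allocation from the
 * firmware, derive the max key length from the mode, and set up the
 * stage 1 tuple and meta data key configuration.
 */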
5601 static int hclge_init_fd_config(struct hclge_dev *hdev)
5602 {
5603 #define LOW_2_WORDS		0x03
5604 	struct hclge_fd_key_cfg *key_cfg;
5605 	int ret;
5606 
5607 	if (!hnae3_dev_fd_supported(hdev))
5608 		return 0;
5609 
5610 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5611 	if (ret)
5612 		return ret;
5613 
5614 	switch (hdev->fd_cfg.fd_mode) {
5615 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5616 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5617 		break;
5618 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5619 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5620 		break;
5621 	default:
5622 		dev_err(&hdev->pdev->dev,
5623 			"Unsupported flow director mode %u\n",
5624 			hdev->fd_cfg.fd_mode);
5625 		return -EOPNOTSUPP;
5626 	}
5627 
5628 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5629 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5630 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5631 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5632 	key_cfg->outer_sipv6_word_en = 0;
5633 	key_cfg->outer_dipv6_word_en = 0;
5634 
5635 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5636 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5637 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5638 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5639 
5640 	/* If using the max 400bit key, we can support tuples for ether type */
5641 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5642 		key_cfg->tuple_active |=
5643 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5644 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5645 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5646 	}
5647 
5648 	/* roce_type is used to filter roce frames
5649 	 * dst_vport is used to specify the rule
5650 	 */
5651 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5652 
5653 	ret = hclge_get_fd_allocation(hdev,
5654 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5655 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5656 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5657 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5658 	if (ret)
5659 		return ret;
5660 
5661 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5662 }
5663 
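/* Write one half of a TCAM entry (the x or y key, selected by sel_x) for
 * the rule at loc. The key is split across three chained descriptors.
 * Passing a NULL key with is_add set to false invalidates the entry.
 */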
5664 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5665 				int loc, u8 *key, bool is_add)
5666 {
5667 	struct hclge_fd_tcam_config_1_cmd *req1;
5668 	struct hclge_fd_tcam_config_2_cmd *req2;
5669 	struct hclge_fd_tcam_config_3_cmd *req3;
5670 	struct hclge_desc desc[3];
5671 	int ret;
5672 
5673 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5674 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5675 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5676 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5677 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5678 
5679 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5680 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5681 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5682 
5683 	req1->stage = stage;
5684 	req1->xy_sel = sel_x ? 1 : 0;
5685 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5686 	req1->index = cpu_to_le32(loc);
5687 	req1->entry_vld = sel_x ? is_add : 0;
5688 
5689 	if (key) {
5690 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5691 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5692 		       sizeof(req2->tcam_data));
5693 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5694 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5695 	}
5696 
5697 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5698 	if (ret)
5699 		dev_err(&hdev->pdev->dev,
5700 			"config tcam key fail, ret=%d\n",
5701 			ret);
5702 
5703 	return ret;
5704 }
5705 
5706 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5707 			      struct hclge_fd_ad_data *action)
5708 {
5709 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5710 	struct hclge_fd_ad_config_cmd *req;
5711 	struct hclge_desc desc;
5712 	u64 ad_data = 0;
5713 	int ret;
5714 
5715 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5716 
5717 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5718 	req->index = cpu_to_le32(loc);
5719 	req->stage = stage;
5720 
5721 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5722 		      action->write_rule_id_to_bd);
5723 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5724 			action->rule_id);
5725 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5726 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5727 			      action->override_tc);
5728 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5729 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5730 	}
5731 	ad_data <<= 32;
5732 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5733 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5734 		      action->forward_to_direct_queue);
5735 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5736 			action->queue_id);
5737 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5738 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5739 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5740 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5741 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5742 			action->counter_id);
5743 
5744 	req->ad_data = cpu_to_le64(ad_data);
5745 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5746 	if (ret)
5747 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5748 
5749 	return ret;
5750 }
5751 
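/* Convert one tuple of the rule into its TCAM x/y key pair, using the
 * offset and mask offset recorded in tuple_key_info. Unused tuples keep
 * their zeroed key bytes but still return true, so the caller advances
 * the key pointers; false is returned only for an unknown key option.
 */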
5752 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5753 				   struct hclge_fd_rule *rule)
5754 {
5755 	int offset, moffset, ip_offset;
5756 	enum HCLGE_FD_KEY_OPT key_opt;
5757 	u16 tmp_x_s, tmp_y_s;
5758 	u32 tmp_x_l, tmp_y_l;
5759 	u8 *p = (u8 *)rule;
5760 	int i;
5761 
5762 	if (rule->unused_tuple & BIT(tuple_bit))
5763 		return true;
5764 
5765 	key_opt = tuple_key_info[tuple_bit].key_opt;
5766 	offset = tuple_key_info[tuple_bit].offset;
5767 	moffset = tuple_key_info[tuple_bit].moffset;
5768 
5769 	switch (key_opt) {
5770 	case KEY_OPT_U8:
5771 		calc_x(*key_x, p[offset], p[moffset]);
5772 		calc_y(*key_y, p[offset], p[moffset]);
5773 
5774 		return true;
5775 	case KEY_OPT_LE16:
5776 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5777 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5778 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5779 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5780 
5781 		return true;
5782 	case KEY_OPT_LE32:
5783 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5784 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5785 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5786 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5787 
5788 		return true;
5789 	case KEY_OPT_MAC:
5790 		for (i = 0; i < ETH_ALEN; i++) {
5791 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5792 			       p[moffset + i]);
5793 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5794 			       p[moffset + i]);
5795 		}
5796 
5797 		return true;
5798 	case KEY_OPT_IP:
5799 		ip_offset = IPV4_INDEX * sizeof(u32);
5800 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5801 		       *(u32 *)(&p[moffset + ip_offset]));
5802 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5803 		       *(u32 *)(&p[moffset + ip_offset]));
5804 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5805 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5806 
5807 		return true;
5808 	default:
5809 		return false;
5810 	}
5811 }
5812 
5813 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5814 				 u8 vf_id, u8 network_port_id)
5815 {
5816 	u32 port_number = 0;
5817 
5818 	if (port_type == HOST_PORT) {
5819 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5820 				pf_id);
5821 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5822 				vf_id);
5823 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5824 	} else {
5825 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5826 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5827 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5828 	}
5829 
5830 	return port_number;
5831 }
5832 
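/* Pack the active meta data fields (packet type and destination vport)
 * into a 32 bit word, convert it to a TCAM x/y pair with a full mask,
 * and shift the used bits to the top of the word so they land in the
 * MSB region of the key.
 */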
5833 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5834 				       __le32 *key_x, __le32 *key_y,
5835 				       struct hclge_fd_rule *rule)
5836 {
5837 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5838 	u8 cur_pos = 0, tuple_size, shift_bits;
5839 	unsigned int i;
5840 
5841 	for (i = 0; i < MAX_META_DATA; i++) {
5842 		tuple_size = meta_data_key_info[i].key_length;
5843 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5844 
5845 		switch (tuple_bit) {
5846 		case BIT(ROCE_TYPE):
5847 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5848 			cur_pos += tuple_size;
5849 			break;
5850 		case BIT(DST_VPORT):
5851 			port_number = hclge_get_port_number(HOST_PORT, 0,
5852 							    rule->vf_id, 0);
5853 			hnae3_set_field(meta_data,
5854 					GENMASK(cur_pos + tuple_size, cur_pos),
5855 					cur_pos, port_number);
5856 			cur_pos += tuple_size;
5857 			break;
5858 		default:
5859 			break;
5860 		}
5861 	}
5862 
5863 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5864 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5865 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5866 
5867 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5868 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5869 }
5870 
5871 /* A complete key consists of a meta data key and a tuple key.
5872  * The meta data key is stored in the MSB region, the tuple key in the
5873  * LSB region, and unused bits are filled with 0.
5874  */
5875 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5876 			    struct hclge_fd_rule *rule)
5877 {
5878 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5879 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5880 	u8 *cur_key_x, *cur_key_y;
5881 	u8 meta_data_region;
5882 	u8 tuple_size;
5883 	int ret;
5884 	u32 i;
5885 
5886 	memset(key_x, 0, sizeof(key_x));
5887 	memset(key_y, 0, sizeof(key_y));
5888 	cur_key_x = key_x;
5889 	cur_key_y = key_y;
5890 
5891 	for (i = 0; i < MAX_TUPLE; i++) {
5892 		bool tuple_valid;
5893 
5894 		tuple_size = tuple_key_info[i].key_length / 8;
5895 		if (!(key_cfg->tuple_active & BIT(i)))
5896 			continue;
5897 
5898 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5899 						     cur_key_y, rule);
5900 		if (tuple_valid) {
5901 			cur_key_x += tuple_size;
5902 			cur_key_y += tuple_size;
5903 		}
5904 	}
5905 
5906 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5907 			MAX_META_DATA_LENGTH / 8;
5908 
5909 	hclge_fd_convert_meta_data(key_cfg,
5910 				   (__le32 *)(key_x + meta_data_region),
5911 				   (__le32 *)(key_y + meta_data_region),
5912 				   rule);
5913 
5914 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5915 				   true);
5916 	if (ret) {
5917 		dev_err(&hdev->pdev->dev,
5918 			"fd key_y config fail, loc=%u, ret=%d\n",
5919 			rule->location, ret);
5920 		return ret;
5921 	}
5922 
5923 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5924 				   true);
5925 	if (ret)
5926 		dev_err(&hdev->pdev->dev,
5927 			"fd key_x config fail, loc=%u, ret=%d\n",
5928 			rule->location, ret);
5929 	return ret;
5930 }
5931 
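/* Build the action data for the rule: drop the packet, override the TC
 * (queue offset plus log2 of the TC's queue count), or forward to a
 * direct queue. The rule location doubles as the ad_id and as the rule
 * id written back via the BD.
 */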
5932 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5933 			       struct hclge_fd_rule *rule)
5934 {
5935 	struct hclge_vport *vport = hdev->vport;
5936 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5937 	struct hclge_fd_ad_data ad_data;
5938 
5939 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5940 	ad_data.ad_id = rule->location;
5941 
5942 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5943 		ad_data.drop_packet = true;
5944 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5945 		ad_data.override_tc = true;
5946 		ad_data.queue_id =
5947 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5948 		ad_data.tc_size =
5949 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5950 	} else {
5951 		ad_data.forward_to_direct_queue = true;
5952 		ad_data.queue_id = rule->queue_id;
5953 	}
5954 
5955 	ad_data.use_counter = false;
5956 	ad_data.counter_id = 0;
5957 
5958 	ad_data.use_next_stage = false;
5959 	ad_data.next_input_key = 0;
5960 
5961 	ad_data.write_rule_id_to_bd = true;
5962 	ad_data.rule_id = rule->location;
5963 
5964 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5965 }
5966 
5967 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5968 				       u32 *unused_tuple)
5969 {
5970 	if (!spec || !unused_tuple)
5971 		return -EINVAL;
5972 
5973 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5974 
5975 	if (!spec->ip4src)
5976 		*unused_tuple |= BIT(INNER_SRC_IP);
5977 
5978 	if (!spec->ip4dst)
5979 		*unused_tuple |= BIT(INNER_DST_IP);
5980 
5981 	if (!spec->psrc)
5982 		*unused_tuple |= BIT(INNER_SRC_PORT);
5983 
5984 	if (!spec->pdst)
5985 		*unused_tuple |= BIT(INNER_DST_PORT);
5986 
5987 	if (!spec->tos)
5988 		*unused_tuple |= BIT(INNER_IP_TOS);
5989 
5990 	return 0;
5991 }
5992 
5993 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5994 				    u32 *unused_tuple)
5995 {
5996 	if (!spec || !unused_tuple)
5997 		return -EINVAL;
5998 
5999 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6000 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6001 
6002 	if (!spec->ip4src)
6003 		*unused_tuple |= BIT(INNER_SRC_IP);
6004 
6005 	if (!spec->ip4dst)
6006 		*unused_tuple |= BIT(INNER_DST_IP);
6007 
6008 	if (!spec->tos)
6009 		*unused_tuple |= BIT(INNER_IP_TOS);
6010 
6011 	if (!spec->proto)
6012 		*unused_tuple |= BIT(INNER_IP_PROTO);
6013 
6014 	if (spec->l4_4_bytes)
6015 		return -EOPNOTSUPP;
6016 
6017 	if (spec->ip_ver != ETH_RX_NFC_IP4)
6018 		return -EOPNOTSUPP;
6019 
6020 	return 0;
6021 }
6022 
6023 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6024 				       u32 *unused_tuple)
6025 {
6026 	if (!spec || !unused_tuple)
6027 		return -EINVAL;
6028 
6029 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6030 
6031 	/* check whether the src/dst ip address is used */
6032 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6033 		*unused_tuple |= BIT(INNER_SRC_IP);
6034 
6035 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6036 		*unused_tuple |= BIT(INNER_DST_IP);
6037 
6038 	if (!spec->psrc)
6039 		*unused_tuple |= BIT(INNER_SRC_PORT);
6040 
6041 	if (!spec->pdst)
6042 		*unused_tuple |= BIT(INNER_DST_PORT);
6043 
6044 	if (!spec->tclass)
6045 		*unused_tuple |= BIT(INNER_IP_TOS);
6046 
6047 	return 0;
6048 }
6049 
6050 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6051 				    u32 *unused_tuple)
6052 {
6053 	if (!spec || !unused_tuple)
6054 		return -EINVAL;
6055 
6056 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6057 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6058 
6059 	/* check whether the src/dst ip address is used */
6060 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6061 		*unused_tuple |= BIT(INNER_SRC_IP);
6062 
6063 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6064 		*unused_tuple |= BIT(INNER_DST_IP);
6065 
6066 	if (!spec->l4_proto)
6067 		*unused_tuple |= BIT(INNER_IP_PROTO);
6068 
6069 	if (!spec->tclass)
6070 		*unused_tuple |= BIT(INNER_IP_TOS);
6071 
6072 	if (spec->l4_4_bytes)
6073 		return -EOPNOTSUPP;
6074 
6075 	return 0;
6076 }
6077 
6078 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6079 {
6080 	if (!spec || !unused_tuple)
6081 		return -EINVAL;
6082 
6083 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6084 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6085 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6086 
6087 	if (is_zero_ether_addr(spec->h_source))
6088 		*unused_tuple |= BIT(INNER_SRC_MAC);
6089 
6090 	if (is_zero_ether_addr(spec->h_dest))
6091 		*unused_tuple |= BIT(INNER_DST_MAC);
6092 
6093 	if (!spec->h_proto)
6094 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6095 
6096 	return 0;
6097 }
6098 
6099 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6100 				    struct ethtool_rx_flow_spec *fs,
6101 				    u32 *unused_tuple)
6102 {
6103 	if (fs->flow_type & FLOW_EXT) {
6104 		if (fs->h_ext.vlan_etype) {
6105 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6106 			return -EOPNOTSUPP;
6107 		}
6108 
6109 		if (!fs->h_ext.vlan_tci)
6110 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6111 
6112 		if (fs->m_ext.vlan_tci &&
6113 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6114 			dev_err(&hdev->pdev->dev,
6115 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6116 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6117 			return -EINVAL;
6118 		}
6119 	} else {
6120 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6121 	}
6122 
6123 	if (fs->flow_type & FLOW_MAC_EXT) {
6124 		if (hdev->fd_cfg.fd_mode !=
6125 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6126 			dev_err(&hdev->pdev->dev,
6127 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6128 			return -EOPNOTSUPP;
6129 		}
6130 
6131 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6132 			*unused_tuple |= BIT(INNER_DST_MAC);
6133 		else
6134 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6135 	}
6136 
6137 	return 0;
6138 }
6139 
6140 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6141 				       struct hclge_fd_user_def_info *info)
6142 {
6143 	switch (flow_type) {
6144 	case ETHER_FLOW:
6145 		info->layer = HCLGE_FD_USER_DEF_L2;
6146 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6147 		break;
6148 	case IP_USER_FLOW:
6149 	case IPV6_USER_FLOW:
6150 		info->layer = HCLGE_FD_USER_DEF_L3;
6151 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6152 		break;
6153 	case TCP_V4_FLOW:
6154 	case UDP_V4_FLOW:
6155 	case TCP_V6_FLOW:
6156 	case UDP_V6_FLOW:
6157 		info->layer = HCLGE_FD_USER_DEF_L4;
6158 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6159 		break;
6160 	default:
6161 		return -EOPNOTSUPP;
6162 	}
6163 
6164 	return 0;
6165 }
6166 
6167 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6168 {
6169 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6170 }
6171 
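/* Parse the ethtool user-def field (the FLOW_EXT data/mask words) into a
 * layer, an offset and a 16 bit data/mask pair. The layer is derived
 * from the flow type, and the offset must be fully unmasked and within
 * HCLGE_FD_MAX_USER_DEF_OFFSET.
 */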
6172 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6173 					 struct ethtool_rx_flow_spec *fs,
6174 					 u32 *unused_tuple,
6175 					 struct hclge_fd_user_def_info *info)
6176 {
6177 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6178 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6179 	u16 data, offset, data_mask, offset_mask;
6180 	int ret;
6181 
6182 	info->layer = HCLGE_FD_USER_DEF_NONE;
6183 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6184 
6185 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6186 		return 0;
6187 
6188 	/* the user-def data from ethtool is a 64 bit value, bits 0~15 are
6189 	 * used for the data, and bits 32~47 are used for the offset.
6190 	 */
6191 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6192 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6193 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6194 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6195 
6196 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6197 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6198 		return -EOPNOTSUPP;
6199 	}
6200 
6201 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6202 		dev_err(&hdev->pdev->dev,
6203 			"user-def offset[%u] should be no more than %u\n",
6204 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6205 		return -EINVAL;
6206 	}
6207 
6208 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6209 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6210 		return -EINVAL;
6211 	}
6212 
6213 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6214 	if (ret) {
6215 		dev_err(&hdev->pdev->dev,
6216 			"unsupported flow type for user-def bytes, ret = %d\n",
6217 			ret);
6218 		return ret;
6219 	}
6220 
6221 	info->data = data;
6222 	info->data_mask = data_mask;
6223 	info->offset = offset;
6224 
6225 	return 0;
6226 }
6227 
6228 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6229 			       struct ethtool_rx_flow_spec *fs,
6230 			       u32 *unused_tuple,
6231 			       struct hclge_fd_user_def_info *info)
6232 {
6233 	u32 flow_type;
6234 	int ret;
6235 
6236 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6237 		dev_err(&hdev->pdev->dev,
6238 			"failed to config fd rules, invalid rule location: %u, max is %u\n.",
6239 			fs->location,
6240 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6241 		return -EINVAL;
6242 	}
6243 
6244 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6245 	if (ret)
6246 		return ret;
6247 
6248 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6249 	switch (flow_type) {
6250 	case SCTP_V4_FLOW:
6251 	case TCP_V4_FLOW:
6252 	case UDP_V4_FLOW:
6253 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6254 						  unused_tuple);
6255 		break;
6256 	case IP_USER_FLOW:
6257 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6258 					       unused_tuple);
6259 		break;
6260 	case SCTP_V6_FLOW:
6261 	case TCP_V6_FLOW:
6262 	case UDP_V6_FLOW:
6263 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6264 						  unused_tuple);
6265 		break;
6266 	case IPV6_USER_FLOW:
6267 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6268 					       unused_tuple);
6269 		break;
6270 	case ETHER_FLOW:
6271 		if (hdev->fd_cfg.fd_mode !=
6272 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6273 			dev_err(&hdev->pdev->dev,
6274 				"ETHER_FLOW is not supported in current fd mode!\n");
6275 			return -EOPNOTSUPP;
6276 		}
6277 
6278 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6279 						 unused_tuple);
6280 		break;
6281 	default:
6282 		dev_err(&hdev->pdev->dev,
6283 			"unsupported protocol type, protocol type = %#x\n",
6284 			flow_type);
6285 		return -EOPNOTSUPP;
6286 	}
6287 
6288 	if (ret) {
6289 		dev_err(&hdev->pdev->dev,
6290 			"failed to check flow union tuple, ret = %d\n",
6291 			ret);
6292 		return ret;
6293 	}
6294 
6295 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6296 }
6297 
6298 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6299 				      struct ethtool_rx_flow_spec *fs,
6300 				      struct hclge_fd_rule *rule, u8 ip_proto)
6301 {
6302 	rule->tuples.src_ip[IPV4_INDEX] =
6303 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6304 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6305 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6306 
6307 	rule->tuples.dst_ip[IPV4_INDEX] =
6308 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6309 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6310 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6311 
6312 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6313 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6314 
6315 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6316 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6317 
6318 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6319 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6320 
6321 	rule->tuples.ether_proto = ETH_P_IP;
6322 	rule->tuples_mask.ether_proto = 0xFFFF;
6323 
6324 	rule->tuples.ip_proto = ip_proto;
6325 	rule->tuples_mask.ip_proto = 0xFF;
6326 }
6327 
6328 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6329 				   struct ethtool_rx_flow_spec *fs,
6330 				   struct hclge_fd_rule *rule)
6331 {
6332 	rule->tuples.src_ip[IPV4_INDEX] =
6333 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6334 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6335 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6336 
6337 	rule->tuples.dst_ip[IPV4_INDEX] =
6338 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6339 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6340 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6341 
6342 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6343 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6344 
6345 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6346 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6347 
6348 	rule->tuples.ether_proto = ETH_P_IP;
6349 	rule->tuples_mask.ether_proto = 0xFFFF;
6350 }
6351 
6352 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6353 				      struct ethtool_rx_flow_spec *fs,
6354 				      struct hclge_fd_rule *rule, u8 ip_proto)
6355 {
6356 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6357 			  IPV6_SIZE);
6358 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6359 			  IPV6_SIZE);
6360 
6361 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6362 			  IPV6_SIZE);
6363 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6364 			  IPV6_SIZE);
6365 
6366 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6367 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6368 
6369 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6370 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6371 
6372 	rule->tuples.ether_proto = ETH_P_IPV6;
6373 	rule->tuples_mask.ether_proto = 0xFFFF;
6374 
6375 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6376 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6377 
6378 	rule->tuples.ip_proto = ip_proto;
6379 	rule->tuples_mask.ip_proto = 0xFF;
6380 }
6381 
6382 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6383 				   struct ethtool_rx_flow_spec *fs,
6384 				   struct hclge_fd_rule *rule)
6385 {
6386 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6387 			  IPV6_SIZE);
6388 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6389 			  IPV6_SIZE);
6390 
6391 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6392 			  IPV6_SIZE);
6393 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6394 			  IPV6_SIZE);
6395 
6396 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6397 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6398 
6399 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6400 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6401 
6402 	rule->tuples.ether_proto = ETH_P_IPV6;
6403 	rule->tuples_mask.ether_proto = 0xFFFF;
6404 }
6405 
6406 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6407 				     struct ethtool_rx_flow_spec *fs,
6408 				     struct hclge_fd_rule *rule)
6409 {
6410 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6411 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6412 
6413 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6414 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6415 
6416 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6417 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6418 }
6419 
6420 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6421 					struct hclge_fd_rule *rule)
6422 {
6423 	switch (info->layer) {
6424 	case HCLGE_FD_USER_DEF_L2:
6425 		rule->tuples.l2_user_def = info->data;
6426 		rule->tuples_mask.l2_user_def = info->data_mask;
6427 		break;
6428 	case HCLGE_FD_USER_DEF_L3:
6429 		rule->tuples.l3_user_def = info->data;
6430 		rule->tuples_mask.l3_user_def = info->data_mask;
6431 		break;
6432 	case HCLGE_FD_USER_DEF_L4:
6433 		rule->tuples.l4_user_def = (u32)info->data << 16;
6434 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6435 		break;
6436 	default:
6437 		break;
6438 	}
6439 
6440 	rule->ep.user_def = *info;
6441 }
6442 
6443 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6444 			      struct ethtool_rx_flow_spec *fs,
6445 			      struct hclge_fd_rule *rule,
6446 			      struct hclge_fd_user_def_info *info)
6447 {
6448 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6449 
6450 	switch (flow_type) {
6451 	case SCTP_V4_FLOW:
6452 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6453 		break;
6454 	case TCP_V4_FLOW:
6455 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6456 		break;
6457 	case UDP_V4_FLOW:
6458 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6459 		break;
6460 	case IP_USER_FLOW:
6461 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6462 		break;
6463 	case SCTP_V6_FLOW:
6464 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6465 		break;
6466 	case TCP_V6_FLOW:
6467 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6468 		break;
6469 	case UDP_V6_FLOW:
6470 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6471 		break;
6472 	case IPV6_USER_FLOW:
6473 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6474 		break;
6475 	case ETHER_FLOW:
6476 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6477 		break;
6478 	default:
6479 		return -EOPNOTSUPP;
6480 	}
6481 
6482 	if (fs->flow_type & FLOW_EXT) {
6483 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6484 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6485 		hclge_fd_get_user_def_tuple(info, rule);
6486 	}
6487 
6488 	if (fs->flow_type & FLOW_MAC_EXT) {
6489 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6490 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6491 	}
6492 
6493 	return 0;
6494 }
6495 
6496 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6497 				struct hclge_fd_rule *rule)
6498 {
6499 	int ret;
6500 
6501 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6502 	if (ret)
6503 		return ret;
6504 
6505 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6506 }
6507 
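/* Common path for adding an fd rule: reject mixing ethtool and tc flower
 * rules, check the user-def offset refcnt, clear any aRFS rules, write
 * the action and key to hardware, then record the rule as ACTIVE in
 * fd_rule_list. The fd_rule_lock is taken here.
 */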
6508 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6509 				     struct hclge_fd_rule *rule)
6510 {
6511 	int ret;
6512 
6513 	spin_lock_bh(&hdev->fd_rule_lock);
6514 
6515 	if (hdev->fd_active_type != rule->rule_type &&
6516 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6517 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6518 		dev_err(&hdev->pdev->dev,
6519 			"mode conflict(new type %d, active type %d), please delete existent rules first\n",
6520 			rule->rule_type, hdev->fd_active_type);
6521 		spin_unlock_bh(&hdev->fd_rule_lock);
6522 		return -EINVAL;
6523 	}
6524 
6525 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6526 	if (ret)
6527 		goto out;
6528 
6529 	ret = hclge_clear_arfs_rules(hdev);
6530 	if (ret)
6531 		goto out;
6532 
6533 	ret = hclge_fd_config_rule(hdev, rule);
6534 	if (ret)
6535 		goto out;
6536 
6537 	rule->state = HCLGE_FD_ACTIVE;
6538 	hdev->fd_active_type = rule->rule_type;
6539 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6540 
6541 out:
6542 	spin_unlock_bh(&hdev->fd_rule_lock);
6543 	return ret;
6544 }
6545 
6546 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6547 {
6548 	struct hclge_vport *vport = hclge_get_vport(handle);
6549 	struct hclge_dev *hdev = vport->back;
6550 
6551 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6552 }
6553 
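/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet;
 * otherwise the cookie encodes a VF index and a queue id, which are
 * validated against the number of requested VFs and the target vport's
 * queue count before the forward-to-queue action is selected.
 */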
6554 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6555 				      u16 *vport_id, u8 *action, u16 *queue_id)
6556 {
6557 	struct hclge_vport *vport = hdev->vport;
6558 
6559 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6560 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6561 	} else {
6562 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6563 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6564 		u16 tqps;
6565 
6566 		if (vf > hdev->num_req_vfs) {
6567 			dev_err(&hdev->pdev->dev,
6568 				"Error: vf id (%u) > max vf num (%u)\n",
6569 				vf, hdev->num_req_vfs);
6570 			return -EINVAL;
6571 		}
6572 
6573 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6574 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6575 
6576 		if (ring >= tqps) {
6577 			dev_err(&hdev->pdev->dev,
6578 				"Error: queue id (%u) > max tqp num (%u)\n",
6579 				ring, tqps - 1);
6580 			return -EINVAL;
6581 		}
6582 
6583 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6584 		*queue_id = ring;
6585 	}
6586 
6587 	return 0;
6588 }
6589 
6590 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6591 			      struct ethtool_rxnfc *cmd)
6592 {
6593 	struct hclge_vport *vport = hclge_get_vport(handle);
6594 	struct hclge_dev *hdev = vport->back;
6595 	struct hclge_fd_user_def_info info;
6596 	u16 dst_vport_id = 0, q_index = 0;
6597 	struct ethtool_rx_flow_spec *fs;
6598 	struct hclge_fd_rule *rule;
6599 	u32 unused = 0;
6600 	u8 action;
6601 	int ret;
6602 
6603 	if (!hnae3_dev_fd_supported(hdev)) {
6604 		dev_err(&hdev->pdev->dev,
6605 			"flow table director is not supported\n");
6606 		return -EOPNOTSUPP;
6607 	}
6608 
6609 	if (!hdev->fd_en) {
6610 		dev_err(&hdev->pdev->dev,
6611 			"please enable flow director first\n");
6612 		return -EOPNOTSUPP;
6613 	}
6614 
6615 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6616 
6617 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6618 	if (ret)
6619 		return ret;
6620 
6621 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6622 					 &action, &q_index);
6623 	if (ret)
6624 		return ret;
6625 
6626 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6627 	if (!rule)
6628 		return -ENOMEM;
6629 
6630 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6631 	if (ret) {
6632 		kfree(rule);
6633 		return ret;
6634 	}
6635 
6636 	rule->flow_type = fs->flow_type;
6637 	rule->location = fs->location;
6638 	rule->unused_tuple = unused;
6639 	rule->vf_id = dst_vport_id;
6640 	rule->queue_id = q_index;
6641 	rule->action = action;
6642 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6643 
6644 	ret = hclge_add_fd_entry_common(hdev, rule);
6645 	if (ret)
6646 		kfree(rule);
6647 
6648 	return ret;
6649 }
6650 
6651 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6652 			      struct ethtool_rxnfc *cmd)
6653 {
6654 	struct hclge_vport *vport = hclge_get_vport(handle);
6655 	struct hclge_dev *hdev = vport->back;
6656 	struct ethtool_rx_flow_spec *fs;
6657 	int ret;
6658 
6659 	if (!hnae3_dev_fd_supported(hdev))
6660 		return -EOPNOTSUPP;
6661 
6662 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6663 
6664 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6665 		return -EINVAL;
6666 
6667 	spin_lock_bh(&hdev->fd_rule_lock);
6668 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6669 	    !test_bit(fs->location, hdev->fd_bmap)) {
6670 		dev_err(&hdev->pdev->dev,
6671 			"Delete fail, rule %u is inexistent\n", fs->location);
6672 		spin_unlock_bh(&hdev->fd_rule_lock);
6673 		return -ENOENT;
6674 	}
6675 
6676 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6677 				   NULL, false);
6678 	if (ret)
6679 		goto out;
6680 
6681 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6682 
6683 out:
6684 	spin_unlock_bh(&hdev->fd_rule_lock);
6685 	return ret;
6686 }
6687 
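/* Invalidate every fd TCAM entry tracked in fd_bmap and, if clear_list
 * is true, also free the software rule list and reset the rule counters.
 */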
6688 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6689 					 bool clear_list)
6690 {
6691 	struct hclge_fd_rule *rule;
6692 	struct hlist_node *node;
6693 	u16 location;
6694 
6695 	if (!hnae3_dev_fd_supported(hdev))
6696 		return;
6697 
6698 	spin_lock_bh(&hdev->fd_rule_lock);
6699 
6700 	for_each_set_bit(location, hdev->fd_bmap,
6701 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6702 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6703 				     NULL, false);
6704 
6705 	if (clear_list) {
6706 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6707 					  rule_node) {
6708 			hlist_del(&rule->rule_node);
6709 			kfree(rule);
6710 		}
6711 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6712 		hdev->hclge_fd_rule_num = 0;
6713 		bitmap_zero(hdev->fd_bmap,
6714 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6715 	}
6716 
6717 	spin_unlock_bh(&hdev->fd_rule_lock);
6718 }
6719 
6720 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6721 {
6722 	hclge_clear_fd_rules_in_list(hdev, true);
6723 	hclge_fd_disable_user_def(hdev);
6724 }
6725 
6726 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6727 {
6728 	struct hclge_vport *vport = hclge_get_vport(handle);
6729 	struct hclge_dev *hdev = vport->back;
6730 	struct hclge_fd_rule *rule;
6731 	struct hlist_node *node;
6732 
6733 	/* Return ok here, because the reset error handling will check this
6734 	 * return value. If an error is returned here, the reset process
6735 	 * will fail.
6736 	 */
6737 	if (!hnae3_dev_fd_supported(hdev))
6738 		return 0;
6739 
6740 	/* if fd is disabled, should not restore it when reset */
6741 	if (!hdev->fd_en)
6742 		return 0;
6743 
6744 	spin_lock_bh(&hdev->fd_rule_lock);
6745 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6746 		if (rule->state == HCLGE_FD_ACTIVE)
6747 			rule->state = HCLGE_FD_TO_ADD;
6748 	}
6749 	spin_unlock_bh(&hdev->fd_rule_lock);
6750 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6751 
6752 	return 0;
6753 }
6754 
6755 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6756 				 struct ethtool_rxnfc *cmd)
6757 {
6758 	struct hclge_vport *vport = hclge_get_vport(handle);
6759 	struct hclge_dev *hdev = vport->back;
6760 
6761 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6762 		return -EOPNOTSUPP;
6763 
6764 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6765 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6766 
6767 	return 0;
6768 }
6769 
6770 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6771 				     struct ethtool_tcpip4_spec *spec,
6772 				     struct ethtool_tcpip4_spec *spec_mask)
6773 {
6774 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6775 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6776 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6777 
6778 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6779 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6780 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6781 
6782 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6783 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6784 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6785 
6786 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6787 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6788 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6789 
6790 	spec->tos = rule->tuples.ip_tos;
6791 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6792 			0 : rule->tuples_mask.ip_tos;
6793 }
6794 
6795 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6796 				  struct ethtool_usrip4_spec *spec,
6797 				  struct ethtool_usrip4_spec *spec_mask)
6798 {
6799 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6800 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6801 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6802 
6803 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6804 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6805 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6806 
6807 	spec->tos = rule->tuples.ip_tos;
6808 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6809 			0 : rule->tuples_mask.ip_tos;
6810 
6811 	spec->proto = rule->tuples.ip_proto;
6812 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6813 			0 : rule->tuples_mask.ip_proto;
6814 
6815 	spec->ip_ver = ETH_RX_NFC_IP4;
6816 }
6817 
6818 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6819 				     struct ethtool_tcpip6_spec *spec,
6820 				     struct ethtool_tcpip6_spec *spec_mask)
6821 {
6822 	cpu_to_be32_array(spec->ip6src,
6823 			  rule->tuples.src_ip, IPV6_SIZE);
6824 	cpu_to_be32_array(spec->ip6dst,
6825 			  rule->tuples.dst_ip, IPV6_SIZE);
6826 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6827 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6828 	else
6829 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6830 				  IPV6_SIZE);
6831 
6832 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6833 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6834 	else
6835 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6836 				  IPV6_SIZE);
6837 
6838 	spec->tclass = rule->tuples.ip_tos;
6839 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6840 			0 : rule->tuples_mask.ip_tos;
6841 
6842 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6843 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6844 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6845 
6846 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6847 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6848 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6849 }
6850 
6851 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6852 				  struct ethtool_usrip6_spec *spec,
6853 				  struct ethtool_usrip6_spec *spec_mask)
6854 {
6855 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6856 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6857 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6858 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6859 	else
6860 		cpu_to_be32_array(spec_mask->ip6src,
6861 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6862 
6863 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6864 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6865 	else
6866 		cpu_to_be32_array(spec_mask->ip6dst,
6867 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6868 
6869 	spec->tclass = rule->tuples.ip_tos;
6870 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6871 			0 : rule->tuples_mask.ip_tos;
6872 
6873 	spec->l4_proto = rule->tuples.ip_proto;
6874 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6875 			0 : rule->tuples_mask.ip_proto;
6876 }
6877 
6878 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6879 				    struct ethhdr *spec,
6880 				    struct ethhdr *spec_mask)
6881 {
6882 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6883 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6884 
6885 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6886 		eth_zero_addr(spec_mask->h_source);
6887 	else
6888 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6889 
6890 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6891 		eth_zero_addr(spec_mask->h_dest);
6892 	else
6893 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6894 
6895 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6896 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6897 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6898 }
6899 
6900 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6901 				       struct hclge_fd_rule *rule)
6902 {
6903 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6904 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6905 		fs->h_ext.data[0] = 0;
6906 		fs->h_ext.data[1] = 0;
6907 		fs->m_ext.data[0] = 0;
6908 		fs->m_ext.data[1] = 0;
6909 	} else {
6910 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6911 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6912 		fs->m_ext.data[0] =
6913 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6914 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6915 	}
6916 }
6917 
6918 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6919 				  struct hclge_fd_rule *rule)
6920 {
6921 	if (fs->flow_type & FLOW_EXT) {
6922 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6923 		fs->m_ext.vlan_tci =
6924 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6925 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6926 
6927 		hclge_fd_get_user_def_info(fs, rule);
6928 	}
6929 
6930 	if (fs->flow_type & FLOW_MAC_EXT) {
6931 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6932 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6933 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6934 		else
6935 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6936 					rule->tuples_mask.dst_mac);
6937 	}
6938 }
6939 
6940 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6941 				  struct ethtool_rxnfc *cmd)
6942 {
6943 	struct hclge_vport *vport = hclge_get_vport(handle);
6944 	struct hclge_fd_rule *rule = NULL;
6945 	struct hclge_dev *hdev = vport->back;
6946 	struct ethtool_rx_flow_spec *fs;
6947 	struct hlist_node *node2;
6948 
6949 	if (!hnae3_dev_fd_supported(hdev))
6950 		return -EOPNOTSUPP;
6951 
6952 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6953 
6954 	spin_lock_bh(&hdev->fd_rule_lock);
6955 
6956 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6957 		if (rule->location >= fs->location)
6958 			break;
6959 	}
6960 
6961 	if (!rule || fs->location != rule->location) {
6962 		spin_unlock_bh(&hdev->fd_rule_lock);
6963 
6964 		return -ENOENT;
6965 	}
6966 
6967 	fs->flow_type = rule->flow_type;
6968 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6969 	case SCTP_V4_FLOW:
6970 	case TCP_V4_FLOW:
6971 	case UDP_V4_FLOW:
6972 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6973 					 &fs->m_u.tcp_ip4_spec);
6974 		break;
6975 	case IP_USER_FLOW:
6976 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6977 				      &fs->m_u.usr_ip4_spec);
6978 		break;
6979 	case SCTP_V6_FLOW:
6980 	case TCP_V6_FLOW:
6981 	case UDP_V6_FLOW:
6982 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6983 					 &fs->m_u.tcp_ip6_spec);
6984 		break;
6985 	case IPV6_USER_FLOW:
6986 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6987 				      &fs->m_u.usr_ip6_spec);
6988 		break;
6989 	/* The flow type of the fd rule has been checked before it was added
6990 	 * to the rule list. As the other flow types have been handled above,
6991 	 * the default case must be ETHER_FLOW.
6992 	 */
6993 	default:
6994 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6995 					&fs->m_u.ether_spec);
6996 		break;
6997 	}
6998 
6999 	hclge_fd_get_ext_info(fs, rule);
7000 
7001 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7002 		fs->ring_cookie = RX_CLS_FLOW_DISC;
7003 	} else {
7004 		u64 vf_id;
7005 
7006 		fs->ring_cookie = rule->queue_id;
7007 		vf_id = rule->vf_id;
7008 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7009 		fs->ring_cookie |= vf_id;
7010 	}
7011 
7012 	spin_unlock_bh(&hdev->fd_rule_lock);
7013 
7014 	return 0;
7015 }
7016 
7017 static int hclge_get_all_rules(struct hnae3_handle *handle,
7018 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
7019 {
7020 	struct hclge_vport *vport = hclge_get_vport(handle);
7021 	struct hclge_dev *hdev = vport->back;
7022 	struct hclge_fd_rule *rule;
7023 	struct hlist_node *node2;
7024 	int cnt = 0;
7025 
7026 	if (!hnae3_dev_fd_supported(hdev))
7027 		return -EOPNOTSUPP;
7028 
7029 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7030 
7031 	spin_lock_bh(&hdev->fd_rule_lock);
7032 	hlist_for_each_entry_safe(rule, node2,
7033 				  &hdev->fd_rule_list, rule_node) {
7034 		if (cnt == cmd->rule_cnt) {
7035 			spin_unlock_bh(&hdev->fd_rule_lock);
7036 			return -EMSGSIZE;
7037 		}
7038 
7039 		if (rule->state == HCLGE_FD_TO_DEL)
7040 			continue;
7041 
7042 		rule_locs[cnt] = rule->location;
7043 		cnt++;
7044 	}
7045 
7046 	spin_unlock_bh(&hdev->fd_rule_lock);
7047 
7048 	cmd->rule_cnt = cnt;
7049 
7050 	return 0;
7051 }
7052 
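/* Extract the tuples used by aRFS from the dissected flow keys. An IPv4
 * address is stored in the last word of the IPv6-sized address array.
 */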
7053 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7054 				     struct hclge_fd_rule_tuples *tuples)
7055 {
7056 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7057 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7058 
7059 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7060 	tuples->ip_proto = fkeys->basic.ip_proto;
7061 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7062 
7063 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7064 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7065 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7066 	} else {
7067 		int i;
7068 
7069 		for (i = 0; i < IPV6_SIZE; i++) {
7070 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7071 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7072 		}
7073 	}
7074 }
7075 
7076 /* traverse all rules, checking whether an existing rule has the same tuples */
7077 static struct hclge_fd_rule *
7078 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7079 			  const struct hclge_fd_rule_tuples *tuples)
7080 {
7081 	struct hclge_fd_rule *rule = NULL;
7082 	struct hlist_node *node;
7083 
7084 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7085 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7086 			return rule;
7087 	}
7088 
7089 	return NULL;
7090 }
7091 
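/* aRFS rules match on the IP addresses, IP protocol and destination port;
 * the MAC address, VLAN tag, IP TOS and source port tuples are marked as
 * unused.
 */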
7092 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7093 				     struct hclge_fd_rule *rule)
7094 {
7095 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7096 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7097 			     BIT(INNER_SRC_PORT);
7098 	rule->action = 0;
7099 	rule->vf_id = 0;
7100 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7101 	rule->state = HCLGE_FD_TO_ADD;
7102 	if (tuples->ether_proto == ETH_P_IP) {
7103 		if (tuples->ip_proto == IPPROTO_TCP)
7104 			rule->flow_type = TCP_V4_FLOW;
7105 		else
7106 			rule->flow_type = UDP_V4_FLOW;
7107 	} else {
7108 		if (tuples->ip_proto == IPPROTO_TCP)
7109 			rule->flow_type = TCP_V6_FLOW;
7110 		else
7111 			rule->flow_type = UDP_V6_FLOW;
7112 	}
7113 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7114 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7115 }
7116 
7117 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7118 				      u16 flow_id, struct flow_keys *fkeys)
7119 {
7120 	struct hclge_vport *vport = hclge_get_vport(handle);
7121 	struct hclge_fd_rule_tuples new_tuples = {};
7122 	struct hclge_dev *hdev = vport->back;
7123 	struct hclge_fd_rule *rule;
7124 	u16 bit_id;
7125 
7126 	if (!hnae3_dev_fd_supported(hdev))
7127 		return -EOPNOTSUPP;
7128 
7129 	/* when an fd rule added by the user already exists,
7130 	 * arfs should not work
7131 	 */
7132 	spin_lock_bh(&hdev->fd_rule_lock);
7133 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7134 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7135 		spin_unlock_bh(&hdev->fd_rule_lock);
7136 		return -EOPNOTSUPP;
7137 	}
7138 
7139 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7140 
7141 	/* check whether a flow director filter already exists for this flow:
7142 	 * if not, create a new filter for it;
7143 	 * if a filter exists with a different queue id, modify the filter;
7144 	 * if a filter exists with the same queue id, do nothing
7145 	 */
7146 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7147 	if (!rule) {
7148 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7149 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7150 			spin_unlock_bh(&hdev->fd_rule_lock);
7151 			return -ENOSPC;
7152 		}
7153 
7154 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7155 		if (!rule) {
7156 			spin_unlock_bh(&hdev->fd_rule_lock);
7157 			return -ENOMEM;
7158 		}
7159 
7160 		rule->location = bit_id;
7161 		rule->arfs.flow_id = flow_id;
7162 		rule->queue_id = queue_id;
7163 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7164 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7165 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7166 	} else if (rule->queue_id != queue_id) {
7167 		rule->queue_id = queue_id;
7168 		rule->state = HCLGE_FD_TO_ADD;
7169 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7170 		hclge_task_schedule(hdev, 0);
7171 	}
7172 	spin_unlock_bh(&hdev->fd_rule_lock);
7173 	return rule->location;
7174 }
7175 
7176 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7177 {
7178 #ifdef CONFIG_RFS_ACCEL
7179 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7180 	struct hclge_fd_rule *rule;
7181 	struct hlist_node *node;
7182 
7183 	spin_lock_bh(&hdev->fd_rule_lock);
7184 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7185 		spin_unlock_bh(&hdev->fd_rule_lock);
7186 		return;
7187 	}
7188 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7189 		if (rule->state != HCLGE_FD_ACTIVE)
7190 			continue;
7191 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7192 					rule->arfs.flow_id, rule->location)) {
7193 			rule->state = HCLGE_FD_TO_DEL;
7194 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7195 		}
7196 	}
7197 	spin_unlock_bh(&hdev->fd_rule_lock);
7198 #endif
7199 }
7200 
7201 /* must be called with fd_rule_lock held */
7202 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7203 {
7204 #ifdef CONFIG_RFS_ACCEL
7205 	struct hclge_fd_rule *rule;
7206 	struct hlist_node *node;
7207 	int ret;
7208 
7209 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7210 		return 0;
7211 
7212 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7213 		switch (rule->state) {
7214 		case HCLGE_FD_TO_DEL:
7215 		case HCLGE_FD_ACTIVE:
7216 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7217 						   rule->location, NULL, false);
7218 			if (ret)
7219 				return ret;
7220 			fallthrough;
7221 		case HCLGE_FD_TO_ADD:
7222 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7223 			hlist_del(&rule->rule_node);
7224 			kfree(rule);
7225 			break;
7226 		default:
7227 			break;
7228 		}
7229 	}
7230 	hclge_sync_fd_state(hdev);
7231 
7232 #endif
7233 	return 0;
7234 }
7235 
7236 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7237 				    struct hclge_fd_rule *rule)
7238 {
7239 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7240 		struct flow_match_basic match;
7241 		u16 ethtype_key, ethtype_mask;
7242 
7243 		flow_rule_match_basic(flow, &match);
7244 		ethtype_key = ntohs(match.key->n_proto);
7245 		ethtype_mask = ntohs(match.mask->n_proto);
7246 
7247 		if (ethtype_key == ETH_P_ALL) {
7248 			ethtype_key = 0;
7249 			ethtype_mask = 0;
7250 		}
7251 		rule->tuples.ether_proto = ethtype_key;
7252 		rule->tuples_mask.ether_proto = ethtype_mask;
7253 		rule->tuples.ip_proto = match.key->ip_proto;
7254 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7255 	} else {
7256 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7257 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7258 	}
7259 }
7260 
7261 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7262 				  struct hclge_fd_rule *rule)
7263 {
7264 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7265 		struct flow_match_eth_addrs match;
7266 
7267 		flow_rule_match_eth_addrs(flow, &match);
7268 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7269 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7270 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7271 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7272 	} else {
7273 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7274 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7275 	}
7276 }
7277 
7278 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7279 				   struct hclge_fd_rule *rule)
7280 {
7281 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7282 		struct flow_match_vlan match;
7283 
7284 		flow_rule_match_vlan(flow, &match);
7285 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7286 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7287 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7288 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7289 	} else {
7290 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7291 	}
7292 }
7293 
7294 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7295 				 struct hclge_fd_rule *rule)
7296 {
7297 	u16 addr_type = 0;
7298 
7299 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7300 		struct flow_match_control match;
7301 
7302 		flow_rule_match_control(flow, &match);
7303 		addr_type = match.key->addr_type;
7304 	}
7305 
7306 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7307 		struct flow_match_ipv4_addrs match;
7308 
7309 		flow_rule_match_ipv4_addrs(flow, &match);
7310 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7311 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7312 						be32_to_cpu(match.mask->src);
7313 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7314 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7315 						be32_to_cpu(match.mask->dst);
7316 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7317 		struct flow_match_ipv6_addrs match;
7318 
7319 		flow_rule_match_ipv6_addrs(flow, &match);
7320 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7321 				  IPV6_SIZE);
7322 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7323 				  match.mask->src.s6_addr32, IPV6_SIZE);
7324 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7325 				  IPV6_SIZE);
7326 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7327 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7328 	} else {
7329 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7330 		rule->unused_tuple |= BIT(INNER_DST_IP);
7331 	}
7332 }
7333 
7334 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7335 				   struct hclge_fd_rule *rule)
7336 {
7337 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7338 		struct flow_match_ports match;
7339 
7340 		flow_rule_match_ports(flow, &match);
7341 
7342 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7343 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7344 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7345 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7346 	} else {
7347 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7348 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7349 	}
7350 }
7351 
7352 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7353 				  struct flow_cls_offload *cls_flower,
7354 				  struct hclge_fd_rule *rule)
7355 {
7356 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7357 	struct flow_dissector *dissector = flow->match.dissector;
7358 
7359 	if (dissector->used_keys &
7360 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7361 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7362 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7363 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7364 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7365 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7366 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7367 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7368 			dissector->used_keys);
7369 		return -EOPNOTSUPP;
7370 	}
7371 
7372 	hclge_get_cls_key_basic(flow, rule);
7373 	hclge_get_cls_key_mac(flow, rule);
7374 	hclge_get_cls_key_vlan(flow, rule);
7375 	hclge_get_cls_key_ip(flow, rule);
7376 	hclge_get_cls_key_port(flow, rule);
7377 
7378 	return 0;
7379 }
7380 
7381 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7382 				  struct flow_cls_offload *cls_flower, int tc)
7383 {
7384 	u32 prio = cls_flower->common.prio;
7385 
7386 	if (tc < 0 || tc > hdev->tc_max) {
7387 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7388 		return -EINVAL;
7389 	}
7390 
7391 	if (prio == 0 ||
7392 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7393 		dev_err(&hdev->pdev->dev,
7394 			"prio %u should be in range[1, %u]\n",
7395 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7396 		return -EINVAL;
7397 	}
7398 
7399 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7400 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7401 		return -EINVAL;
7402 	}
7403 	return 0;
7404 }
7405 
7406 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7407 				struct flow_cls_offload *cls_flower,
7408 				int tc)
7409 {
7410 	struct hclge_vport *vport = hclge_get_vport(handle);
7411 	struct hclge_dev *hdev = vport->back;
7412 	struct hclge_fd_rule *rule;
7413 	int ret;
7414 
7415 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7416 	if (ret) {
7417 		dev_err(&hdev->pdev->dev,
7418 			"failed to check cls flower params, ret = %d\n", ret);
7419 		return ret;
7420 	}
7421 
7422 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7423 	if (!rule)
7424 		return -ENOMEM;
7425 
7426 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7427 	if (ret) {
7428 		kfree(rule);
7429 		return ret;
7430 	}
7431 
7432 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7433 	rule->cls_flower.tc = tc;
7434 	rule->location = cls_flower->common.prio - 1;
7435 	rule->vf_id = 0;
7436 	rule->cls_flower.cookie = cls_flower->cookie;
7437 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7438 
7439 	ret = hclge_add_fd_entry_common(hdev, rule);
7440 	if (ret)
7441 		kfree(rule);
7442 
7443 	return ret;
7444 }
7445 
7446 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7447 						   unsigned long cookie)
7448 {
7449 	struct hclge_fd_rule *rule;
7450 	struct hlist_node *node;
7451 
7452 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7453 		if (rule->cls_flower.cookie == cookie)
7454 			return rule;
7455 	}
7456 
7457 	return NULL;
7458 }
7459 
7460 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7461 				struct flow_cls_offload *cls_flower)
7462 {
7463 	struct hclge_vport *vport = hclge_get_vport(handle);
7464 	struct hclge_dev *hdev = vport->back;
7465 	struct hclge_fd_rule *rule;
7466 	int ret;
7467 
7468 	spin_lock_bh(&hdev->fd_rule_lock);
7469 
7470 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7471 	if (!rule) {
7472 		spin_unlock_bh(&hdev->fd_rule_lock);
7473 		return -EINVAL;
7474 	}
7475 
7476 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7477 				   NULL, false);
7478 	if (ret) {
7479 		spin_unlock_bh(&hdev->fd_rule_lock);
7480 		return ret;
7481 	}
7482 
7483 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7484 	spin_unlock_bh(&hdev->fd_rule_lock);
7485 
7486 	return 0;
7487 }
7488 
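/* If the fd table has changed, write the pending rule updates to hardware:
 * TO_ADD rules are programmed and marked ACTIVE, TO_DEL rules are removed
 * from the TCAM and freed. On failure the changed flag is set again so the
 * service task retries later.
 */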
7489 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7490 {
7491 	struct hclge_fd_rule *rule;
7492 	struct hlist_node *node;
7493 	int ret = 0;
7494 
7495 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7496 		return;
7497 
7498 	spin_lock_bh(&hdev->fd_rule_lock);
7499 
7500 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7501 		switch (rule->state) {
7502 		case HCLGE_FD_TO_ADD:
7503 			ret = hclge_fd_config_rule(hdev, rule);
7504 			if (ret)
7505 				goto out;
7506 			rule->state = HCLGE_FD_ACTIVE;
7507 			break;
7508 		case HCLGE_FD_TO_DEL:
7509 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7510 						   rule->location, NULL, false);
7511 			if (ret)
7512 				goto out;
7513 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7514 			hclge_fd_free_node(hdev, rule);
7515 			break;
7516 		default:
7517 			break;
7518 		}
7519 	}
7520 
7521 out:
7522 	if (ret)
7523 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7524 
7525 	spin_unlock_bh(&hdev->fd_rule_lock);
7526 }
7527 
7528 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7529 {
7530 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7531 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7532 
7533 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7534 	}
7535 
7536 	hclge_sync_fd_user_def_cfg(hdev, false);
7537 
7538 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7539 }
7540 
7541 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7542 {
7543 	struct hclge_vport *vport = hclge_get_vport(handle);
7544 	struct hclge_dev *hdev = vport->back;
7545 
7546 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7547 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7548 }
7549 
7550 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7551 {
7552 	struct hclge_vport *vport = hclge_get_vport(handle);
7553 	struct hclge_dev *hdev = vport->back;
7554 
7555 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7556 }
7557 
7558 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7559 {
7560 	struct hclge_vport *vport = hclge_get_vport(handle);
7561 	struct hclge_dev *hdev = vport->back;
7562 
7563 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7564 }
7565 
7566 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7567 {
7568 	struct hclge_vport *vport = hclge_get_vport(handle);
7569 	struct hclge_dev *hdev = vport->back;
7570 
7571 	return hdev->rst_stats.hw_reset_done_cnt;
7572 }
7573 
7574 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7575 {
7576 	struct hclge_vport *vport = hclge_get_vport(handle);
7577 	struct hclge_dev *hdev = vport->back;
7578 
7579 	hdev->fd_en = enable;
7580 
7581 	if (!enable)
7582 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7583 	else
7584 		hclge_restore_fd_entries(handle);
7585 
7586 	hclge_task_schedule(hdev, 0);
7587 }
7588 
7589 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7590 {
7591 	struct hclge_desc desc;
7592 	struct hclge_config_mac_mode_cmd *req =
7593 		(struct hclge_config_mac_mode_cmd *)desc.data;
7594 	u32 loop_en = 0;
7595 	int ret;
7596 
7597 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7598 
7599 	if (enable) {
7600 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7601 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7602 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7603 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7604 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7605 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7606 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7607 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7608 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7609 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7610 	}
7611 
7612 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7613 
7614 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7615 	if (ret)
7616 		dev_err(&hdev->pdev->dev,
7617 			"mac enable fail, ret =%d.\n", ret);
7618 }
7619 
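/* Read the current MAC/VLAN switch parameter of the given function from
 * firmware, apply switch_param under param_mask, and write it back.
 */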
7620 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7621 				     u8 switch_param, u8 param_mask)
7622 {
7623 	struct hclge_mac_vlan_switch_cmd *req;
7624 	struct hclge_desc desc;
7625 	u32 func_id;
7626 	int ret;
7627 
7628 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7629 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7630 
7631 	/* read current config parameter */
7632 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7633 				   true);
7634 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7635 	req->func_id = cpu_to_le32(func_id);
7636 
7637 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7638 	if (ret) {
7639 		dev_err(&hdev->pdev->dev,
7640 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7641 		return ret;
7642 	}
7643 
7644 	/* modify and write new config parameter */
7645 	hclge_cmd_reuse_desc(&desc, false);
7646 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7647 	req->param_mask = param_mask;
7648 
7649 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7650 	if (ret)
7651 		dev_err(&hdev->pdev->dev,
7652 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7653 	return ret;
7654 }
7655 
7656 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7657 				       int link_ret)
7658 {
7659 #define HCLGE_PHY_LINK_STATUS_NUM  200
7660 
7661 	struct phy_device *phydev = hdev->hw.mac.phydev;
7662 	int i = 0;
7663 	int ret;
7664 
7665 	do {
7666 		ret = phy_read_status(phydev);
7667 		if (ret) {
7668 			dev_err(&hdev->pdev->dev,
7669 				"phy update link status fail, ret = %d\n", ret);
7670 			return;
7671 		}
7672 
7673 		if (phydev->link == link_ret)
7674 			break;
7675 
7676 		msleep(HCLGE_LINK_STATUS_MS);
7677 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7678 }
7679 
7680 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7681 {
7682 #define HCLGE_MAC_LINK_STATUS_NUM  100
7683 
7684 	int link_status;
7685 	int i = 0;
7686 	int ret;
7687 
7688 	do {
7689 		ret = hclge_get_mac_link_status(hdev, &link_status);
7690 		if (ret)
7691 			return ret;
7692 		if (link_status == link_ret)
7693 			return 0;
7694 
7695 		msleep(HCLGE_LINK_STATUS_MS);
7696 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7697 	return -EBUSY;
7698 }
7699 
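/* Wait for the PHY (when is_phy is set) and then the MAC to report the
 * expected link state after a loopback or MAC mode change.
 */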
7700 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7701 					  bool is_phy)
7702 {
7703 	int link_ret;
7704 
7705 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7706 
7707 	if (is_phy)
7708 		hclge_phy_link_status_wait(hdev, link_ret);
7709 
7710 	return hclge_mac_link_status_wait(hdev, link_ret);
7711 }
7712 
7713 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7714 {
7715 	struct hclge_config_mac_mode_cmd *req;
7716 	struct hclge_desc desc;
7717 	u32 loop_en;
7718 	int ret;
7719 
7720 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7721 	/* 1 Read out the MAC mode config at first */
7722 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7723 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7724 	if (ret) {
7725 		dev_err(&hdev->pdev->dev,
7726 			"mac loopback get fail, ret =%d.\n", ret);
7727 		return ret;
7728 	}
7729 
7730 	/* 2 Then setup the loopback flag */
7731 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7732 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7733 
7734 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7735 
7736 	/* 3 Config mac work mode with the loopback flag
7737 	 * and its original configuration parameters
7738 	 */
7739 	hclge_cmd_reuse_desc(&desc, false);
7740 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7741 	if (ret)
7742 		dev_err(&hdev->pdev->dev,
7743 			"mac loopback set fail, ret =%d.\n", ret);
7744 	return ret;
7745 }
7746 
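/* Ask the firmware to enable or disable the selected serdes/PHY internal
 * loopback and poll the command result until it reports done or the
 * retries are exhausted.
 */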
7747 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7748 				     enum hnae3_loop loop_mode)
7749 {
7750 #define HCLGE_COMMON_LB_RETRY_MS	10
7751 #define HCLGE_COMMON_LB_RETRY_NUM	100
7752 
7753 	struct hclge_common_lb_cmd *req;
7754 	struct hclge_desc desc;
7755 	int ret, i = 0;
7756 	u8 loop_mode_b;
7757 
7758 	req = (struct hclge_common_lb_cmd *)desc.data;
7759 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7760 
7761 	switch (loop_mode) {
7762 	case HNAE3_LOOP_SERIAL_SERDES:
7763 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7764 		break;
7765 	case HNAE3_LOOP_PARALLEL_SERDES:
7766 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7767 		break;
7768 	case HNAE3_LOOP_PHY:
7769 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7770 		break;
7771 	default:
7772 		dev_err(&hdev->pdev->dev,
7773 			"unsupported common loopback mode %d\n", loop_mode);
7774 		return -ENOTSUPP;
7775 	}
7776 
7777 	if (en) {
7778 		req->enable = loop_mode_b;
7779 		req->mask = loop_mode_b;
7780 	} else {
7781 		req->mask = loop_mode_b;
7782 	}
7783 
7784 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7785 	if (ret) {
7786 		dev_err(&hdev->pdev->dev,
7787 			"common loopback set fail, ret = %d\n", ret);
7788 		return ret;
7789 	}
7790 
7791 	do {
7792 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7793 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7794 					   true);
7795 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7796 		if (ret) {
7797 			dev_err(&hdev->pdev->dev,
7798 				"common loopback get, ret = %d\n", ret);
7799 			return ret;
7800 		}
7801 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7802 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7803 
7804 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7805 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7806 		return -EBUSY;
7807 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7808 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7809 		return -EIO;
7810 	}
7811 	return ret;
7812 }
7813 
7814 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7815 				     enum hnae3_loop loop_mode)
7816 {
7817 	int ret;
7818 
7819 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7820 	if (ret)
7821 		return ret;
7822 
7823 	hclge_cfg_mac_mode(hdev, en);
7824 
7825 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7826 	if (ret)
7827 		dev_err(&hdev->pdev->dev,
7828 			"serdes loopback config mac mode timeout\n");
7829 
7830 	return ret;
7831 }
7832 
7833 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7834 				     struct phy_device *phydev)
7835 {
7836 	int ret;
7837 
7838 	if (!phydev->suspended) {
7839 		ret = phy_suspend(phydev);
7840 		if (ret)
7841 			return ret;
7842 	}
7843 
7844 	ret = phy_resume(phydev);
7845 	if (ret)
7846 		return ret;
7847 
7848 	return phy_loopback(phydev, true);
7849 }
7850 
7851 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7852 				      struct phy_device *phydev)
7853 {
7854 	int ret;
7855 
7856 	ret = phy_loopback(phydev, false);
7857 	if (ret)
7858 		return ret;
7859 
7860 	return phy_suspend(phydev);
7861 }
7862 
7863 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7864 {
7865 	struct phy_device *phydev = hdev->hw.mac.phydev;
7866 	int ret;
7867 
7868 	if (!phydev) {
7869 		if (hnae3_dev_phy_imp_supported(hdev))
7870 			return hclge_set_common_loopback(hdev, en,
7871 							 HNAE3_LOOP_PHY);
7872 		return -ENOTSUPP;
7873 	}
7874 
7875 	if (en)
7876 		ret = hclge_enable_phy_loopback(hdev, phydev);
7877 	else
7878 		ret = hclge_disable_phy_loopback(hdev, phydev);
7879 	if (ret) {
7880 		dev_err(&hdev->pdev->dev,
7881 			"set phy loopback fail, ret = %d\n", ret);
7882 		return ret;
7883 	}
7884 
7885 	hclge_cfg_mac_mode(hdev, en);
7886 
7887 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7888 	if (ret)
7889 		dev_err(&hdev->pdev->dev,
7890 			"phy loopback config mac mode timeout\n");
7891 
7892 	return ret;
7893 }
7894 
7895 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7896 				     u16 stream_id, bool enable)
7897 {
7898 	struct hclge_desc desc;
7899 	struct hclge_cfg_com_tqp_queue_cmd *req =
7900 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7901 
7902 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7903 	req->tqp_id = cpu_to_le16(tqp_id);
7904 	req->stream_id = cpu_to_le16(stream_id);
7905 	if (enable)
7906 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7907 
7908 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7909 }
7910 
7911 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7912 {
7913 	struct hclge_vport *vport = hclge_get_vport(handle);
7914 	struct hclge_dev *hdev = vport->back;
7915 	int ret;
7916 	u16 i;
7917 
7918 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
7919 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7920 		if (ret)
7921 			return ret;
7922 	}
7923 	return 0;
7924 }
7925 
7926 static int hclge_set_loopback(struct hnae3_handle *handle,
7927 			      enum hnae3_loop loop_mode, bool en)
7928 {
7929 	struct hclge_vport *vport = hclge_get_vport(handle);
7930 	struct hclge_dev *hdev = vport->back;
7931 	int ret;
7932 
7933 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7934 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7935 	 * the same, the packets are looped back in the SSU. If SSU loopback
7936 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7937 	 */
7938 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7939 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7940 
7941 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7942 						HCLGE_SWITCH_ALW_LPBK_MASK);
7943 		if (ret)
7944 			return ret;
7945 	}
7946 
7947 	switch (loop_mode) {
7948 	case HNAE3_LOOP_APP:
7949 		ret = hclge_set_app_loopback(hdev, en);
7950 		break;
7951 	case HNAE3_LOOP_SERIAL_SERDES:
7952 	case HNAE3_LOOP_PARALLEL_SERDES:
7953 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
7954 		break;
7955 	case HNAE3_LOOP_PHY:
7956 		ret = hclge_set_phy_loopback(hdev, en);
7957 		break;
7958 	default:
7959 		ret = -ENOTSUPP;
7960 		dev_err(&hdev->pdev->dev,
7961 			"loop_mode %d is not supported\n", loop_mode);
7962 		break;
7963 	}
7964 
7965 	if (ret)
7966 		return ret;
7967 
7968 	ret = hclge_tqp_enable(handle, en);
7969 	if (ret)
7970 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7971 			en ? "enable" : "disable", ret);
7972 
7973 	return ret;
7974 }
7975 
7976 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7977 {
7978 	int ret;
7979 
7980 	ret = hclge_set_app_loopback(hdev, false);
7981 	if (ret)
7982 		return ret;
7983 
7984 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7985 	if (ret)
7986 		return ret;
7987 
7988 	return hclge_cfg_common_loopback(hdev, false,
7989 					 HNAE3_LOOP_PARALLEL_SERDES);
7990 }
7991 
7992 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7993 {
7994 	struct hclge_vport *vport = hclge_get_vport(handle);
7995 	struct hnae3_knic_private_info *kinfo;
7996 	struct hnae3_queue *queue;
7997 	struct hclge_tqp *tqp;
7998 	int i;
7999 
8000 	kinfo = &vport->nic.kinfo;
8001 	for (i = 0; i < kinfo->num_tqps; i++) {
8002 		queue = handle->kinfo.tqp[i];
8003 		tqp = container_of(queue, struct hclge_tqp, q);
8004 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8005 	}
8006 }
8007 
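/* Poll until the service task finishes any in-flight link status update,
 * completes another service round, or the timeout expires.
 */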
8008 static void hclge_flush_link_update(struct hclge_dev *hdev)
8009 {
8010 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
8011 
8012 	unsigned long last = hdev->serv_processed_cnt;
8013 	int i = 0;
8014 
8015 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8016 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8017 	       last == hdev->serv_processed_cnt)
8018 		usleep_range(1, 1);
8019 }
8020 
8021 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8022 {
8023 	struct hclge_vport *vport = hclge_get_vport(handle);
8024 	struct hclge_dev *hdev = vport->back;
8025 
8026 	if (enable) {
8027 		hclge_task_schedule(hdev, 0);
8028 	} else {
8029 		/* Set the DOWN flag here to disable link updating */
8030 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
8031 
8032 		/* flush memory to make sure DOWN is seen by service task */
8033 		smp_mb__before_atomic();
8034 		hclge_flush_link_update(hdev);
8035 	}
8036 }
8037 
8038 static int hclge_ae_start(struct hnae3_handle *handle)
8039 {
8040 	struct hclge_vport *vport = hclge_get_vport(handle);
8041 	struct hclge_dev *hdev = vport->back;
8042 
8043 	/* mac enable */
8044 	hclge_cfg_mac_mode(hdev, true);
8045 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8046 	hdev->hw.mac.link = 0;
8047 
8048 	/* reset tqp stats */
8049 	hclge_reset_tqp_stats(handle);
8050 
8051 	hclge_mac_start_phy(hdev);
8052 
8053 	return 0;
8054 }
8055 
8056 static void hclge_ae_stop(struct hnae3_handle *handle)
8057 {
8058 	struct hclge_vport *vport = hclge_get_vport(handle);
8059 	struct hclge_dev *hdev = vport->back;
8060 
8061 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8062 	spin_lock_bh(&hdev->fd_rule_lock);
8063 	hclge_clear_arfs_rules(hdev);
8064 	spin_unlock_bh(&hdev->fd_rule_lock);
8065 
8066 	/* If it is not a PF reset, the firmware will disable the MAC,
8067 	 * so we only need to stop the phy here.
8068 	 */
8069 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8070 	    hdev->reset_type != HNAE3_FUNC_RESET) {
8071 		hclge_mac_stop_phy(hdev);
8072 		hclge_update_link_status(hdev);
8073 		return;
8074 	}
8075 
8076 	hclge_reset_tqp(handle);
8077 
8078 	hclge_config_mac_tnl_int(hdev, false);
8079 
8080 	/* Mac disable */
8081 	hclge_cfg_mac_mode(hdev, false);
8082 
8083 	hclge_mac_stop_phy(hdev);
8084 
8085 	/* reset tqp stats */
8086 	hclge_reset_tqp_stats(handle);
8087 	hclge_update_link_status(hdev);
8088 }
8089 
8090 int hclge_vport_start(struct hclge_vport *vport)
8091 {
8092 	struct hclge_dev *hdev = vport->back;
8093 
8094 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8095 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8096 	vport->last_active_jiffies = jiffies;
8097 
8098 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8099 		if (vport->vport_id) {
8100 			hclge_restore_mac_table_common(vport);
8101 			hclge_restore_vport_vlan_table(vport);
8102 		} else {
8103 			hclge_restore_hw_table(hdev);
8104 		}
8105 	}
8106 
8107 	clear_bit(vport->vport_id, hdev->vport_config_block);
8108 
8109 	return 0;
8110 }
8111 
8112 void hclge_vport_stop(struct hclge_vport *vport)
8113 {
8114 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8115 }
8116 
8117 static int hclge_client_start(struct hnae3_handle *handle)
8118 {
8119 	struct hclge_vport *vport = hclge_get_vport(handle);
8120 
8121 	return hclge_vport_start(vport);
8122 }
8123 
8124 static void hclge_client_stop(struct hnae3_handle *handle)
8125 {
8126 	struct hclge_vport *vport = hclge_get_vport(handle);
8127 
8128 	hclge_vport_stop(vport);
8129 }
8130 
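/* Translate the firmware completion status and response code of a MAC/VLAN
 * table command into an errno, according to whether the operation was an
 * add, remove or lookup.
 */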
8131 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8132 					 u16 cmdq_resp, u8  resp_code,
8133 					 enum hclge_mac_vlan_tbl_opcode op)
8134 {
8135 	struct hclge_dev *hdev = vport->back;
8136 
8137 	if (cmdq_resp) {
8138 		dev_err(&hdev->pdev->dev,
8139 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8140 			cmdq_resp);
8141 		return -EIO;
8142 	}
8143 
8144 	if (op == HCLGE_MAC_VLAN_ADD) {
8145 		if (!resp_code || resp_code == 1)
8146 			return 0;
8147 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8148 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8149 			return -ENOSPC;
8150 
8151 		dev_err(&hdev->pdev->dev,
8152 			"add mac addr failed for undefined, code=%u.\n",
8153 			resp_code);
8154 		return -EIO;
8155 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8156 		if (!resp_code) {
8157 			return 0;
8158 		} else if (resp_code == 1) {
8159 			dev_dbg(&hdev->pdev->dev,
8160 				"remove mac addr failed for miss.\n");
8161 			return -ENOENT;
8162 		}
8163 
8164 		dev_err(&hdev->pdev->dev,
8165 			"remove mac addr failed for undefined, code=%u.\n",
8166 			resp_code);
8167 		return -EIO;
8168 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8169 		if (!resp_code) {
8170 			return 0;
8171 		} else if (resp_code == 1) {
8172 			dev_dbg(&hdev->pdev->dev,
8173 				"lookup mac addr failed for miss.\n");
8174 			return -ENOENT;
8175 		}
8176 
8177 		dev_err(&hdev->pdev->dev,
8178 			"lookup mac addr failed for undefined, code=%u.\n",
8179 			resp_code);
8180 		return -EIO;
8181 	}
8182 
8183 	dev_err(&hdev->pdev->dev,
8184 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8185 
8186 	return -EINVAL;
8187 }
8188 
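/* Set or clear the bit of the given function id in the MAC/VLAN table
 * entry's function bitmap: desc[1] covers function ids 0~191 (six 32-bit
 * words) and desc[2] covers function ids 192~255.
 */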
8189 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8190 {
8191 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8192 
8193 	unsigned int word_num;
8194 	unsigned int bit_num;
8195 
8196 	if (vfid > 255 || vfid < 0)
8197 		return -EIO;
8198 
8199 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8200 		word_num = vfid / 32;
8201 		bit_num  = vfid % 32;
8202 		if (clr)
8203 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8204 		else
8205 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8206 	} else {
8207 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8208 		bit_num  = vfid % 32;
8209 		if (clr)
8210 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8211 		else
8212 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8213 	}
8214 
8215 	return 0;
8216 }
8217 
8218 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8219 {
8220 #define HCLGE_DESC_NUMBER 3
8221 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8222 	int i, j;
8223 
8224 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8225 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8226 			if (desc[i].data[j])
8227 				return false;
8228 
8229 	return true;
8230 }
8231 
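/* Pack the MAC address into the table entry: bytes 0~3 go into
 * mac_addr_hi32 (byte 0 in the lowest 8 bits) and bytes 4~5 into
 * mac_addr_lo16; multicast entries also get the multicast entry type
 * and enable bits set.
 */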
8232 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8233 				   const u8 *addr, bool is_mc)
8234 {
8235 	const unsigned char *mac_addr = addr;
8236 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8237 		       (mac_addr[0]) | (mac_addr[1] << 8);
8238 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8239 
8240 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8241 	if (is_mc) {
8242 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8243 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8244 	}
8245 
8246 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8247 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8248 }
8249 
8250 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8251 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8252 {
8253 	struct hclge_dev *hdev = vport->back;
8254 	struct hclge_desc desc;
8255 	u8 resp_code;
8256 	u16 retval;
8257 	int ret;
8258 
8259 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8260 
8261 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8262 
8263 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8264 	if (ret) {
8265 		dev_err(&hdev->pdev->dev,
8266 			"del mac addr failed for cmd_send, ret =%d.\n",
8267 			ret);
8268 		return ret;
8269 	}
8270 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8271 	retval = le16_to_cpu(desc.retval);
8272 
8273 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8274 					     HCLGE_MAC_VLAN_REMOVE);
8275 }
8276 
8277 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8278 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8279 				     struct hclge_desc *desc,
8280 				     bool is_mc)
8281 {
8282 	struct hclge_dev *hdev = vport->back;
8283 	u8 resp_code;
8284 	u16 retval;
8285 	int ret;
8286 
8287 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8288 	if (is_mc) {
8289 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8290 		memcpy(desc[0].data,
8291 		       req,
8292 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8293 		hclge_cmd_setup_basic_desc(&desc[1],
8294 					   HCLGE_OPC_MAC_VLAN_ADD,
8295 					   true);
8296 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8297 		hclge_cmd_setup_basic_desc(&desc[2],
8298 					   HCLGE_OPC_MAC_VLAN_ADD,
8299 					   true);
8300 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8301 	} else {
8302 		memcpy(desc[0].data,
8303 		       req,
8304 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8305 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8306 	}
8307 	if (ret) {
8308 		dev_err(&hdev->pdev->dev,
8309 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8310 			ret);
8311 		return ret;
8312 	}
8313 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8314 	retval = le16_to_cpu(desc[0].retval);
8315 
8316 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8317 					     HCLGE_MAC_VLAN_LKUP);
8318 }
8319 
8320 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8321 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8322 				  struct hclge_desc *mc_desc)
8323 {
8324 	struct hclge_dev *hdev = vport->back;
8325 	int cfg_status;
8326 	u8 resp_code;
8327 	u16 retval;
8328 	int ret;
8329 
8330 	if (!mc_desc) {
8331 		struct hclge_desc desc;
8332 
8333 		hclge_cmd_setup_basic_desc(&desc,
8334 					   HCLGE_OPC_MAC_VLAN_ADD,
8335 					   false);
8336 		memcpy(desc.data, req,
8337 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8338 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8339 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8340 		retval = le16_to_cpu(desc.retval);
8341 
8342 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8343 							   resp_code,
8344 							   HCLGE_MAC_VLAN_ADD);
8345 	} else {
8346 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8347 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8348 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8349 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8350 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8351 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8352 		memcpy(mc_desc[0].data, req,
8353 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8354 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8355 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8356 		retval = le16_to_cpu(mc_desc[0].retval);
8357 
8358 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8359 							   resp_code,
8360 							   HCLGE_MAC_VLAN_ADD);
8361 	}
8362 
8363 	if (ret) {
8364 		dev_err(&hdev->pdev->dev,
8365 			"add mac addr failed for cmd_send, ret =%d.\n",
8366 			ret);
8367 		return ret;
8368 	}
8369 
8370 	return cfg_status;
8371 }
8372 
8373 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8374 			       u16 *allocated_size)
8375 {
8376 	struct hclge_umv_spc_alc_cmd *req;
8377 	struct hclge_desc desc;
8378 	int ret;
8379 
8380 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8381 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8382 
8383 	req->space_size = cpu_to_le32(space_size);
8384 
8385 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8386 	if (ret) {
8387 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8388 			ret);
8389 		return ret;
8390 	}
8391 
8392 	*allocated_size = le32_to_cpu(desc.data[1]);
8393 
8394 	return 0;
8395 }
8396 
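/* Request the wanted unicast MAC (UMV) table space from firmware. The
 * allocated space is divided into num_alloc_vport + 1 equal shares: each
 * vport gets one private share, and the remaining share plus the division
 * remainder form the shared pool.
 */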
8397 static int hclge_init_umv_space(struct hclge_dev *hdev)
8398 {
8399 	u16 allocated_size = 0;
8400 	int ret;
8401 
8402 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8403 	if (ret)
8404 		return ret;
8405 
8406 	if (allocated_size < hdev->wanted_umv_size)
8407 		dev_warn(&hdev->pdev->dev,
8408 			 "failed to alloc umv space, want %u, get %u\n",
8409 			 hdev->wanted_umv_size, allocated_size);
8410 
8411 	hdev->max_umv_size = allocated_size;
8412 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8413 	hdev->share_umv_size = hdev->priv_umv_size +
8414 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8415 
8416 	return 0;
8417 }
8418 
8419 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8420 {
8421 	struct hclge_vport *vport;
8422 	int i;
8423 
8424 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8425 		vport = &hdev->vport[i];
8426 		vport->used_umv_num = 0;
8427 	}
8428 
8429 	mutex_lock(&hdev->vport_lock);
8430 	hdev->share_umv_size = hdev->priv_umv_size +
8431 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8432 	mutex_unlock(&hdev->vport_lock);
8433 }
8434 
8435 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8436 {
8437 	struct hclge_dev *hdev = vport->back;
8438 	bool is_full;
8439 
8440 	if (need_lock)
8441 		mutex_lock(&hdev->vport_lock);
8442 
8443 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8444 		   hdev->share_umv_size == 0);
8445 
8446 	if (need_lock)
8447 		mutex_unlock(&hdev->vport_lock);
8448 
8449 	return is_full;
8450 }
8451 
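/* Account a unicast MAC entry being added or freed: once a vport exceeds
 * its private quota, further entries are charged to (and later returned
 * to) the shared pool.
 */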
8452 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8453 {
8454 	struct hclge_dev *hdev = vport->back;
8455 
8456 	if (is_free) {
8457 		if (vport->used_umv_num > hdev->priv_umv_size)
8458 			hdev->share_umv_size++;
8459 
8460 		if (vport->used_umv_num > 0)
8461 			vport->used_umv_num--;
8462 	} else {
8463 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8464 		    hdev->share_umv_size > 0)
8465 			hdev->share_umv_size--;
8466 		vport->used_umv_num++;
8467 	}
8468 }
8469 
8470 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8471 						  const u8 *mac_addr)
8472 {
8473 	struct hclge_mac_node *mac_node, *tmp;
8474 
8475 	list_for_each_entry_safe(mac_node, tmp, list, node)
8476 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8477 			return mac_node;
8478 
8479 	return NULL;
8480 }
8481 
8482 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8483 				  enum HCLGE_MAC_NODE_STATE state)
8484 {
8485 	switch (state) {
8486 	/* from set_rx_mode or tmp_add_list */
8487 	case HCLGE_MAC_TO_ADD:
8488 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8489 			mac_node->state = HCLGE_MAC_ACTIVE;
8490 		break;
8491 	/* only from set_rx_mode */
8492 	case HCLGE_MAC_TO_DEL:
8493 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8494 			list_del(&mac_node->node);
8495 			kfree(mac_node);
8496 		} else {
8497 			mac_node->state = HCLGE_MAC_TO_DEL;
8498 		}
8499 		break;
8500 	/* only from tmp_add_list, where the mac_node->state won't be
8501 	 * ACTIVE.
8502 	 */
8503 	case HCLGE_MAC_ACTIVE:
8504 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8505 			mac_node->state = HCLGE_MAC_ACTIVE;
8506 
8507 		break;
8508 	}
8509 }
8510 
8511 int hclge_update_mac_list(struct hclge_vport *vport,
8512 			  enum HCLGE_MAC_NODE_STATE state,
8513 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8514 			  const unsigned char *addr)
8515 {
8516 	struct hclge_dev *hdev = vport->back;
8517 	struct hclge_mac_node *mac_node;
8518 	struct list_head *list;
8519 
8520 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8521 		&vport->uc_mac_list : &vport->mc_mac_list;
8522 
8523 	spin_lock_bh(&vport->mac_list_lock);
8524 
8525 	/* if the mac addr is already in the mac list, there is no need to add
8526 	 * a new node; just check the mac addr state and either convert it to a
8527 	 * new state, remove it, or do nothing.
8528 	 */
8529 	mac_node = hclge_find_mac_node(list, addr);
8530 	if (mac_node) {
8531 		hclge_update_mac_node(mac_node, state);
8532 		spin_unlock_bh(&vport->mac_list_lock);
8533 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8534 		return 0;
8535 	}
8536 
8537 	/* if this address was never added, there is nothing to delete */
8538 	if (state == HCLGE_MAC_TO_DEL) {
8539 		spin_unlock_bh(&vport->mac_list_lock);
8540 		dev_err(&hdev->pdev->dev,
8541 			"failed to delete address %pM from mac list\n",
8542 			addr);
8543 		return -ENOENT;
8544 	}
8545 
8546 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8547 	if (!mac_node) {
8548 		spin_unlock_bh(&vport->mac_list_lock);
8549 		return -ENOMEM;
8550 	}
8551 
8552 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8553 
8554 	mac_node->state = state;
8555 	ether_addr_copy(mac_node->mac_addr, addr);
8556 	list_add_tail(&mac_node->node, list);
8557 
8558 	spin_unlock_bh(&vport->mac_list_lock);
8559 
8560 	return 0;
8561 }
8562 
8563 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8564 			     const unsigned char *addr)
8565 {
8566 	struct hclge_vport *vport = hclge_get_vport(handle);
8567 
8568 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8569 				     addr);
8570 }
8571 
8572 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8573 			     const unsigned char *addr)
8574 {
8575 	struct hclge_dev *hdev = vport->back;
8576 	struct hclge_mac_vlan_tbl_entry_cmd req;
8577 	struct hclge_desc desc;
8578 	u16 egress_port = 0;
8579 	int ret;
8580 
8581 	/* mac addr check */
8582 	if (is_zero_ether_addr(addr) ||
8583 	    is_broadcast_ether_addr(addr) ||
8584 	    is_multicast_ether_addr(addr)) {
8585 		dev_err(&hdev->pdev->dev,
8586 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8587 			 addr, is_zero_ether_addr(addr),
8588 			 is_broadcast_ether_addr(addr),
8589 			 is_multicast_ether_addr(addr));
8590 		return -EINVAL;
8591 	}
8592 
8593 	memset(&req, 0, sizeof(req));
8594 
8595 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8596 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8597 
8598 	req.egress_port = cpu_to_le16(egress_port);
8599 
8600 	hclge_prepare_mac_addr(&req, addr, false);
8601 
8602 	/* Look up the mac address in the mac_vlan table, and add
8603 	 * it if the entry does not exist. Duplicate unicast entries
8604 	 * are not allowed in the mac vlan table.
8605 	 */
8606 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8607 	if (ret == -ENOENT) {
8608 		mutex_lock(&hdev->vport_lock);
8609 		if (!hclge_is_umv_space_full(vport, false)) {
8610 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8611 			if (!ret)
8612 				hclge_update_umv_space(vport, false);
8613 			mutex_unlock(&hdev->vport_lock);
8614 			return ret;
8615 		}
8616 		mutex_unlock(&hdev->vport_lock);
8617 
8618 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8619 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8620 				hdev->priv_umv_size);
8621 
8622 		return -ENOSPC;
8623 	}
8624 
8625 	/* check if we just hit a duplicate entry */
8626 	if (!ret) {
8627 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8628 			 vport->vport_id, addr);
8629 		return 0;
8630 	}
8631 
8632 	dev_err(&hdev->pdev->dev,
8633 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8634 		addr);
8635 
8636 	return ret;
8637 }
8638 
8639 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8640 			    const unsigned char *addr)
8641 {
8642 	struct hclge_vport *vport = hclge_get_vport(handle);
8643 
8644 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8645 				     addr);
8646 }
8647 
8648 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8649 			    const unsigned char *addr)
8650 {
8651 	struct hclge_dev *hdev = vport->back;
8652 	struct hclge_mac_vlan_tbl_entry_cmd req;
8653 	int ret;
8654 
8655 	/* mac addr check */
8656 	if (is_zero_ether_addr(addr) ||
8657 	    is_broadcast_ether_addr(addr) ||
8658 	    is_multicast_ether_addr(addr)) {
8659 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8660 			addr);
8661 		return -EINVAL;
8662 	}
8663 
8664 	memset(&req, 0, sizeof(req));
8665 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8666 	hclge_prepare_mac_addr(&req, addr, false);
8667 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8668 	if (!ret) {
8669 		mutex_lock(&hdev->vport_lock);
8670 		hclge_update_umv_space(vport, true);
8671 		mutex_unlock(&hdev->vport_lock);
8672 	} else if (ret == -ENOENT) {
8673 		ret = 0;
8674 	}
8675 
8676 	return ret;
8677 }
8678 
8679 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8680 			     const unsigned char *addr)
8681 {
8682 	struct hclge_vport *vport = hclge_get_vport(handle);
8683 
8684 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8685 				     addr);
8686 }
8687 
8688 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8689 			     const unsigned char *addr)
8690 {
8691 	struct hclge_dev *hdev = vport->back;
8692 	struct hclge_mac_vlan_tbl_entry_cmd req;
8693 	struct hclge_desc desc[3];
8694 	int status;
8695 
8696 	/* mac addr check */
8697 	if (!is_multicast_ether_addr(addr)) {
8698 		dev_err(&hdev->pdev->dev,
8699 			"Add mc mac err! invalid mac:%pM.\n",
8700 			 addr);
8701 		return -EINVAL;
8702 	}
8703 	memset(&req, 0, sizeof(req));
8704 	hclge_prepare_mac_addr(&req, addr, true);
8705 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8706 	if (status) {
8707 		/* This mac addr does not exist, add a new entry for it */
8708 		memset(desc[0].data, 0, sizeof(desc[0].data));
8709 		memset(desc[1].data, 0, sizeof(desc[0].data));
8710 		memset(desc[2].data, 0, sizeof(desc[0].data));
8711 	}
8712 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8713 	if (status)
8714 		return status;
8715 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8716 	/* if the table has already overflowed, do not print each time */
8717 	if (status == -ENOSPC &&
8718 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8719 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8720 
8721 	return status;
8722 }
8723 
8724 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8725 			    const unsigned char *addr)
8726 {
8727 	struct hclge_vport *vport = hclge_get_vport(handle);
8728 
8729 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8730 				     addr);
8731 }
8732 
8733 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8734 			    const unsigned char *addr)
8735 {
8736 	struct hclge_dev *hdev = vport->back;
8737 	struct hclge_mac_vlan_tbl_entry_cmd req;
8738 	enum hclge_cmd_status status;
8739 	struct hclge_desc desc[3];
8740 
8741 	/* mac addr check */
8742 	if (!is_multicast_ether_addr(addr)) {
8743 		dev_dbg(&hdev->pdev->dev,
8744 			"Remove mc mac err! invalid mac:%pM.\n",
8745 			 addr);
8746 		return -EINVAL;
8747 	}
8748 
8749 	memset(&req, 0, sizeof(req));
8750 	hclge_prepare_mac_addr(&req, addr, true);
8751 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8752 	if (!status) {
8753 		/* This mac addr exists, remove this handle's VFID from it */
8754 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8755 		if (status)
8756 			return status;
8757 
8758 		if (hclge_is_all_function_id_zero(desc))
8759 			/* All the vfids are zero, so this entry needs to be deleted */
8760 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8761 		else
8762 			/* Not all the vfids are zero, so just update the vfids */
8763 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8764 	} else if (status == -ENOENT) {
8765 		status = 0;
8766 	}
8767 
8768 	return status;
8769 }
8770 
8771 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8772 				      struct list_head *list,
8773 				      int (*sync)(struct hclge_vport *,
8774 						  const unsigned char *))
8775 {
8776 	struct hclge_mac_node *mac_node, *tmp;
8777 	int ret;
8778 
8779 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8780 		ret = sync(vport, mac_node->mac_addr);
8781 		if (!ret) {
8782 			mac_node->state = HCLGE_MAC_ACTIVE;
8783 		} else {
8784 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8785 				&vport->state);
8786 			break;
8787 		}
8788 	}
8789 }
8790 
8791 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8792 					struct list_head *list,
8793 					int (*unsync)(struct hclge_vport *,
8794 						      const unsigned char *))
8795 {
8796 	struct hclge_mac_node *mac_node, *tmp;
8797 	int ret;
8798 
8799 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8800 		ret = unsync(vport, mac_node->mac_addr);
8801 		if (!ret || ret == -ENOENT) {
8802 			list_del(&mac_node->node);
8803 			kfree(mac_node);
8804 		} else {
8805 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8806 				&vport->state);
8807 			break;
8808 		}
8809 	}
8810 }
8811 
8812 static bool hclge_sync_from_add_list(struct list_head *add_list,
8813 				     struct list_head *mac_list)
8814 {
8815 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8816 	bool all_added = true;
8817 
8818 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8819 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8820 			all_added = false;
8821 
8822 		/* If the mac address from tmp_add_list is not in the
8823 		 * uc/mc_mac_list, a TO_DEL request was received during the
8824 		 * time window of adding the mac address into the mac table.
8825 		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
8826 		 * will be removed next time. Otherwise it must be TO_ADD and
8827 		 * the address has not been added into the mac table yet, so
8828 		 * just remove the mac node.
8829 		 */
8830 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8831 		if (new_node) {
8832 			hclge_update_mac_node(new_node, mac_node->state);
8833 			list_del(&mac_node->node);
8834 			kfree(mac_node);
8835 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8836 			mac_node->state = HCLGE_MAC_TO_DEL;
8837 			list_del(&mac_node->node);
8838 			list_add_tail(&mac_node->node, mac_list);
8839 		} else {
8840 			list_del(&mac_node->node);
8841 			kfree(mac_node);
8842 		}
8843 	}
8844 
8845 	return all_added;
8846 }
8847 
8848 static void hclge_sync_from_del_list(struct list_head *del_list,
8849 				     struct list_head *mac_list)
8850 {
8851 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8852 
8853 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8854 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8855 		if (new_node) {
8856 			/* If the mac addr exists in the mac list, a new TO_ADD
8857 			 * request was received during the time window of
8858 			 * configuring the mac address. The mac node state is
8859 			 * TO_ADD, but the address is already in the hardware
8860 			 * (because the delete failed), so we just need to
8861 			 * change the mac node state to ACTIVE.
8862 			 */
8863 			new_node->state = HCLGE_MAC_ACTIVE;
8864 			list_del(&mac_node->node);
8865 			kfree(mac_node);
8866 		} else {
8867 			list_del(&mac_node->node);
8868 			list_add_tail(&mac_node->node, mac_list);
8869 		}
8870 	}
8871 }
8872 
8873 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8874 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8875 					bool is_all_added)
8876 {
8877 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8878 		if (is_all_added)
8879 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8880 		else
8881 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8882 	} else {
8883 		if (is_all_added)
8884 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8885 		else
8886 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8887 	}
8888 }
8889 
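/* Apply the pending MAC changes of @vport to hardware for the given address
 * type: snapshot the TO_ADD/TO_DEL nodes under mac_list_lock, program the
 * hardware outside the lock (deletions first, to free table space), then
 * merge the results back into the list and update the overflow flags.
 */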
8890 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8891 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8892 {
8893 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8894 	struct list_head tmp_add_list, tmp_del_list;
8895 	struct list_head *list;
8896 	bool all_added;
8897 
8898 	INIT_LIST_HEAD(&tmp_add_list);
8899 	INIT_LIST_HEAD(&tmp_del_list);
8900 
8901 	/* move the mac addrs to the tmp_add_list and tmp_del_list, so
8902 	 * that we can add/delete them outside the spin lock
8903 	 */
8904 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8905 		&vport->uc_mac_list : &vport->mc_mac_list;
8906 
8907 	spin_lock_bh(&vport->mac_list_lock);
8908 
8909 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8910 		switch (mac_node->state) {
8911 		case HCLGE_MAC_TO_DEL:
8912 			list_del(&mac_node->node);
8913 			list_add_tail(&mac_node->node, &tmp_del_list);
8914 			break;
8915 		case HCLGE_MAC_TO_ADD:
8916 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8917 			if (!new_node)
8918 				goto stop_traverse;
8919 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8920 			new_node->state = mac_node->state;
8921 			list_add_tail(&new_node->node, &tmp_add_list);
8922 			break;
8923 		default:
8924 			break;
8925 		}
8926 	}
8927 
8928 stop_traverse:
8929 	spin_unlock_bh(&vport->mac_list_lock);
8930 
8931 	/* delete first, in order to get max mac table space for adding */
8932 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8933 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8934 					    hclge_rm_uc_addr_common);
8935 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8936 					  hclge_add_uc_addr_common);
8937 	} else {
8938 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8939 					    hclge_rm_mc_addr_common);
8940 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8941 					  hclge_add_mc_addr_common);
8942 	}
8943 
8944 	/* if adding/deleting some mac addresses failed, move them back to
8945 	 * the mac_list and retry next time.
8946 	 */
8947 	spin_lock_bh(&vport->mac_list_lock);
8948 
8949 	hclge_sync_from_del_list(&tmp_del_list, list);
8950 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8951 
8952 	spin_unlock_bh(&vport->mac_list_lock);
8953 
8954 	hclge_update_overflow_flags(vport, mac_type, all_added);
8955 }
8956 
8957 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8958 {
8959 	struct hclge_dev *hdev = vport->back;
8960 
8961 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8962 		return false;
8963 
8964 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8965 		return true;
8966 
8967 	return false;
8968 }
8969 
8970 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8971 {
8972 	int i;
8973 
8974 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8975 		struct hclge_vport *vport = &hdev->vport[i];
8976 
8977 		if (!hclge_need_sync_mac_table(vport))
8978 			continue;
8979 
8980 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8981 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8982 	}
8983 }
8984 
8985 static void hclge_build_del_list(struct list_head *list,
8986 				 bool is_del_list,
8987 				 struct list_head *tmp_del_list)
8988 {
8989 	struct hclge_mac_node *mac_cfg, *tmp;
8990 
8991 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8992 		switch (mac_cfg->state) {
8993 		case HCLGE_MAC_TO_DEL:
8994 		case HCLGE_MAC_ACTIVE:
8995 			list_del(&mac_cfg->node);
8996 			list_add_tail(&mac_cfg->node, tmp_del_list);
8997 			break;
8998 		case HCLGE_MAC_TO_ADD:
8999 			if (is_del_list) {
9000 				list_del(&mac_cfg->node);
9001 				kfree(mac_cfg);
9002 			}
9003 			break;
9004 		}
9005 	}
9006 }
9007 
9008 static void hclge_unsync_del_list(struct hclge_vport *vport,
9009 				  int (*unsync)(struct hclge_vport *vport,
9010 						const unsigned char *addr),
9011 				  bool is_del_list,
9012 				  struct list_head *tmp_del_list)
9013 {
9014 	struct hclge_mac_node *mac_cfg, *tmp;
9015 	int ret;
9016 
9017 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9018 		ret = unsync(vport, mac_cfg->mac_addr);
9019 		if (!ret || ret == -ENOENT) {
9020 			/* clear all mac addrs from hardware, but keep these
9021 			 * mac addrs in the mac list so that they can be
9022 			 * restored after the vf reset finishes.
9023 			 */
9024 			if (!is_del_list &&
9025 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
9026 				mac_cfg->state = HCLGE_MAC_TO_ADD;
9027 			} else {
9028 				list_del(&mac_cfg->node);
9029 				kfree(mac_cfg);
9030 			}
9031 		} else if (is_del_list) {
9032 			mac_cfg->state = HCLGE_MAC_TO_DEL;
9033 		}
9034 	}
9035 }
9036 
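/* Remove all MAC addresses of @vport from hardware. When @is_del_list is
 * false (e.g. across a VF reset), the addresses are kept in the software
 * list with state TO_ADD so that they can be restored later; otherwise the
 * list entries are freed as well.
 */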
9037 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9038 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9039 {
9040 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9041 	struct hclge_dev *hdev = vport->back;
9042 	struct list_head tmp_del_list, *list;
9043 
9044 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9045 		list = &vport->uc_mac_list;
9046 		unsync = hclge_rm_uc_addr_common;
9047 	} else {
9048 		list = &vport->mc_mac_list;
9049 		unsync = hclge_rm_mc_addr_common;
9050 	}
9051 
9052 	INIT_LIST_HEAD(&tmp_del_list);
9053 
9054 	if (!is_del_list)
9055 		set_bit(vport->vport_id, hdev->vport_config_block);
9056 
9057 	spin_lock_bh(&vport->mac_list_lock);
9058 
9059 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9060 
9061 	spin_unlock_bh(&vport->mac_list_lock);
9062 
9063 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9064 
9065 	spin_lock_bh(&vport->mac_list_lock);
9066 
9067 	hclge_sync_from_del_list(&tmp_del_list, list);
9068 
9069 	spin_unlock_bh(&vport->mac_list_lock);
9070 }
9071 
9072 /* remove all mac addresses when uninitializing */
9073 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9074 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9075 {
9076 	struct hclge_mac_node *mac_node, *tmp;
9077 	struct hclge_dev *hdev = vport->back;
9078 	struct list_head tmp_del_list, *list;
9079 
9080 	INIT_LIST_HEAD(&tmp_del_list);
9081 
9082 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9083 		&vport->uc_mac_list : &vport->mc_mac_list;
9084 
9085 	spin_lock_bh(&vport->mac_list_lock);
9086 
9087 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9088 		switch (mac_node->state) {
9089 		case HCLGE_MAC_TO_DEL:
9090 		case HCLGE_MAC_ACTIVE:
9091 			list_del(&mac_node->node);
9092 			list_add_tail(&mac_node->node, &tmp_del_list);
9093 			break;
9094 		case HCLGE_MAC_TO_ADD:
9095 			list_del(&mac_node->node);
9096 			kfree(mac_node);
9097 			break;
9098 		}
9099 	}
9100 
9101 	spin_unlock_bh(&vport->mac_list_lock);
9102 
9103 	if (mac_type == HCLGE_MAC_ADDR_UC)
9104 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9105 					    hclge_rm_uc_addr_common);
9106 	else
9107 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9108 					    hclge_rm_mc_addr_common);
9109 
9110 	if (!list_empty(&tmp_del_list))
9111 		dev_warn(&hdev->pdev->dev,
9112 			 "uninit of %s mac list for vport %u did not complete.\n",
9113 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9114 			 vport->vport_id);
9115 
9116 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9117 		list_del(&mac_node->node);
9118 		kfree(mac_node);
9119 	}
9120 }
9121 
9122 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9123 {
9124 	struct hclge_vport *vport;
9125 	int i;
9126 
9127 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9128 		vport = &hdev->vport[i];
9129 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9130 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9131 	}
9132 }
9133 
9134 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9135 					      u16 cmdq_resp, u8 resp_code)
9136 {
9137 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9138 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9139 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9140 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9141 
9142 	int return_status;
9143 
9144 	if (cmdq_resp) {
9145 		dev_err(&hdev->pdev->dev,
9146 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9147 			cmdq_resp);
9148 		return -EIO;
9149 	}
9150 
9151 	switch (resp_code) {
9152 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9153 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9154 		return_status = 0;
9155 		break;
9156 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9157 		dev_err(&hdev->pdev->dev,
9158 			"add mac ethertype failed for manager table overflow.\n");
9159 		return_status = -EIO;
9160 		break;
9161 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9162 		dev_err(&hdev->pdev->dev,
9163 			"add mac ethertype failed for key conflict.\n");
9164 		return_status = -EIO;
9165 		break;
9166 	default:
9167 		dev_err(&hdev->pdev->dev,
9168 			"add mac ethertype failed for undefined, code=%u.\n",
9169 			resp_code);
9170 		return_status = -EIO;
9171 	}
9172 
9173 	return return_status;
9174 }
9175 
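/* Check whether @mac_addr is already in use, either programmed in the
 * unicast MAC VLAN table or assigned to another VF of this PF.
 */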
9176 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9177 				     u8 *mac_addr)
9178 {
9179 	struct hclge_mac_vlan_tbl_entry_cmd req;
9180 	struct hclge_dev *hdev = vport->back;
9181 	struct hclge_desc desc;
9182 	u16 egress_port = 0;
9183 	int i;
9184 
9185 	if (is_zero_ether_addr(mac_addr))
9186 		return false;
9187 
9188 	memset(&req, 0, sizeof(req));
9189 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9190 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9191 	req.egress_port = cpu_to_le16(egress_port);
9192 	hclge_prepare_mac_addr(&req, mac_addr, false);
9193 
9194 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9195 		return true;
9196 
9197 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9198 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9199 		if (i != vf_idx &&
9200 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9201 			return true;
9202 
9203 	return false;
9204 }
9205 
9206 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9207 			    u8 *mac_addr)
9208 {
9209 	struct hclge_vport *vport = hclge_get_vport(handle);
9210 	struct hclge_dev *hdev = vport->back;
9211 
9212 	vport = hclge_get_vf_vport(hdev, vf);
9213 	if (!vport)
9214 		return -EINVAL;
9215 
9216 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9217 		dev_info(&hdev->pdev->dev,
9218 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
9219 			 mac_addr);
9220 		return 0;
9221 	}
9222 
9223 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9224 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9225 			mac_addr);
9226 		return -EEXIST;
9227 	}
9228 
9229 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9230 
9231 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9232 		dev_info(&hdev->pdev->dev,
9233 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9234 			 vf, mac_addr);
9235 		return hclge_inform_reset_assert_to_vf(vport);
9236 	}
9237 
9238 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9239 		 vf, mac_addr);
9240 	return 0;
9241 }
9242 
9243 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9244 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9245 {
9246 	struct hclge_desc desc;
9247 	u8 resp_code;
9248 	u16 retval;
9249 	int ret;
9250 
9251 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9252 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9253 
9254 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9255 	if (ret) {
9256 		dev_err(&hdev->pdev->dev,
9257 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9258 			ret);
9259 		return ret;
9260 	}
9261 
9262 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9263 	retval = le16_to_cpu(desc.retval);
9264 
9265 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9266 }
9267 
9268 static int init_mgr_tbl(struct hclge_dev *hdev)
9269 {
9270 	int ret;
9271 	int i;
9272 
9273 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9274 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9275 		if (ret) {
9276 			dev_err(&hdev->pdev->dev,
9277 				"add mac ethertype failed, ret =%d.\n",
9278 				ret);
9279 			return ret;
9280 		}
9281 	}
9282 
9283 	return 0;
9284 }
9285 
9286 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9287 {
9288 	struct hclge_vport *vport = hclge_get_vport(handle);
9289 	struct hclge_dev *hdev = vport->back;
9290 
9291 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9292 }
9293 
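/* Update the unicast MAC list when the device address changes: queue
 * @new_addr for adding (kept at the list head) and mark @old_addr, if given
 * and different, for deletion.
 */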
9294 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9295 				       const u8 *old_addr, const u8 *new_addr)
9296 {
9297 	struct list_head *list = &vport->uc_mac_list;
9298 	struct hclge_mac_node *old_node, *new_node;
9299 
9300 	new_node = hclge_find_mac_node(list, new_addr);
9301 	if (!new_node) {
9302 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9303 		if (!new_node)
9304 			return -ENOMEM;
9305 
9306 		new_node->state = HCLGE_MAC_TO_ADD;
9307 		ether_addr_copy(new_node->mac_addr, new_addr);
9308 		list_add(&new_node->node, list);
9309 	} else {
9310 		if (new_node->state == HCLGE_MAC_TO_DEL)
9311 			new_node->state = HCLGE_MAC_ACTIVE;
9312 
9313 		/* make sure the new addr is at the list head, otherwise the
9314 		 * dev addr may not be re-added into the mac table due to the
9315 		 * umv space limitation after a global/imp reset, which clears
9316 		 * the mac table in hardware.
9317 		 */
9318 		list_move(&new_node->node, list);
9319 	}
9320 
9321 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9322 		old_node = hclge_find_mac_node(list, old_addr);
9323 		if (old_node) {
9324 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9325 				list_del(&old_node->node);
9326 				kfree(old_node);
9327 			} else {
9328 				old_node->state = HCLGE_MAC_TO_DEL;
9329 			}
9330 		}
9331 	}
9332 
9333 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9334 
9335 	return 0;
9336 }
9337 
9338 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9339 			      bool is_first)
9340 {
9341 	const unsigned char *new_addr = (const unsigned char *)p;
9342 	struct hclge_vport *vport = hclge_get_vport(handle);
9343 	struct hclge_dev *hdev = vport->back;
9344 	unsigned char *old_addr = NULL;
9345 	int ret;
9346 
9347 	/* mac addr check */
9348 	if (is_zero_ether_addr(new_addr) ||
9349 	    is_broadcast_ether_addr(new_addr) ||
9350 	    is_multicast_ether_addr(new_addr)) {
9351 		dev_err(&hdev->pdev->dev,
9352 			"change uc mac err! invalid mac: %pM.\n",
9353 			 new_addr);
9354 		return -EINVAL;
9355 	}
9356 
9357 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9358 	if (ret) {
9359 		dev_err(&hdev->pdev->dev,
9360 			"failed to configure mac pause address, ret = %d\n",
9361 			ret);
9362 		return ret;
9363 	}
9364 
9365 	if (!is_first)
9366 		old_addr = hdev->hw.mac.mac_addr;
9367 
9368 	spin_lock_bh(&vport->mac_list_lock);
9369 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9370 	if (ret) {
9371 		dev_err(&hdev->pdev->dev,
9372 			"failed to change the mac addr:%pM, ret = %d\n",
9373 			new_addr, ret);
9374 		spin_unlock_bh(&vport->mac_list_lock);
9375 
9376 		if (!is_first)
9377 			hclge_pause_addr_cfg(hdev, old_addr);
9378 
9379 		return ret;
9380 	}
9381 	/* we must update the dev addr under the spin lock, to prevent it
9382 	 * from being removed by the set_rx_mode path.
9383 	 */
9384 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9385 	spin_unlock_bh(&vport->mac_list_lock);
9386 
9387 	hclge_task_schedule(hdev, 0);
9388 
9389 	return 0;
9390 }
9391 
9392 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9393 {
9394 	struct mii_ioctl_data *data = if_mii(ifr);
9395 
9396 	if (!hnae3_dev_phy_imp_supported(hdev))
9397 		return -EOPNOTSUPP;
9398 
9399 	switch (cmd) {
9400 	case SIOCGMIIPHY:
9401 		data->phy_id = hdev->hw.mac.phy_addr;
9402 		/* this command reads phy id and register at the same time */
9403 		fallthrough;
9404 	case SIOCGMIIREG:
9405 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9406 		return 0;
9407 
9408 	case SIOCSMIIREG:
9409 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9410 	default:
9411 		return -EOPNOTSUPP;
9412 	}
9413 }
9414 
9415 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9416 			  int cmd)
9417 {
9418 	struct hclge_vport *vport = hclge_get_vport(handle);
9419 	struct hclge_dev *hdev = vport->back;
9420 
9421 	if (!hdev->hw.mac.phydev)
9422 		return hclge_mii_ioctl(hdev, ifr, cmd);
9423 
9424 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9425 }
9426 
9427 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9428 					     bool bypass_en)
9429 {
9430 	struct hclge_port_vlan_filter_bypass_cmd *req;
9431 	struct hclge_desc desc;
9432 	int ret;
9433 
9434 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9435 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9436 	req->vf_id = vf_id;
9437 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9438 		      bypass_en ? 1 : 0);
9439 
9440 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9441 	if (ret)
9442 		dev_err(&hdev->pdev->dev,
9443 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9444 			vf_id, ret);
9445 
9446 	return ret;
9447 }
9448 
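/* Read-modify-write the VLAN filter control configuration: fetch the
 * current setting for @vlan_type/@vf_id, then set or clear the filter
 * enable bits given by @fe_type.
 */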
9449 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9450 				      u8 fe_type, bool filter_en, u8 vf_id)
9451 {
9452 	struct hclge_vlan_filter_ctrl_cmd *req;
9453 	struct hclge_desc desc;
9454 	int ret;
9455 
9456 	/* read current vlan filter parameter */
9457 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9458 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9459 	req->vlan_type = vlan_type;
9460 	req->vf_id = vf_id;
9461 
9462 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9463 	if (ret) {
9464 		dev_err(&hdev->pdev->dev,
9465 			"failed to get vlan filter config, ret = %d.\n", ret);
9466 		return ret;
9467 	}
9468 
9469 	/* modify and write new config parameter */
9470 	hclge_cmd_reuse_desc(&desc, false);
9471 	req->vlan_fe = filter_en ?
9472 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9473 
9474 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9475 	if (ret)
9476 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9477 			ret);
9478 
9479 	return ret;
9480 }
9481 
9482 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9483 {
9484 	struct hclge_dev *hdev = vport->back;
9485 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9486 	int ret;
9487 
9488 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9489 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9490 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9491 						  enable, vport->vport_id);
9492 
9493 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9494 					 HCLGE_FILTER_FE_EGRESS, enable,
9495 					 vport->vport_id);
9496 	if (ret)
9497 		return ret;
9498 
9499 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
9500 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9501 							!enable);
9502 	else if (!vport->vport_id)
9503 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9504 						 HCLGE_FILTER_FE_INGRESS,
9505 						 enable, 0);
9506 
9507 	return ret;
9508 }
9509 
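/* Decide whether the VLAN filter should actually be enabled for @vport,
 * taking the user request, promiscuous-style configurations, the port
 * based VLAN state and the VLANs in the vport list into account.
 */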
9510 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9511 {
9512 	struct hnae3_handle *handle = &vport->nic;
9513 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9514 	struct hclge_dev *hdev = vport->back;
9515 
9516 	if (vport->vport_id) {
9517 		if (vport->port_base_vlan_cfg.state !=
9518 			HNAE3_PORT_BASE_VLAN_DISABLE)
9519 			return true;
9520 
9521 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9522 			return false;
9523 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9524 		return false;
9525 	}
9526 
9527 	if (!vport->req_vlan_fltr_en)
9528 		return false;
9529 
9530 	/* for compatibility with older devices, always enable the vlan filter */
9531 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9532 		return true;
9533 
9534 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9535 		if (vlan->vlan_id != 0)
9536 			return true;
9537 
9538 	return false;
9539 }
9540 
9541 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9542 {
9543 	struct hclge_dev *hdev = vport->back;
9544 	bool need_en;
9545 	int ret;
9546 
9547 	mutex_lock(&hdev->vport_lock);
9548 
9549 	vport->req_vlan_fltr_en = request_en;
9550 
9551 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9552 	if (need_en == vport->cur_vlan_fltr_en) {
9553 		mutex_unlock(&hdev->vport_lock);
9554 		return 0;
9555 	}
9556 
9557 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9558 	if (ret) {
9559 		mutex_unlock(&hdev->vport_lock);
9560 		return ret;
9561 	}
9562 
9563 	vport->cur_vlan_fltr_en = need_en;
9564 
9565 	mutex_unlock(&hdev->vport_lock);
9566 
9567 	return 0;
9568 }
9569 
9570 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9571 {
9572 	struct hclge_vport *vport = hclge_get_vport(handle);
9573 
9574 	return hclge_enable_vport_vlan_filter(vport, enable);
9575 }
9576 
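/* Build and send the two-descriptor VF VLAN filter command: the VF is
 * selected by setting one bit in a bitmap spanning both descriptors, and
 * @is_kill selects between adding and removing @vlan.
 */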
9577 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9578 					bool is_kill, u16 vlan,
9579 					struct hclge_desc *desc)
9580 {
9581 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9582 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9583 	u8 vf_byte_val;
9584 	u8 vf_byte_off;
9585 	int ret;
9586 
9587 	hclge_cmd_setup_basic_desc(&desc[0],
9588 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9589 	hclge_cmd_setup_basic_desc(&desc[1],
9590 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9591 
9592 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9593 
9594 	vf_byte_off = vfid / 8;
9595 	vf_byte_val = 1 << (vfid % 8);
9596 
9597 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9598 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9599 
9600 	req0->vlan_id  = cpu_to_le16(vlan);
9601 	req0->vlan_cfg = is_kill;
9602 
9603 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9604 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9605 	else
9606 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9607 
9608 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9609 	if (ret) {
9610 		dev_err(&hdev->pdev->dev,
9611 			"Send vf vlan command fail, ret =%d.\n",
9612 			ret);
9613 		return ret;
9614 	}
9615 
9616 	return 0;
9617 }
9618 
9619 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9620 					  bool is_kill, struct hclge_desc *desc)
9621 {
9622 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9623 
9624 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9625 
9626 	if (!is_kill) {
9627 #define HCLGE_VF_VLAN_NO_ENTRY	2
9628 		if (!req->resp_code || req->resp_code == 1)
9629 			return 0;
9630 
9631 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9632 			set_bit(vfid, hdev->vf_vlan_full);
9633 			dev_warn(&hdev->pdev->dev,
9634 				 "vf vlan table is full, vf vlan filter is disabled\n");
9635 			return 0;
9636 		}
9637 
9638 		dev_err(&hdev->pdev->dev,
9639 			"Add vf vlan filter fail, ret =%u.\n",
9640 			req->resp_code);
9641 	} else {
9642 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9643 		if (!req->resp_code)
9644 			return 0;
9645 
9646 		/* the vf vlan filter is disabled when the vf vlan table is
9647 		 * full, so new vlan ids will not be added into the vf vlan
9648 		 * table. Just return 0 without warning, to avoid massive
9649 		 * verbose logs on unload.
9650 		 */
9651 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9652 			return 0;
9653 
9654 		dev_err(&hdev->pdev->dev,
9655 			"Kill vf vlan filter fail, ret =%u.\n",
9656 			req->resp_code);
9657 	}
9658 
9659 	return -EIO;
9660 }
9661 
9662 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9663 				    bool is_kill, u16 vlan)
9664 {
9665 	struct hclge_vport *vport = &hdev->vport[vfid];
9666 	struct hclge_desc desc[2];
9667 	int ret;
9668 
9669 	/* if the vf vlan table is full, firmware will close the vf vlan
9670 	 * filter, so adding a new vlan id to it is neither possible nor
9671 	 * necessary. If spoof check is enabled and the vf vlan table is
9672 	 * full, a new vlan must not be added, because tx packets with that
9673 	 * vlan id would be dropped.
9674 	 */
9674 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9675 		if (vport->vf_info.spoofchk && vlan) {
9676 			dev_err(&hdev->pdev->dev,
9677 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9678 			return -EPERM;
9679 		}
9680 		return 0;
9681 	}
9682 
9683 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9684 	if (ret)
9685 		return ret;
9686 
9687 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9688 }
9689 
9690 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9691 				      u16 vlan_id, bool is_kill)
9692 {
9693 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9694 	struct hclge_desc desc;
9695 	u8 vlan_offset_byte_val;
9696 	u8 vlan_offset_byte;
9697 	u8 vlan_offset_160;
9698 	int ret;
9699 
9700 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9701 
9702 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9703 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9704 			   HCLGE_VLAN_BYTE_SIZE;
9705 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9706 
9707 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9708 	req->vlan_offset = vlan_offset_160;
9709 	req->vlan_cfg = is_kill;
9710 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9711 
9712 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9713 	if (ret)
9714 		dev_err(&hdev->pdev->dev,
9715 			"port vlan command, send fail, ret =%d.\n", ret);
9716 	return ret;
9717 }
9718 
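/* Program a VLAN filter entry in hardware for @vport_id. The per-VF filter
 * is always updated; the port level filter is only touched when the first
 * vport joins or the last vport leaves @vlan_id, which is tracked via the
 * hdev->vlan_table bitmaps.
 */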
9719 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9720 				    u16 vport_id, u16 vlan_id,
9721 				    bool is_kill)
9722 {
9723 	u16 vport_idx, vport_num = 0;
9724 	int ret;
9725 
9726 	if (is_kill && !vlan_id)
9727 		return 0;
9728 
9729 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9730 	if (ret) {
9731 		dev_err(&hdev->pdev->dev,
9732 			"Set %u vport vlan filter config fail, ret =%d.\n",
9733 			vport_id, ret);
9734 		return ret;
9735 	}
9736 
9737 	/* vlan 0 may be added twice when 8021q module is enabled */
9738 	if (!is_kill && !vlan_id &&
9739 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9740 		return 0;
9741 
9742 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9743 		dev_err(&hdev->pdev->dev,
9744 			"Add port vlan failed, vport %u is already in vlan %u\n",
9745 			vport_id, vlan_id);
9746 		return -EINVAL;
9747 	}
9748 
9749 	if (is_kill &&
9750 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9751 		dev_err(&hdev->pdev->dev,
9752 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9753 			vport_id, vlan_id);
9754 		return -EINVAL;
9755 	}
9756 
9757 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9758 		vport_num++;
9759 
9760 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9761 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9762 						 is_kill);
9763 
9764 	return ret;
9765 }
9766 
9767 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9768 {
9769 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9770 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9771 	struct hclge_dev *hdev = vport->back;
9772 	struct hclge_desc desc;
9773 	u16 bmap_index;
9774 	int status;
9775 
9776 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9777 
9778 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9779 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9780 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9781 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9782 		      vcfg->accept_tag1 ? 1 : 0);
9783 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9784 		      vcfg->accept_untag1 ? 1 : 0);
9785 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9786 		      vcfg->accept_tag2 ? 1 : 0);
9787 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9788 		      vcfg->accept_untag2 ? 1 : 0);
9789 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9790 		      vcfg->insert_tag1_en ? 1 : 0);
9791 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9792 		      vcfg->insert_tag2_en ? 1 : 0);
9793 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9794 		      vcfg->tag_shift_mode_en ? 1 : 0);
9795 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9796 
9797 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9798 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9799 			HCLGE_VF_NUM_PER_BYTE;
9800 	req->vf_bitmap[bmap_index] =
9801 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9802 
9803 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9804 	if (status)
9805 		dev_err(&hdev->pdev->dev,
9806 			"Send port txvlan cfg command fail, ret =%d\n",
9807 			status);
9808 
9809 	return status;
9810 }
9811 
9812 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9813 {
9814 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9815 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9816 	struct hclge_dev *hdev = vport->back;
9817 	struct hclge_desc desc;
9818 	u16 bmap_index;
9819 	int status;
9820 
9821 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9822 
9823 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9824 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9825 		      vcfg->strip_tag1_en ? 1 : 0);
9826 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9827 		      vcfg->strip_tag2_en ? 1 : 0);
9828 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9829 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9830 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9831 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9832 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9833 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9834 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9835 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9836 
9837 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9838 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9839 			HCLGE_VF_NUM_PER_BYTE;
9840 	req->vf_bitmap[bmap_index] =
9841 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9842 
9843 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9844 	if (status)
9845 		dev_err(&hdev->pdev->dev,
9846 			"Send port rxvlan cfg command fail, ret =%d\n",
9847 			status);
9848 
9849 	return status;
9850 }
9851 
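/* Configure TX VLAN insertion and RX VLAN stripping for @vport according to
 * the port based VLAN state: when it is enabled, @vlan_tag/@qos are inserted
 * as the default tag1 on TX and the RX stripping configuration is adjusted
 * accordingly.
 */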
9852 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9853 				  u16 port_base_vlan_state,
9854 				  u16 vlan_tag, u8 qos)
9855 {
9856 	int ret;
9857 
9858 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9859 		vport->txvlan_cfg.accept_tag1 = true;
9860 		vport->txvlan_cfg.insert_tag1_en = false;
9861 		vport->txvlan_cfg.default_tag1 = 0;
9862 	} else {
9863 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9864 
9865 		vport->txvlan_cfg.accept_tag1 =
9866 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9867 		vport->txvlan_cfg.insert_tag1_en = true;
9868 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9869 						 vlan_tag;
9870 	}
9871 
9872 	vport->txvlan_cfg.accept_untag1 = true;
9873 
9874 	/* accept_tag2 and accept_untag2 are not supported on
9875 	 * pdev revision(0x20); newer revisions support them, but
9876 	 * these two fields cannot be configured by the user.
9877 	 */
9878 	vport->txvlan_cfg.accept_tag2 = true;
9879 	vport->txvlan_cfg.accept_untag2 = true;
9880 	vport->txvlan_cfg.insert_tag2_en = false;
9881 	vport->txvlan_cfg.default_tag2 = 0;
9882 	vport->txvlan_cfg.tag_shift_mode_en = true;
9883 
9884 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9885 		vport->rxvlan_cfg.strip_tag1_en = false;
9886 		vport->rxvlan_cfg.strip_tag2_en =
9887 				vport->rxvlan_cfg.rx_vlan_offload_en;
9888 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9889 	} else {
9890 		vport->rxvlan_cfg.strip_tag1_en =
9891 				vport->rxvlan_cfg.rx_vlan_offload_en;
9892 		vport->rxvlan_cfg.strip_tag2_en = true;
9893 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9894 	}
9895 
9896 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9897 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9898 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9899 
9900 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9901 	if (ret)
9902 		return ret;
9903 
9904 	return hclge_set_vlan_rx_offload_cfg(vport);
9905 }
9906 
9907 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9908 {
9909 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9910 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9911 	struct hclge_desc desc;
9912 	int status;
9913 
9914 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9915 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9916 	rx_req->ot_fst_vlan_type =
9917 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9918 	rx_req->ot_sec_vlan_type =
9919 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9920 	rx_req->in_fst_vlan_type =
9921 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9922 	rx_req->in_sec_vlan_type =
9923 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9924 
9925 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9926 	if (status) {
9927 		dev_err(&hdev->pdev->dev,
9928 			"Send rxvlan protocol type command fail, ret =%d\n",
9929 			status);
9930 		return status;
9931 	}
9932 
9933 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9934 
9935 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9936 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9937 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9938 
9939 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9940 	if (status)
9941 		dev_err(&hdev->pdev->dev,
9942 			"Send txvlan protocol type command fail, ret =%d\n",
9943 			status);
9944 
9945 	return status;
9946 }
9947 
9948 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9949 {
9950 #define HCLGE_DEF_VLAN_TYPE		0x8100
9951 
9952 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9953 	struct hclge_vport *vport;
9954 	int ret;
9955 	int i;
9956 
9957 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9958 		/* for revision 0x21, vf vlan filter is per function */
9959 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9960 			vport = &hdev->vport[i];
9961 			ret = hclge_set_vlan_filter_ctrl(hdev,
9962 							 HCLGE_FILTER_TYPE_VF,
9963 							 HCLGE_FILTER_FE_EGRESS,
9964 							 true,
9965 							 vport->vport_id);
9966 			if (ret)
9967 				return ret;
9968 			vport->cur_vlan_fltr_en = true;
9969 		}
9970 
9971 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9972 						 HCLGE_FILTER_FE_INGRESS, true,
9973 						 0);
9974 		if (ret)
9975 			return ret;
9976 	} else {
9977 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9978 						 HCLGE_FILTER_FE_EGRESS_V1_B,
9979 						 true, 0);
9980 		if (ret)
9981 			return ret;
9982 	}
9983 
9984 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9985 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9986 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9987 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9988 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9989 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9990 
9991 	ret = hclge_set_vlan_protocol_type(hdev);
9992 	if (ret)
9993 		return ret;
9994 
9995 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9996 		u16 vlan_tag;
9997 		u8 qos;
9998 
9999 		vport = &hdev->vport[i];
10000 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10001 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
10002 
10003 		ret = hclge_vlan_offload_cfg(vport,
10004 					     vport->port_base_vlan_cfg.state,
10005 					     vlan_tag, qos);
10006 		if (ret)
10007 			return ret;
10008 	}
10009 
10010 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10011 }
10012 
10013 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10014 				       bool writen_to_tbl)
10015 {
10016 	struct hclge_vport_vlan_cfg *vlan;
10017 
10018 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10019 	if (!vlan)
10020 		return;
10021 
10022 	vlan->hd_tbl_status = writen_to_tbl;
10023 	vlan->vlan_id = vlan_id;
10024 
10025 	list_add_tail(&vlan->node, &vport->vlan_list);
10026 }
10027 
10028 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10029 {
10030 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10031 	struct hclge_dev *hdev = vport->back;
10032 	int ret;
10033 
10034 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10035 		if (!vlan->hd_tbl_status) {
10036 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10037 						       vport->vport_id,
10038 						       vlan->vlan_id, false);
10039 			if (ret) {
10040 				dev_err(&hdev->pdev->dev,
10041 					"restore vport vlan list failed, ret=%d\n",
10042 					ret);
10043 				return ret;
10044 			}
10045 		}
10046 		vlan->hd_tbl_status = true;
10047 	}
10048 
10049 	return 0;
10050 }
10051 
10052 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10053 				      bool is_write_tbl)
10054 {
10055 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10056 	struct hclge_dev *hdev = vport->back;
10057 
10058 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10059 		if (vlan->vlan_id == vlan_id) {
10060 			if (is_write_tbl && vlan->hd_tbl_status)
10061 				hclge_set_vlan_filter_hw(hdev,
10062 							 htons(ETH_P_8021Q),
10063 							 vport->vport_id,
10064 							 vlan_id,
10065 							 true);
10066 
10067 			list_del(&vlan->node);
10068 			kfree(vlan);
10069 			break;
10070 		}
10071 	}
10072 }
10073 
10074 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10075 {
10076 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10077 	struct hclge_dev *hdev = vport->back;
10078 
10079 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10080 		if (vlan->hd_tbl_status)
10081 			hclge_set_vlan_filter_hw(hdev,
10082 						 htons(ETH_P_8021Q),
10083 						 vport->vport_id,
10084 						 vlan->vlan_id,
10085 						 true);
10086 
10087 		vlan->hd_tbl_status = false;
10088 		if (is_del_list) {
10089 			list_del(&vlan->node);
10090 			kfree(vlan);
10091 		}
10092 	}
10093 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10094 }
10095 
10096 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10097 {
10098 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10099 	struct hclge_vport *vport;
10100 	int i;
10101 
10102 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10103 		vport = &hdev->vport[i];
10104 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10105 			list_del(&vlan->node);
10106 			kfree(vlan);
10107 		}
10108 	}
10109 }
10110 
10111 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10112 {
10113 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10114 	struct hclge_dev *hdev = vport->back;
10115 	u16 vlan_proto;
10116 	u16 vlan_id;
10117 	u16 state;
10118 	int ret;
10119 
10120 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10121 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10122 	state = vport->port_base_vlan_cfg.state;
10123 
10124 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10125 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10126 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10127 					 vport->vport_id, vlan_id,
10128 					 false);
10129 		return;
10130 	}
10131 
10132 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10133 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10134 					       vport->vport_id,
10135 					       vlan->vlan_id, false);
10136 		if (ret)
10137 			break;
10138 		vlan->hd_tbl_status = true;
10139 	}
10140 }
10141 
10142 /* For global reset and imp reset, hardware will clear the mac table,
10143  * so we change the mac address state from ACTIVE to TO_ADD, then they
10144  * can be restored in the service task after the reset completes.
10145  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
10146  * to be restored after reset, so just remove these mac nodes from mac_list.
10147  */
10148 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10149 {
10150 	struct hclge_mac_node *mac_node, *tmp;
10151 
10152 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10153 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10154 			mac_node->state = HCLGE_MAC_TO_ADD;
10155 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10156 			list_del(&mac_node->node);
10157 			kfree(mac_node);
10158 		}
10159 	}
10160 }
10161 
10162 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10163 {
10164 	spin_lock_bh(&vport->mac_list_lock);
10165 
10166 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10167 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10168 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10169 
10170 	spin_unlock_bh(&vport->mac_list_lock);
10171 }
10172 
10173 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10174 {
10175 	struct hclge_vport *vport = &hdev->vport[0];
10176 	struct hnae3_handle *handle = &vport->nic;
10177 
10178 	hclge_restore_mac_table_common(vport);
10179 	hclge_restore_vport_vlan_table(vport);
10180 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10181 	hclge_restore_fd_entries(handle);
10182 }
10183 
10184 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10185 {
10186 	struct hclge_vport *vport = hclge_get_vport(handle);
10187 
10188 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10189 		vport->rxvlan_cfg.strip_tag1_en = false;
10190 		vport->rxvlan_cfg.strip_tag2_en = enable;
10191 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10192 	} else {
10193 		vport->rxvlan_cfg.strip_tag1_en = enable;
10194 		vport->rxvlan_cfg.strip_tag2_en = true;
10195 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10196 	}
10197 
10198 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10199 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10200 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10201 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10202 
10203 	return hclge_set_vlan_rx_offload_cfg(vport);
10204 }
10205 
10206 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10207 {
10208 	struct hclge_dev *hdev = vport->back;
10209 
10210 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10211 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10212 }
10213 
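/* Switch the VLAN filter entries of @vport between the port based VLAN and
 * the VLANs from the vport list: enabling the port based VLAN clears the
 * vport list entries (and VLAN 0) and installs the new tag, while disabling
 * it restores VLAN 0, removes the old tag and re-adds the vport list entries.
 */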
10214 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10215 					    u16 port_base_vlan_state,
10216 					    struct hclge_vlan_info *new_info,
10217 					    struct hclge_vlan_info *old_info)
10218 {
10219 	struct hclge_dev *hdev = vport->back;
10220 	int ret;
10221 
10222 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10223 		hclge_rm_vport_all_vlan_table(vport, false);
10224 		/* force clear VLAN 0 */
10225 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10226 		if (ret)
10227 			return ret;
10228 		return hclge_set_vlan_filter_hw(hdev,
10229 						 htons(new_info->vlan_proto),
10230 						 vport->vport_id,
10231 						 new_info->vlan_tag,
10232 						 false);
10233 	}
10234 
10235 	/* force add VLAN 0 */
10236 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10237 	if (ret)
10238 		return ret;
10239 
10240 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10241 				       vport->vport_id, old_info->vlan_tag,
10242 				       true);
10243 	if (ret)
10244 		return ret;
10245 
10246 	return hclge_add_vport_all_vlan_table(vport);
10247 }
10248 
10249 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10250 					  const struct hclge_vlan_info *old_cfg)
10251 {
10252 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10253 		return true;
10254 
10255 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10256 		return true;
10257 
10258 	return false;
10259 }
10260 
10261 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10262 				    struct hclge_vlan_info *vlan_info)
10263 {
10264 	struct hnae3_handle *nic = &vport->nic;
10265 	struct hclge_vlan_info *old_vlan_info;
10266 	struct hclge_dev *hdev = vport->back;
10267 	int ret;
10268 
10269 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10270 
10271 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10272 				     vlan_info->qos);
10273 	if (ret)
10274 		return ret;
10275 
10276 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10277 		goto out;
10278 
10279 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10280 		/* add new VLAN tag */
10281 		ret = hclge_set_vlan_filter_hw(hdev,
10282 					       htons(vlan_info->vlan_proto),
10283 					       vport->vport_id,
10284 					       vlan_info->vlan_tag,
10285 					       false);
10286 		if (ret)
10287 			return ret;
10288 
10289 		/* remove old VLAN tag */
10290 		if (old_vlan_info->vlan_tag == 0)
10291 			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10292 						       true, 0);
10293 		else
10294 			ret = hclge_set_vlan_filter_hw(hdev,
10295 						       htons(ETH_P_8021Q),
10296 						       vport->vport_id,
10297 						       old_vlan_info->vlan_tag,
10298 						       true);
10299 		if (ret) {
10300 			dev_err(&hdev->pdev->dev,
10301 				"failed to clear vport%u port base vlan %u, ret = %d.\n",
10302 				vport->vport_id, old_vlan_info->vlan_tag, ret);
10303 			return ret;
10304 		}
10305 
10306 		goto out;
10307 	}
10308 
10309 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10310 					       old_vlan_info);
10311 	if (ret)
10312 		return ret;
10313 
10314 out:
10315 	vport->port_base_vlan_cfg.state = state;
10316 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10317 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10318 	else
10319 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10320 
10321 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10322 	hclge_set_vport_vlan_fltr_change(vport);
10323 
10324 	return 0;
10325 }
10326 
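/* Translate a requested VF VLAN/qos pair into a port based VLAN state
 * transition: enable, disable, modify, or no change relative to the current
 * configuration.
 */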
10327 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10328 					  enum hnae3_port_base_vlan_state state,
10329 					  u16 vlan, u8 qos)
10330 {
10331 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10332 		if (!vlan && !qos)
10333 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10334 
10335 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10336 	}
10337 
10338 	if (!vlan && !qos)
10339 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10340 
10341 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10342 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10343 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10344 
10345 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10346 }
10347 
10348 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10349 				    u16 vlan, u8 qos, __be16 proto)
10350 {
10351 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10352 	struct hclge_vport *vport = hclge_get_vport(handle);
10353 	struct hclge_dev *hdev = vport->back;
10354 	struct hclge_vlan_info vlan_info;
10355 	u16 state;
10356 	int ret;
10357 
10358 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10359 		return -EOPNOTSUPP;
10360 
10361 	vport = hclge_get_vf_vport(hdev, vfid);
10362 	if (!vport)
10363 		return -EINVAL;
10364 
10365 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10366 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10367 		return -EINVAL;
10368 	if (proto != htons(ETH_P_8021Q))
10369 		return -EPROTONOSUPPORT;
10370 
10371 	state = hclge_get_port_base_vlan_state(vport,
10372 					       vport->port_base_vlan_cfg.state,
10373 					       vlan, qos);
10374 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10375 		return 0;
10376 
10377 	vlan_info.vlan_tag = vlan;
10378 	vlan_info.qos = qos;
10379 	vlan_info.vlan_proto = ntohs(proto);
10380 
10381 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10382 	if (ret) {
10383 		dev_err(&hdev->pdev->dev,
10384 			"failed to update port base vlan for vf %d, ret = %d\n",
10385 			vfid, ret);
10386 		return ret;
10387 	}
10388 
10389 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10390 	 * VLAN state.
10391 	 */
10392 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10393 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10394 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10395 						  vport->vport_id, state,
10396 						  &vlan_info);
10397 
10398 	return 0;
10399 }
10400 
10401 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10402 {
10403 	struct hclge_vlan_info *vlan_info;
10404 	struct hclge_vport *vport;
10405 	int ret;
10406 	int vf;
10407 
10408 	/* clear the port base vlan for all vfs */
10409 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10410 		vport = &hdev->vport[vf];
10411 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10412 
10413 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10414 					       vport->vport_id,
10415 					       vlan_info->vlan_tag, true);
10416 		if (ret)
10417 			dev_err(&hdev->pdev->dev,
10418 				"failed to clear vf vlan for vf%d, ret = %d\n",
10419 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10420 	}
10421 }
10422 
10423 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10424 			  u16 vlan_id, bool is_kill)
10425 {
10426 	struct hclge_vport *vport = hclge_get_vport(handle);
10427 	struct hclge_dev *hdev = vport->back;
10428 	bool writen_to_tbl = false;
10429 	int ret = 0;
10430 
10431 	/* When the device is resetting or the reset failed, firmware is
10432 	 * unable to handle the mailbox. Just record the vlan id, and remove
10433 	 * it after the reset finishes.
10434 	 */
10435 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10436 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10437 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10438 		return -EBUSY;
10439 	}
10440 
10441 	/* when port base vlan is enabled, we use the port base vlan as the
10442 	 * vlan filter entry. In this case, we don't update the vlan filter
10443 	 * table when the user adds a new vlan or removes an existing one,
10444 	 * we just update the vport vlan list. The vlan ids in the vlan list
10445 	 * will only be written into the vlan filter table once port base
10446 	 * vlan is disabled.
10447 	 */
10447 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10448 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10449 					       vlan_id, is_kill);
10450 		writen_to_tbl = true;
10451 	}
10452 
10453 	if (!ret) {
10454 		if (is_kill)
10455 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10456 		else
10457 			hclge_add_vport_vlan_table(vport, vlan_id,
10458 						   writen_to_tbl);
10459 	} else if (is_kill) {
10460 		/* when removing the hw vlan filter failed, record the vlan
10461 		 * id, and try to remove it from hw later, to stay consistent
10462 		 * with the stack
10463 		 */
10464 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10465 	}
10466 
10467 	hclge_set_vport_vlan_fltr_change(vport);
10468 
10469 	return ret;
10470 }
10471 
10472 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10473 {
10474 	struct hclge_vport *vport;
10475 	int ret;
10476 	u16 i;
10477 
10478 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10479 		vport = &hdev->vport[i];
10480 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10481 					&vport->state))
10482 			continue;
10483 
10484 		ret = hclge_enable_vport_vlan_filter(vport,
10485 						     vport->req_vlan_fltr_en);
10486 		if (ret) {
10487 			dev_err(&hdev->pdev->dev,
10488 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10489 				vport->vport_id, ret);
10490 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10491 				&vport->state);
10492 			return;
10493 		}
10494 	}
10495 }
10496 
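/* Periodic service task helper: retry the VLAN deletions recorded in
 * vlan_del_fail_bmap for each vport (bounded per run), then sync the VLAN
 * filter enable state.
 */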
10497 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10498 {
10499 #define HCLGE_MAX_SYNC_COUNT	60
10500 
10501 	int i, ret, sync_cnt = 0;
10502 	u16 vlan_id;
10503 
10504 	/* start from vport 1 for PF is always alive */
10505 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10506 		struct hclge_vport *vport = &hdev->vport[i];
10507 
10508 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10509 					 VLAN_N_VID);
10510 		while (vlan_id != VLAN_N_VID) {
10511 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10512 						       vport->vport_id, vlan_id,
10513 						       true);
10514 			if (ret && ret != -EINVAL)
10515 				return;
10516 
10517 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10518 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10519 			hclge_set_vport_vlan_fltr_change(vport);
10520 
10521 			sync_cnt++;
10522 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10523 				return;
10524 
10525 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10526 						 VLAN_N_VID);
10527 		}
10528 	}
10529 
10530 	hclge_sync_vlan_fltr_state(hdev);
10531 }
10532 
10533 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10534 {
10535 	struct hclge_config_max_frm_size_cmd *req;
10536 	struct hclge_desc desc;
10537 
10538 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10539 
10540 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10541 	req->max_frm_size = cpu_to_le16(new_mps);
10542 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10543 
10544 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10545 }
10546 
10547 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10548 {
10549 	struct hclge_vport *vport = hclge_get_vport(handle);
10550 
10551 	return hclge_set_vport_mtu(vport, new_mtu);
10552 }
10553 
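/* Convert the requested MTU into a maximum frame size and program it.
 * The frame overhead accounted for is ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * 2 * VLAN_HLEN (2 * 4) = 26 bytes, so e.g. an MTU of 1500 maps to a
 * 1526 byte frame, which is then raised to at least HCLGE_MAC_DEFAULT_FRAME.
 * For a VF vport only the software mps is updated (and it must fit within
 * the PF's mps); for the PF the MAC and the packet buffers are
 * reconfigured with the client stopped.
 */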
10554 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10555 {
10556 	struct hclge_dev *hdev = vport->back;
10557 	int i, max_frm_size, ret;
10558 
10559 	/* HW supports 2 layers of vlan */
10560 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10561 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10562 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10563 		return -EINVAL;
10564 
10565 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10566 	mutex_lock(&hdev->vport_lock);
10567 	/* VF's mps must fit within hdev->mps */
10568 	if (vport->vport_id && max_frm_size > hdev->mps) {
10569 		mutex_unlock(&hdev->vport_lock);
10570 		return -EINVAL;
10571 	} else if (vport->vport_id) {
10572 		vport->mps = max_frm_size;
10573 		mutex_unlock(&hdev->vport_lock);
10574 		return 0;
10575 	}
10576 
10577 	/* PF's mps must be no less than any VF's mps */
10578 	for (i = 1; i < hdev->num_alloc_vport; i++)
10579 		if (max_frm_size < hdev->vport[i].mps) {
10580 			mutex_unlock(&hdev->vport_lock);
10581 			return -EINVAL;
10582 		}
10583 
10584 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10585 
10586 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10587 	if (ret) {
10588 		dev_err(&hdev->pdev->dev,
10589 			"Change mtu fail, ret =%d\n", ret);
10590 		goto out;
10591 	}
10592 
10593 	hdev->mps = max_frm_size;
10594 	vport->mps = max_frm_size;
10595 
10596 	ret = hclge_buffer_alloc(hdev);
10597 	if (ret)
10598 		dev_err(&hdev->pdev->dev,
10599 			"Allocate buffer fail, ret =%d\n", ret);
10600 
10601 out:
10602 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10603 	mutex_unlock(&hdev->vport_lock);
10604 	return ret;
10605 }
10606 
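/* Send the RESET_TQP_QUEUE command for one global queue id: with
 * enable == true the per-queue reset request bit is set (assert the
 * reset), otherwise the bit is left clear (deassert it).
 */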
10607 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10608 				    bool enable)
10609 {
10610 	struct hclge_reset_tqp_queue_cmd *req;
10611 	struct hclge_desc desc;
10612 	int ret;
10613 
10614 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10615 
10616 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10617 	req->tqp_id = cpu_to_le16(queue_id);
10618 	if (enable)
10619 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10620 
10621 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10622 	if (ret) {
10623 		dev_err(&hdev->pdev->dev,
10624 			"Send tqp reset cmd error, status =%d\n", ret);
10625 		return ret;
10626 	}
10627 
10628 	return 0;
10629 }
10630 
10631 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10632 {
10633 	struct hclge_reset_tqp_queue_cmd *req;
10634 	struct hclge_desc desc;
10635 	int ret;
10636 
10637 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10638 
10639 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10640 	req->tqp_id = cpu_to_le16(queue_id);
10641 
10642 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10643 	if (ret) {
10644 		dev_err(&hdev->pdev->dev,
10645 			"Get reset status error, status =%d\n", ret);
10646 		return ret;
10647 	}
10648 
10649 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10650 }
10651 
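/* Translate a handle-local queue id into the global TQP index used by
 * the queue reset commands.
 */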
10652 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10653 {
10654 	struct hnae3_queue *queue;
10655 	struct hclge_tqp *tqp;
10656 
10657 	queue = handle->kinfo.tqp[queue_id];
10658 	tqp = container_of(queue, struct hclge_tqp, q);
10659 
10660 	return tqp->index;
10661 }
10662 
10663 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10664 {
10665 	struct hclge_vport *vport = hclge_get_vport(handle);
10666 	struct hclge_dev *hdev = vport->back;
10667 	u16 reset_try_times = 0;
10668 	int reset_status;
10669 	u16 queue_gid;
10670 	int ret;
10671 	u16 i;
10672 
10673 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10674 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10675 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10676 		if (ret) {
10677 			dev_err(&hdev->pdev->dev,
10678 				"failed to send reset tqp cmd, ret = %d\n",
10679 				ret);
10680 			return ret;
10681 		}
10682 
10683 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10684 			reset_status = hclge_get_reset_status(hdev, queue_gid);
10685 			if (reset_status)
10686 				break;
10687 
10688 			/* Wait for tqp hw reset */
10689 			usleep_range(1000, 1200);
10690 		}
10691 
10692 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10693 			dev_err(&hdev->pdev->dev,
10694 				"wait for tqp hw reset timeout\n");
10695 			return -ETIME;
10696 		}
10697 
10698 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10699 		if (ret) {
10700 			dev_err(&hdev->pdev->dev,
10701 				"failed to deassert soft reset, ret = %d\n",
10702 				ret);
10703 			return ret;
10704 		}
10705 		reset_try_times = 0;
10706 	}
10707 	return 0;
10708 }
10709 
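/* Reset the RCB for all of the handle's queues in one command, giving
 * the starting global queue id and the queue count. If the firmware
 * reports the command as unsupported, fall back to resetting the queues
 * one by one via hclge_reset_tqp_cmd().
 */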
10710 static int hclge_reset_rcb(struct hnae3_handle *handle)
10711 {
10712 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10713 #define HCLGE_RESET_RCB_SUCCESS		1U
10714 
10715 	struct hclge_vport *vport = hclge_get_vport(handle);
10716 	struct hclge_dev *hdev = vport->back;
10717 	struct hclge_reset_cmd *req;
10718 	struct hclge_desc desc;
10719 	u8 return_status;
10720 	u16 queue_gid;
10721 	int ret;
10722 
10723 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10724 
10725 	req = (struct hclge_reset_cmd *)desc.data;
10726 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10727 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10728 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10729 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10730 
10731 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10732 	if (ret) {
10733 		dev_err(&hdev->pdev->dev,
10734 			"failed to send rcb reset cmd, ret = %d\n", ret);
10735 		return ret;
10736 	}
10737 
10738 	return_status = req->fun_reset_rcb_return_status;
10739 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10740 		return 0;
10741 
10742 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10743 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10744 			return_status);
10745 		return -EIO;
10746 	}
10747 
10748 	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10749 	 * again to reset all tqps
10750 	 */
10751 	return hclge_reset_tqp_cmd(handle);
10752 }
10753 
10754 int hclge_reset_tqp(struct hnae3_handle *handle)
10755 {
10756 	struct hclge_vport *vport = hclge_get_vport(handle);
10757 	struct hclge_dev *hdev = vport->back;
10758 	int ret;
10759 
10760 	/* only the PF's tqps need to be disabled */
10761 	if (!vport->vport_id) {
10762 		ret = hclge_tqp_enable(handle, false);
10763 		if (ret) {
10764 			dev_err(&hdev->pdev->dev,
10765 				"failed to disable tqp, ret = %d\n", ret);
10766 			return ret;
10767 		}
10768 	}
10769 
10770 	return hclge_reset_rcb(handle);
10771 }
10772 
10773 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10774 {
10775 	struct hclge_vport *vport = hclge_get_vport(handle);
10776 	struct hclge_dev *hdev = vport->back;
10777 
10778 	return hdev->fw_version;
10779 }
10780 
10781 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10782 {
10783 	struct phy_device *phydev = hdev->hw.mac.phydev;
10784 
10785 	if (!phydev)
10786 		return;
10787 
10788 	phy_set_asym_pause(phydev, rx_en, tx_en);
10789 }
10790 
10791 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10792 {
10793 	int ret;
10794 
10795 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10796 		return 0;
10797 
10798 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10799 	if (ret)
10800 		dev_err(&hdev->pdev->dev,
10801 			"configure pauseparam error, ret = %d.\n", ret);
10802 
10803 	return ret;
10804 }
10805 
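/* Re-resolve MAC pause settings from the PHY autoneg result: the local
 * advertisement is taken from phydev->advertising, the link partner's
 * from its pause/asym_pause bits, and mii_resolve_flowctrl_fdx() decides
 * the rx/tx pause combination. Pause is disabled on half duplex links.
 */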
10806 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10807 {
10808 	struct phy_device *phydev = hdev->hw.mac.phydev;
10809 	u16 remote_advertising = 0;
10810 	u16 local_advertising;
10811 	u32 rx_pause, tx_pause;
10812 	u8 flowctl;
10813 
10814 	if (!phydev->link || !phydev->autoneg)
10815 		return 0;
10816 
10817 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10818 
10819 	if (phydev->pause)
10820 		remote_advertising = LPA_PAUSE_CAP;
10821 
10822 	if (phydev->asym_pause)
10823 		remote_advertising |= LPA_PAUSE_ASYM;
10824 
10825 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10826 					   remote_advertising);
10827 	tx_pause = flowctl & FLOW_CTRL_TX;
10828 	rx_pause = flowctl & FLOW_CTRL_RX;
10829 
10830 	if (phydev->duplex == HCLGE_MAC_HALF) {
10831 		tx_pause = 0;
10832 		rx_pause = 0;
10833 	}
10834 
10835 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10836 }
10837 
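/* Report pause autoneg (only meaningful for copper ports) and the
 * current rx/tx pause state derived from tm_info.fc_mode. When PFC is
 * active, link-level pause is reported as disabled in both directions.
 */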
10838 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10839 				 u32 *rx_en, u32 *tx_en)
10840 {
10841 	struct hclge_vport *vport = hclge_get_vport(handle);
10842 	struct hclge_dev *hdev = vport->back;
10843 	u8 media_type = hdev->hw.mac.media_type;
10844 
10845 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10846 		    hclge_get_autoneg(handle) : 0;
10847 
10848 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10849 		*rx_en = 0;
10850 		*tx_en = 0;
10851 		return;
10852 	}
10853 
10854 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10855 		*rx_en = 1;
10856 		*tx_en = 0;
10857 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10858 		*tx_en = 1;
10859 		*rx_en = 0;
10860 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10861 		*rx_en = 1;
10862 		*tx_en = 1;
10863 	} else {
10864 		*rx_en = 0;
10865 		*tx_en = 0;
10866 	}
10867 }
10868 
10869 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10870 					 u32 rx_en, u32 tx_en)
10871 {
10872 	if (rx_en && tx_en)
10873 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10874 	else if (rx_en && !tx_en)
10875 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10876 	else if (!rx_en && tx_en)
10877 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10878 	else
10879 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10880 
10881 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10882 }
10883 
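/* Backs the ethtool pause settings: changing pause autoneg here is
 * rejected (it must be done via ethtool -s), as is any change while PFC
 * is active. The requested rx/tx pause is advertised to the PHY and
 * recorded as the user's fc mode; it is applied to the MAC directly when
 * autoneg is off or the PHY is managed by the IMP firmware, otherwise
 * PHY autoneg is restarted so the link partner can renegotiate.
 */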
10884 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10885 				u32 rx_en, u32 tx_en)
10886 {
10887 	struct hclge_vport *vport = hclge_get_vport(handle);
10888 	struct hclge_dev *hdev = vport->back;
10889 	struct phy_device *phydev = hdev->hw.mac.phydev;
10890 	u32 fc_autoneg;
10891 
10892 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10893 		fc_autoneg = hclge_get_autoneg(handle);
10894 		if (auto_neg != fc_autoneg) {
10895 			dev_info(&hdev->pdev->dev,
10896 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10897 			return -EOPNOTSUPP;
10898 		}
10899 	}
10900 
10901 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10902 		dev_info(&hdev->pdev->dev,
10903 			 "Priority flow control enabled. Cannot set link flow control.\n");
10904 		return -EOPNOTSUPP;
10905 	}
10906 
10907 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10908 
10909 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10910 
10911 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10912 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10913 
10914 	if (phydev)
10915 		return phy_start_aneg(phydev);
10916 
10917 	return -EOPNOTSUPP;
10918 }
10919 
10920 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10921 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10922 {
10923 	struct hclge_vport *vport = hclge_get_vport(handle);
10924 	struct hclge_dev *hdev = vport->back;
10925 
10926 	if (speed)
10927 		*speed = hdev->hw.mac.speed;
10928 	if (duplex)
10929 		*duplex = hdev->hw.mac.duplex;
10930 	if (auto_neg)
10931 		*auto_neg = hdev->hw.mac.autoneg;
10932 }
10933 
10934 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10935 				 u8 *module_type)
10936 {
10937 	struct hclge_vport *vport = hclge_get_vport(handle);
10938 	struct hclge_dev *hdev = vport->back;
10939 
10940 	/* When the nic is down, the service task is not running and does not
10941 	 * update the port information every second. Query the port information
10942 	 * before returning the media type to ensure it is correct.
10943 	 */
10944 	hclge_update_port_info(hdev);
10945 
10946 	if (media_type)
10947 		*media_type = hdev->hw.mac.media_type;
10948 
10949 	if (module_type)
10950 		*module_type = hdev->hw.mac.module_type;
10951 }
10952 
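/* Read the MDI/MDI-X configuration and status from the PHY: the control
 * field comes from the CSC register on the MDIX page, the resolved
 * status from the CSS register, and the page is switched back to copper
 * afterwards. Values are translated to the ETH_TP_MDI_* constants;
 * without a PHY both outputs are reported as invalid.
 */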
10953 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10954 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10955 {
10956 	struct hclge_vport *vport = hclge_get_vport(handle);
10957 	struct hclge_dev *hdev = vport->back;
10958 	struct phy_device *phydev = hdev->hw.mac.phydev;
10959 	int mdix_ctrl, mdix, is_resolved;
10960 	unsigned int retval;
10961 
10962 	if (!phydev) {
10963 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10964 		*tp_mdix = ETH_TP_MDI_INVALID;
10965 		return;
10966 	}
10967 
10968 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10969 
10970 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10971 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10972 				    HCLGE_PHY_MDIX_CTRL_S);
10973 
10974 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10975 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10976 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10977 
10978 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10979 
10980 	switch (mdix_ctrl) {
10981 	case 0x0:
10982 		*tp_mdix_ctrl = ETH_TP_MDI;
10983 		break;
10984 	case 0x1:
10985 		*tp_mdix_ctrl = ETH_TP_MDI_X;
10986 		break;
10987 	case 0x3:
10988 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10989 		break;
10990 	default:
10991 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10992 		break;
10993 	}
10994 
10995 	if (!is_resolved)
10996 		*tp_mdix = ETH_TP_MDI_INVALID;
10997 	else if (mdix)
10998 		*tp_mdix = ETH_TP_MDI_X;
10999 	else
11000 		*tp_mdix = ETH_TP_MDI;
11001 }
11002 
11003 static void hclge_info_show(struct hclge_dev *hdev)
11004 {
11005 	struct device *dev = &hdev->pdev->dev;
11006 
11007 	dev_info(dev, "PF info begin:\n");
11008 
11009 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11010 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11011 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11012 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11013 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11014 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11015 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11016 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11017 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11018 	dev_info(dev, "This is %s PF\n",
11019 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11020 	dev_info(dev, "DCB %s\n",
11021 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11022 	dev_info(dev, "MQPRIO %s\n",
11023 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11024 
11025 	dev_info(dev, "PF info end.\n");
11026 }
11027 
11028 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11029 					  struct hclge_vport *vport)
11030 {
11031 	struct hnae3_client *client = vport->nic.client;
11032 	struct hclge_dev *hdev = ae_dev->priv;
11033 	int rst_cnt = hdev->rst_stats.reset_cnt;
11034 	int ret;
11035 
11036 	ret = client->ops->init_instance(&vport->nic);
11037 	if (ret)
11038 		return ret;
11039 
11040 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11041 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11042 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11043 		ret = -EBUSY;
11044 		goto init_nic_err;
11045 	}
11046 
11047 	/* Enable nic hw error interrupts */
11048 	ret = hclge_config_nic_hw_error(hdev, true);
11049 	if (ret) {
11050 		dev_err(&ae_dev->pdev->dev,
11051 			"fail(%d) to enable hw error interrupts\n", ret);
11052 		goto init_nic_err;
11053 	}
11054 
11055 	hnae3_set_client_init_flag(client, ae_dev, 1);
11056 
11057 	if (netif_msg_drv(&hdev->vport->nic))
11058 		hclge_info_show(hdev);
11059 
11060 	return ret;
11061 
11062 init_nic_err:
11063 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11064 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11065 		msleep(HCLGE_WAIT_RESET_DONE);
11066 
11067 	client->ops->uninit_instance(&vport->nic, 0);
11068 
11069 	return ret;
11070 }
11071 
11072 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11073 					   struct hclge_vport *vport)
11074 {
11075 	struct hclge_dev *hdev = ae_dev->priv;
11076 	struct hnae3_client *client;
11077 	int rst_cnt;
11078 	int ret;
11079 
11080 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11081 	    !hdev->nic_client)
11082 		return 0;
11083 
11084 	client = hdev->roce_client;
11085 	ret = hclge_init_roce_base_info(vport);
11086 	if (ret)
11087 		return ret;
11088 
11089 	rst_cnt = hdev->rst_stats.reset_cnt;
11090 	ret = client->ops->init_instance(&vport->roce);
11091 	if (ret)
11092 		return ret;
11093 
11094 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11095 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11096 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11097 		ret = -EBUSY;
11098 		goto init_roce_err;
11099 	}
11100 
11101 	/* Enable roce ras interrupts */
11102 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11103 	if (ret) {
11104 		dev_err(&ae_dev->pdev->dev,
11105 			"fail(%d) to enable roce ras interrupts\n", ret);
11106 		goto init_roce_err;
11107 	}
11108 
11109 	hnae3_set_client_init_flag(client, ae_dev, 1);
11110 
11111 	return 0;
11112 
11113 init_roce_err:
11114 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11115 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11116 		msleep(HCLGE_WAIT_RESET_DONE);
11117 
11118 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11119 
11120 	return ret;
11121 }
11122 
11123 static int hclge_init_client_instance(struct hnae3_client *client,
11124 				      struct hnae3_ae_dev *ae_dev)
11125 {
11126 	struct hclge_dev *hdev = ae_dev->priv;
11127 	struct hclge_vport *vport = &hdev->vport[0];
11128 	int ret;
11129 
11130 	switch (client->type) {
11131 	case HNAE3_CLIENT_KNIC:
11132 		hdev->nic_client = client;
11133 		vport->nic.client = client;
11134 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11135 		if (ret)
11136 			goto clear_nic;
11137 
11138 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11139 		if (ret)
11140 			goto clear_roce;
11141 
11142 		break;
11143 	case HNAE3_CLIENT_ROCE:
11144 		if (hnae3_dev_roce_supported(hdev)) {
11145 			hdev->roce_client = client;
11146 			vport->roce.client = client;
11147 		}
11148 
11149 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11150 		if (ret)
11151 			goto clear_roce;
11152 
11153 		break;
11154 	default:
11155 		return -EINVAL;
11156 	}
11157 
11158 	return 0;
11159 
11160 clear_nic:
11161 	hdev->nic_client = NULL;
11162 	vport->nic.client = NULL;
11163 	return ret;
11164 clear_roce:
11165 	hdev->roce_client = NULL;
11166 	vport->roce.client = NULL;
11167 	return ret;
11168 }
11169 
11170 static void hclge_uninit_client_instance(struct hnae3_client *client,
11171 					 struct hnae3_ae_dev *ae_dev)
11172 {
11173 	struct hclge_dev *hdev = ae_dev->priv;
11174 	struct hclge_vport *vport = &hdev->vport[0];
11175 
11176 	if (hdev->roce_client) {
11177 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11178 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11179 			msleep(HCLGE_WAIT_RESET_DONE);
11180 
11181 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11182 		hdev->roce_client = NULL;
11183 		vport->roce.client = NULL;
11184 	}
11185 	if (client->type == HNAE3_CLIENT_ROCE)
11186 		return;
11187 	if (hdev->nic_client && client->ops->uninit_instance) {
11188 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11189 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11190 			msleep(HCLGE_WAIT_RESET_DONE);
11191 
11192 		client->ops->uninit_instance(&vport->nic, 0);
11193 		hdev->nic_client = NULL;
11194 		vport->nic.client = NULL;
11195 	}
11196 }
11197 
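/* Map the optional device memory BAR (BAR 4) with write-combining. If
 * the device does not expose this BAR, the function returns success and
 * hw->mem_base stays NULL.
 */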
11198 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11199 {
11200 #define HCLGE_MEM_BAR		4
11201 
11202 	struct pci_dev *pdev = hdev->pdev;
11203 	struct hclge_hw *hw = &hdev->hw;
11204 
11205 	/* if the device does not have device memory, return directly */
11206 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11207 		return 0;
11208 
11209 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11210 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11211 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11212 	if (!hw->mem_base) {
11213 		dev_err(&pdev->dev, "failed to map device memory\n");
11214 		return -EFAULT;
11215 	}
11216 
11217 	return 0;
11218 }
11219 
11220 static int hclge_pci_init(struct hclge_dev *hdev)
11221 {
11222 	struct pci_dev *pdev = hdev->pdev;
11223 	struct hclge_hw *hw;
11224 	int ret;
11225 
11226 	ret = pci_enable_device(pdev);
11227 	if (ret) {
11228 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11229 		return ret;
11230 	}
11231 
11232 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11233 	if (ret) {
11234 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11235 		if (ret) {
11236 			dev_err(&pdev->dev,
11237 				"can't set consistent PCI DMA");
11238 			goto err_disable_device;
11239 		}
11240 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11241 	}
11242 
11243 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11244 	if (ret) {
11245 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11246 		goto err_disable_device;
11247 	}
11248 
11249 	pci_set_master(pdev);
11250 	hw = &hdev->hw;
11251 	hw->io_base = pcim_iomap(pdev, 2, 0);
11252 	if (!hw->io_base) {
11253 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11254 		ret = -ENOMEM;
11255 		goto err_clr_master;
11256 	}
11257 
11258 	ret = hclge_dev_mem_map(hdev);
11259 	if (ret)
11260 		goto err_unmap_io_base;
11261 
11262 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11263 
11264 	return 0;
11265 
11266 err_unmap_io_base:
11267 	pcim_iounmap(pdev, hdev->hw.io_base);
11268 err_clr_master:
11269 	pci_clear_master(pdev);
11270 	pci_release_regions(pdev);
11271 err_disable_device:
11272 	pci_disable_device(pdev);
11273 
11274 	return ret;
11275 }
11276 
11277 static void hclge_pci_uninit(struct hclge_dev *hdev)
11278 {
11279 	struct pci_dev *pdev = hdev->pdev;
11280 
11281 	if (hdev->hw.mem_base)
11282 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11283 
11284 	pcim_iounmap(pdev, hdev->hw.io_base);
11285 	pci_free_irq_vectors(pdev);
11286 	pci_clear_master(pdev);
11287 	pci_release_mem_regions(pdev);
11288 	pci_disable_device(pdev);
11289 }
11290 
11291 static void hclge_state_init(struct hclge_dev *hdev)
11292 {
11293 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11294 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11295 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11296 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11297 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11298 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11299 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11300 }
11301 
11302 static void hclge_state_uninit(struct hclge_dev *hdev)
11303 {
11304 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11305 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11306 
11307 	if (hdev->reset_timer.function)
11308 		del_timer_sync(&hdev->reset_timer);
11309 	if (hdev->service_task.work.func)
11310 		cancel_delayed_work_sync(&hdev->service_task);
11311 }
11312 
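/* Prepare the device for an externally triggered reset (e.g. FLR):
 * take the reset semaphore, mark the reset as being handled and run the
 * prepare stage. If preparation fails or another reset is still pending,
 * release the semaphore and retry up to HCLGE_RESET_RETRY_CNT times with
 * a HCLGE_RESET_RETRY_WAIT_MS delay. Finally the misc vector and command
 * queue are disabled until hclge_reset_done() rebuilds the device.
 */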
11313 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11314 					enum hnae3_reset_type rst_type)
11315 {
11316 #define HCLGE_RESET_RETRY_WAIT_MS	500
11317 #define HCLGE_RESET_RETRY_CNT	5
11318 
11319 	struct hclge_dev *hdev = ae_dev->priv;
11320 	int retry_cnt = 0;
11321 	int ret;
11322 
11323 retry:
11324 	down(&hdev->reset_sem);
11325 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11326 	hdev->reset_type = rst_type;
11327 	ret = hclge_reset_prepare(hdev);
11328 	if (ret || hdev->reset_pending) {
11329 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11330 			ret);
11331 		if (hdev->reset_pending ||
11332 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11333 			dev_err(&hdev->pdev->dev,
11334 				"reset_pending:0x%lx, retry_cnt:%d\n",
11335 				hdev->reset_pending, retry_cnt);
11336 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11337 			up(&hdev->reset_sem);
11338 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11339 			goto retry;
11340 		}
11341 	}
11342 
11343 	/* disable the misc vector before the reset completes */
11344 	hclge_enable_vector(&hdev->misc_vector, false);
11345 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11346 
11347 	if (hdev->reset_type == HNAE3_FLR_RESET)
11348 		hdev->rst_stats.flr_rst_cnt++;
11349 }
11350 
11351 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11352 {
11353 	struct hclge_dev *hdev = ae_dev->priv;
11354 	int ret;
11355 
11356 	hclge_enable_vector(&hdev->misc_vector, true);
11357 
11358 	ret = hclge_reset_rebuild(hdev);
11359 	if (ret)
11360 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11361 
11362 	hdev->reset_type = HNAE3_NONE_RESET;
11363 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11364 	up(&hdev->reset_sem);
11365 }
11366 
11367 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11368 {
11369 	u16 i;
11370 
11371 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11372 		struct hclge_vport *vport = &hdev->vport[i];
11373 		int ret;
11374 
11375 		/* Send cmd to clear VF's FUNC_RST_ING */
11376 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11377 		if (ret)
11378 			dev_warn(&hdev->pdev->dev,
11379 				 "clear vf(%u) rst failed %d!\n",
11380 				 vport->vport_id, ret);
11381 	}
11382 }
11383 
11384 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11385 {
11386 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11387 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11388 }
11389 
11390 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11391 {
11392 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11393 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11394 }
11395 
11396 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11397 {
11398 	struct pci_dev *pdev = ae_dev->pdev;
11399 	struct hclge_dev *hdev;
11400 	int ret;
11401 
11402 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11403 	if (!hdev)
11404 		return -ENOMEM;
11405 
11406 	hdev->pdev = pdev;
11407 	hdev->ae_dev = ae_dev;
11408 	hdev->reset_type = HNAE3_NONE_RESET;
11409 	hdev->reset_level = HNAE3_FUNC_RESET;
11410 	ae_dev->priv = hdev;
11411 
11412 	/* HW supports 2 layers of vlan */
11413 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11414 
11415 	mutex_init(&hdev->vport_lock);
11416 	spin_lock_init(&hdev->fd_rule_lock);
11417 	sema_init(&hdev->reset_sem, 1);
11418 
11419 	ret = hclge_pci_init(hdev);
11420 	if (ret)
11421 		goto out;
11422 
11423 	/* Firmware command queue initialize */
11424 	ret = hclge_cmd_queue_init(hdev);
11425 	if (ret)
11426 		goto err_pci_uninit;
11427 
11428 	/* Firmware command initialize */
11429 	ret = hclge_cmd_init(hdev);
11430 	if (ret)
11431 		goto err_cmd_uninit;
11432 
11433 	ret = hclge_get_cap(hdev);
11434 	if (ret)
11435 		goto err_cmd_uninit;
11436 
11437 	ret = hclge_query_dev_specs(hdev);
11438 	if (ret) {
11439 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11440 			ret);
11441 		goto err_cmd_uninit;
11442 	}
11443 
11444 	ret = hclge_configure(hdev);
11445 	if (ret) {
11446 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11447 		goto err_cmd_uninit;
11448 	}
11449 
11450 	ret = hclge_init_msi(hdev);
11451 	if (ret) {
11452 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11453 		goto err_cmd_uninit;
11454 	}
11455 
11456 	ret = hclge_misc_irq_init(hdev);
11457 	if (ret)
11458 		goto err_msi_uninit;
11459 
11460 	ret = hclge_alloc_tqps(hdev);
11461 	if (ret) {
11462 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11463 		goto err_msi_irq_uninit;
11464 	}
11465 
11466 	ret = hclge_alloc_vport(hdev);
11467 	if (ret)
11468 		goto err_msi_irq_uninit;
11469 
11470 	ret = hclge_map_tqp(hdev);
11471 	if (ret)
11472 		goto err_msi_irq_uninit;
11473 
11474 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11475 	    !hnae3_dev_phy_imp_supported(hdev)) {
11476 		ret = hclge_mac_mdio_config(hdev);
11477 		if (ret)
11478 			goto err_msi_irq_uninit;
11479 	}
11480 
11481 	ret = hclge_init_umv_space(hdev);
11482 	if (ret)
11483 		goto err_mdiobus_unreg;
11484 
11485 	ret = hclge_mac_init(hdev);
11486 	if (ret) {
11487 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11488 		goto err_mdiobus_unreg;
11489 	}
11490 
11491 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11492 	if (ret) {
11493 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11494 		goto err_mdiobus_unreg;
11495 	}
11496 
11497 	ret = hclge_config_gro(hdev, true);
11498 	if (ret)
11499 		goto err_mdiobus_unreg;
11500 
11501 	ret = hclge_init_vlan_config(hdev);
11502 	if (ret) {
11503 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11504 		goto err_mdiobus_unreg;
11505 	}
11506 
11507 	ret = hclge_tm_schd_init(hdev);
11508 	if (ret) {
11509 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11510 		goto err_mdiobus_unreg;
11511 	}
11512 
11513 	ret = hclge_rss_init_cfg(hdev);
11514 	if (ret) {
11515 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11516 		goto err_mdiobus_unreg;
11517 	}
11518 
11519 	ret = hclge_rss_init_hw(hdev);
11520 	if (ret) {
11521 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11522 		goto err_mdiobus_unreg;
11523 	}
11524 
11525 	ret = init_mgr_tbl(hdev);
11526 	if (ret) {
11527 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11528 		goto err_mdiobus_unreg;
11529 	}
11530 
11531 	ret = hclge_init_fd_config(hdev);
11532 	if (ret) {
11533 		dev_err(&pdev->dev,
11534 			"fd table init fail, ret=%d\n", ret);
11535 		goto err_mdiobus_unreg;
11536 	}
11537 
11538 	INIT_KFIFO(hdev->mac_tnl_log);
11539 
11540 	hclge_dcb_ops_set(hdev);
11541 
11542 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11543 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11544 
11545 	/* Set up affinity after the service timer setup because add_timer_on
11546 	 * is called in the affinity notifier.
11547 	 */
11548 	hclge_misc_affinity_setup(hdev);
11549 
11550 	hclge_clear_all_event_cause(hdev);
11551 	hclge_clear_resetting_state(hdev);
11552 
11553 	/* Log and clear the hw errors that have already occurred */
11554 	if (hnae3_dev_ras_imp_supported(hdev))
11555 		hclge_handle_occurred_error(hdev);
11556 	else
11557 		hclge_handle_all_hns_hw_errors(ae_dev);
11558 
11559 	/* request a delayed reset for error recovery, because an immediate
11560 	 * global reset on a PF would affect other PFs' pending initialization
11561 	 */
11562 	if (ae_dev->hw_err_reset_req) {
11563 		enum hnae3_reset_type reset_level;
11564 
11565 		reset_level = hclge_get_reset_level(ae_dev,
11566 						    &ae_dev->hw_err_reset_req);
11567 		hclge_set_def_reset_request(ae_dev, reset_level);
11568 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11569 	}
11570 
11571 	hclge_init_rxd_adv_layout(hdev);
11572 
11573 	/* Enable MISC vector(vector0) */
11574 	hclge_enable_vector(&hdev->misc_vector, true);
11575 
11576 	hclge_state_init(hdev);
11577 	hdev->last_reset_time = jiffies;
11578 
11579 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11580 		 HCLGE_DRIVER_NAME);
11581 
11582 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11583 
11584 	return 0;
11585 
11586 err_mdiobus_unreg:
11587 	if (hdev->hw.mac.phydev)
11588 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11589 err_msi_irq_uninit:
11590 	hclge_misc_irq_uninit(hdev);
11591 err_msi_uninit:
11592 	pci_free_irq_vectors(pdev);
11593 err_cmd_uninit:
11594 	hclge_cmd_uninit(hdev);
11595 err_pci_uninit:
11596 	pcim_iounmap(pdev, hdev->hw.io_base);
11597 	pci_clear_master(pdev);
11598 	pci_release_regions(pdev);
11599 	pci_disable_device(pdev);
11600 out:
11601 	mutex_destroy(&hdev->vport_lock);
11602 	return ret;
11603 }
11604 
11605 static void hclge_stats_clear(struct hclge_dev *hdev)
11606 {
11607 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11608 }
11609 
11610 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11611 {
11612 	return hclge_config_switch_param(hdev, vf, enable,
11613 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11614 }
11615 
11616 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11617 {
11618 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11619 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11620 					  enable, vf);
11621 }
11622 
11623 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11624 {
11625 	int ret;
11626 
11627 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11628 	if (ret) {
11629 		dev_err(&hdev->pdev->dev,
11630 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11631 			vf, enable ? "on" : "off", ret);
11632 		return ret;
11633 	}
11634 
11635 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11636 	if (ret)
11637 		dev_err(&hdev->pdev->dev,
11638 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11639 			vf, enable ? "on" : "off", ret);
11640 
11641 	return ret;
11642 }
11643 
11644 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11645 				 bool enable)
11646 {
11647 	struct hclge_vport *vport = hclge_get_vport(handle);
11648 	struct hclge_dev *hdev = vport->back;
11649 	u32 new_spoofchk = enable ? 1 : 0;
11650 	int ret;
11651 
11652 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11653 		return -EOPNOTSUPP;
11654 
11655 	vport = hclge_get_vf_vport(hdev, vf);
11656 	if (!vport)
11657 		return -EINVAL;
11658 
11659 	if (vport->vf_info.spoofchk == new_spoofchk)
11660 		return 0;
11661 
11662 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11663 		dev_warn(&hdev->pdev->dev,
11664 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11665 			 vf);
11666 	else if (enable && hclge_is_umv_space_full(vport, true))
11667 		dev_warn(&hdev->pdev->dev,
11668 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11669 			 vf);
11670 
11671 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11672 	if (ret)
11673 		return ret;
11674 
11675 	vport->vf_info.spoofchk = new_spoofchk;
11676 	return 0;
11677 }
11678 
11679 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11680 {
11681 	struct hclge_vport *vport = hdev->vport;
11682 	int ret;
11683 	int i;
11684 
11685 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11686 		return 0;
11687 
11688 	/* resume the vf spoof check state after reset */
11689 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11690 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11691 					       vport->vf_info.spoofchk);
11692 		if (ret)
11693 			return ret;
11694 
11695 		vport++;
11696 	}
11697 
11698 	return 0;
11699 }
11700 
11701 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11702 {
11703 	struct hclge_vport *vport = hclge_get_vport(handle);
11704 	struct hclge_dev *hdev = vport->back;
11705 	u32 new_trusted = enable ? 1 : 0;
11706 
11707 	vport = hclge_get_vf_vport(hdev, vf);
11708 	if (!vport)
11709 		return -EINVAL;
11710 
11711 	if (vport->vf_info.trusted == new_trusted)
11712 		return 0;
11713 
11714 	vport->vf_info.trusted = new_trusted;
11715 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11716 	hclge_task_schedule(hdev, 0);
11717 
11718 	return 0;
11719 }
11720 
11721 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11722 {
11723 	int ret;
11724 	int vf;
11725 
11726 	/* reset vf rate to default value */
11727 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11728 		struct hclge_vport *vport = &hdev->vport[vf];
11729 
11730 		vport->vf_info.max_tx_rate = 0;
11731 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11732 		if (ret)
11733 			dev_err(&hdev->pdev->dev,
11734 				"vf%d failed to reset to default, ret=%d\n",
11735 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11736 	}
11737 }
11738 
11739 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11740 				     int min_tx_rate, int max_tx_rate)
11741 {
11742 	if (min_tx_rate != 0 ||
11743 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11744 		dev_err(&hdev->pdev->dev,
11745 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11746 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11747 		return -EINVAL;
11748 	}
11749 
11750 	return 0;
11751 }
11752 
11753 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11754 			     int min_tx_rate, int max_tx_rate, bool force)
11755 {
11756 	struct hclge_vport *vport = hclge_get_vport(handle);
11757 	struct hclge_dev *hdev = vport->back;
11758 	int ret;
11759 
11760 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11761 	if (ret)
11762 		return ret;
11763 
11764 	vport = hclge_get_vf_vport(hdev, vf);
11765 	if (!vport)
11766 		return -EINVAL;
11767 
11768 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11769 		return 0;
11770 
11771 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11772 	if (ret)
11773 		return ret;
11774 
11775 	vport->vf_info.max_tx_rate = max_tx_rate;
11776 
11777 	return 0;
11778 }
11779 
11780 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11781 {
11782 	struct hnae3_handle *handle = &hdev->vport->nic;
11783 	struct hclge_vport *vport;
11784 	int ret;
11785 	int vf;
11786 
11787 	/* resume the vf max_tx_rate after reset */
11788 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11789 		vport = hclge_get_vf_vport(hdev, vf);
11790 		if (!vport)
11791 			return -EINVAL;
11792 
11793 		/* zero means max rate; after reset, the firmware has already set
11794 		 * it to max rate, so just continue.
11795 		 */
11796 		if (!vport->vf_info.max_tx_rate)
11797 			continue;
11798 
11799 		ret = hclge_set_vf_rate(handle, vf, 0,
11800 					vport->vf_info.max_tx_rate, true);
11801 		if (ret) {
11802 			dev_err(&hdev->pdev->dev,
11803 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11804 				vf, vport->vf_info.max_tx_rate, ret);
11805 			return ret;
11806 		}
11807 	}
11808 
11809 	return 0;
11810 }
11811 
11812 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11813 {
11814 	struct hclge_vport *vport = hdev->vport;
11815 	int i;
11816 
11817 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11818 		hclge_vport_stop(vport);
11819 		vport++;
11820 	}
11821 }
11822 
11823 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11824 {
11825 	struct hclge_dev *hdev = ae_dev->priv;
11826 	struct pci_dev *pdev = ae_dev->pdev;
11827 	int ret;
11828 
11829 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11830 
11831 	hclge_stats_clear(hdev);
11832 	/* NOTE: a pf reset doesn't need to clear or restore pf and vf table
11833 	 * entries, so the tables in memory should not be cleaned here.
11834 	 */
11835 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11836 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11837 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11838 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11839 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11840 		hclge_reset_umv_space(hdev);
11841 	}
11842 
11843 	ret = hclge_cmd_init(hdev);
11844 	if (ret) {
11845 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11846 		return ret;
11847 	}
11848 
11849 	ret = hclge_map_tqp(hdev);
11850 	if (ret) {
11851 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11852 		return ret;
11853 	}
11854 
11855 	ret = hclge_mac_init(hdev);
11856 	if (ret) {
11857 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11858 		return ret;
11859 	}
11860 
11861 	ret = hclge_tp_port_init(hdev);
11862 	if (ret) {
11863 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11864 			ret);
11865 		return ret;
11866 	}
11867 
11868 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11869 	if (ret) {
11870 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11871 		return ret;
11872 	}
11873 
11874 	ret = hclge_config_gro(hdev, true);
11875 	if (ret)
11876 		return ret;
11877 
11878 	ret = hclge_init_vlan_config(hdev);
11879 	if (ret) {
11880 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11881 		return ret;
11882 	}
11883 
11884 	ret = hclge_tm_init_hw(hdev, true);
11885 	if (ret) {
11886 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11887 		return ret;
11888 	}
11889 
11890 	ret = hclge_rss_init_hw(hdev);
11891 	if (ret) {
11892 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11893 		return ret;
11894 	}
11895 
11896 	ret = init_mgr_tbl(hdev);
11897 	if (ret) {
11898 		dev_err(&pdev->dev,
11899 			"failed to reinit manager table, ret = %d\n", ret);
11900 		return ret;
11901 	}
11902 
11903 	ret = hclge_init_fd_config(hdev);
11904 	if (ret) {
11905 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11906 		return ret;
11907 	}
11908 
11909 	/* Log and clear the hw errors that have already occurred */
11910 	if (hnae3_dev_ras_imp_supported(hdev))
11911 		hclge_handle_occurred_error(hdev);
11912 	else
11913 		hclge_handle_all_hns_hw_errors(ae_dev);
11914 
11915 	/* Re-enable the hw error interrupts because
11916 	 * the interrupts get disabled on global reset.
11917 	 */
11918 	ret = hclge_config_nic_hw_error(hdev, true);
11919 	if (ret) {
11920 		dev_err(&pdev->dev,
11921 			"fail(%d) to re-enable NIC hw error interrupts\n",
11922 			ret);
11923 		return ret;
11924 	}
11925 
11926 	if (hdev->roce_client) {
11927 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11928 		if (ret) {
11929 			dev_err(&pdev->dev,
11930 				"fail(%d) to re-enable roce ras interrupts\n",
11931 				ret);
11932 			return ret;
11933 		}
11934 	}
11935 
11936 	hclge_reset_vport_state(hdev);
11937 	ret = hclge_reset_vport_spoofchk(hdev);
11938 	if (ret)
11939 		return ret;
11940 
11941 	ret = hclge_resume_vf_rate(hdev);
11942 	if (ret)
11943 		return ret;
11944 
11945 	hclge_init_rxd_adv_layout(hdev);
11946 
11947 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11948 		 HCLGE_DRIVER_NAME);
11949 
11950 	return 0;
11951 }
11952 
11953 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11954 {
11955 	struct hclge_dev *hdev = ae_dev->priv;
11956 	struct hclge_mac *mac = &hdev->hw.mac;
11957 
11958 	hclge_reset_vf_rate(hdev);
11959 	hclge_clear_vf_vlan(hdev);
11960 	hclge_misc_affinity_teardown(hdev);
11961 	hclge_state_uninit(hdev);
11962 	hclge_uninit_rxd_adv_layout(hdev);
11963 	hclge_uninit_mac_table(hdev);
11964 	hclge_del_all_fd_entries(hdev);
11965 
11966 	if (mac->phydev)
11967 		mdiobus_unregister(mac->mdio_bus);
11968 
11969 	/* Disable MISC vector(vector0) */
11970 	hclge_enable_vector(&hdev->misc_vector, false);
11971 	synchronize_irq(hdev->misc_vector.vector_irq);
11972 
11973 	/* Disable all hw interrupts */
11974 	hclge_config_mac_tnl_int(hdev, false);
11975 	hclge_config_nic_hw_error(hdev, false);
11976 	hclge_config_rocee_ras_interrupt(hdev, false);
11977 
11978 	hclge_cmd_uninit(hdev);
11979 	hclge_misc_irq_uninit(hdev);
11980 	hclge_pci_uninit(hdev);
11981 	mutex_destroy(&hdev->vport_lock);
11982 	hclge_uninit_vport_vlan_table(hdev);
11983 	ae_dev->priv = NULL;
11984 }
11985 
11986 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11987 {
11988 	struct hclge_vport *vport = hclge_get_vport(handle);
11989 	struct hclge_dev *hdev = vport->back;
11990 
11991 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11992 }
11993 
11994 static void hclge_get_channels(struct hnae3_handle *handle,
11995 			       struct ethtool_channels *ch)
11996 {
11997 	ch->max_combined = hclge_get_max_channels(handle);
11998 	ch->other_count = 1;
11999 	ch->max_other = 1;
12000 	ch->combined_count = handle->kinfo.rss_size;
12001 }
12002 
12003 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12004 					u16 *alloc_tqps, u16 *max_rss_size)
12005 {
12006 	struct hclge_vport *vport = hclge_get_vport(handle);
12007 	struct hclge_dev *hdev = vport->back;
12008 
12009 	*alloc_tqps = vport->alloc_tqps;
12010 	*max_rss_size = hdev->pf_rss_size_max;
12011 }
12012 
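/* Change the number of combined channels: record the requested RSS size,
 * update the TM vport mapping, then reprogram the RSS TC mode with
 * tc_size = ilog2(roundup_pow_of_two(rss_size)). Unless the user has
 * configured the RSS indirection table (rxfh_configured), the table is
 * rebuilt with the default i % rss_size spread over the new queue set.
 */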
12013 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12014 			      bool rxfh_configured)
12015 {
12016 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12017 	struct hclge_vport *vport = hclge_get_vport(handle);
12018 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12019 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12020 	struct hclge_dev *hdev = vport->back;
12021 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12022 	u16 cur_rss_size = kinfo->rss_size;
12023 	u16 cur_tqps = kinfo->num_tqps;
12024 	u16 tc_valid[HCLGE_MAX_TC_NUM];
12025 	u16 roundup_size;
12026 	u32 *rss_indir;
12027 	unsigned int i;
12028 	int ret;
12029 
12030 	kinfo->req_rss_size = new_tqps_num;
12031 
12032 	ret = hclge_tm_vport_map_update(hdev);
12033 	if (ret) {
12034 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12035 		return ret;
12036 	}
12037 
12038 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
12039 	roundup_size = ilog2(roundup_size);
12040 	/* Set the RSS TC mode according to the new RSS size */
12041 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12042 		tc_valid[i] = 0;
12043 
12044 		if (!(hdev->hw_tc_map & BIT(i)))
12045 			continue;
12046 
12047 		tc_valid[i] = 1;
12048 		tc_size[i] = roundup_size;
12049 		tc_offset[i] = kinfo->rss_size * i;
12050 	}
12051 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12052 	if (ret)
12053 		return ret;
12054 
12055 	/* RSS indirection table has been configured by user */
12056 	if (rxfh_configured)
12057 		goto out;
12058 
12059 	/* Reinitialize the rss indirection table according to the new RSS size */
12060 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12061 			    GFP_KERNEL);
12062 	if (!rss_indir)
12063 		return -ENOMEM;
12064 
12065 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12066 		rss_indir[i] = i % kinfo->rss_size;
12067 
12068 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12069 	if (ret)
12070 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12071 			ret);
12072 
12073 	kfree(rss_indir);
12074 
12075 out:
12076 	if (!ret)
12077 		dev_info(&hdev->pdev->dev,
12078 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12079 			 cur_rss_size, kinfo->rss_size,
12080 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12081 
12082 	return ret;
12083 }
12084 
12085 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12086 			      u32 *regs_num_64_bit)
12087 {
12088 	struct hclge_desc desc;
12089 	u32 total_num;
12090 	int ret;
12091 
12092 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12093 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12094 	if (ret) {
12095 		dev_err(&hdev->pdev->dev,
12096 			"Query register number cmd failed, ret = %d.\n", ret);
12097 		return ret;
12098 	}
12099 
12100 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
12101 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
12102 
12103 	total_num = *regs_num_32_bit + *regs_num_64_bit;
12104 	if (!total_num)
12105 		return -EINVAL;
12106 
12107 	return 0;
12108 }
12109 
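/* Read regs_num 32-bit register values through a chained query command.
 * The first BD reserves HCLGE_32_BIT_DESC_NODATA_LEN dwords for the
 * header, so it carries fewer values than the following BDs; the values
 * are unpacked into the caller's buffer in order.
 */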
12110 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12111 				 void *data)
12112 {
12113 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12114 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12115 
12116 	struct hclge_desc *desc;
12117 	u32 *reg_val = data;
12118 	__le32 *desc_data;
12119 	int nodata_num;
12120 	int cmd_num;
12121 	int i, k, n;
12122 	int ret;
12123 
12124 	if (regs_num == 0)
12125 		return 0;
12126 
12127 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12128 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12129 			       HCLGE_32_BIT_REG_RTN_DATANUM);
12130 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12131 	if (!desc)
12132 		return -ENOMEM;
12133 
12134 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12135 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12136 	if (ret) {
12137 		dev_err(&hdev->pdev->dev,
12138 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
12139 		kfree(desc);
12140 		return ret;
12141 	}
12142 
12143 	for (i = 0; i < cmd_num; i++) {
12144 		if (i == 0) {
12145 			desc_data = (__le32 *)(&desc[i].data[0]);
12146 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12147 		} else {
12148 			desc_data = (__le32 *)(&desc[i]);
12149 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
12150 		}
12151 		for (k = 0; k < n; k++) {
12152 			*reg_val++ = le32_to_cpu(*desc_data++);
12153 
12154 			regs_num--;
12155 			if (!regs_num)
12156 				break;
12157 		}
12158 	}
12159 
12160 	kfree(desc);
12161 	return 0;
12162 }
12163 
12164 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12165 				 void *data)
12166 {
12167 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12168 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12169 
12170 	struct hclge_desc *desc;
12171 	u64 *reg_val = data;
12172 	__le64 *desc_data;
12173 	int nodata_len;
12174 	int cmd_num;
12175 	int i, k, n;
12176 	int ret;
12177 
12178 	if (regs_num == 0)
12179 		return 0;
12180 
12181 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12182 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12183 			       HCLGE_64_BIT_REG_RTN_DATANUM);
12184 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12185 	if (!desc)
12186 		return -ENOMEM;
12187 
12188 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12189 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12190 	if (ret) {
12191 		dev_err(&hdev->pdev->dev,
12192 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
12193 		kfree(desc);
12194 		return ret;
12195 	}
12196 
12197 	for (i = 0; i < cmd_num; i++) {
12198 		if (i == 0) {
12199 			desc_data = (__le64 *)(&desc[i].data[0]);
12200 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12201 		} else {
12202 			desc_data = (__le64 *)(&desc[i]);
12203 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
12204 		}
12205 		for (k = 0; k < n; k++) {
12206 			*reg_val++ = le64_to_cpu(*desc_data++);
12207 
12208 			regs_num--;
12209 			if (!regs_num)
12210 				break;
12211 		}
12212 	}
12213 
12214 	kfree(desc);
12215 	return 0;
12216 }
12217 
12218 #define MAX_SEPARATE_NUM	4
12219 #define SEPARATOR_VALUE		0xFDFCFBFA
12220 #define REG_NUM_PER_LINE	4
12221 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12222 #define REG_SEPARATOR_LINE	1
12223 #define REG_NUM_REMAIN_MASK	3
12224 
12225 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12226 {
12227 	int i;
12228 
12229 	/* initialize all command BDs except the last one */
12230 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12231 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12232 					   true);
12233 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12234 	}
12235 
12236 	/* initialize the last command BD */
12237 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12238 
12239 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12240 }
12241 
12242 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12243 				    int *bd_num_list,
12244 				    u32 type_num)
12245 {
12246 	u32 entries_per_desc, desc_index, index, offset, i;
12247 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12248 	int ret;
12249 
12250 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12251 	if (ret) {
12252 		dev_err(&hdev->pdev->dev,
12253 			"Get dfx bd num fail, status is %d.\n", ret);
12254 		return ret;
12255 	}
12256 
12257 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12258 	for (i = 0; i < type_num; i++) {
12259 		offset = hclge_dfx_bd_offset_list[i];
12260 		index = offset % entries_per_desc;
12261 		desc_index = offset / entries_per_desc;
12262 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12263 	}
12264 
12265 	return ret;
12266 }
12267 
12268 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12269 				  struct hclge_desc *desc_src, int bd_num,
12270 				  enum hclge_opcode_type cmd)
12271 {
12272 	struct hclge_desc *desc = desc_src;
12273 	int i, ret;
12274 
12275 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12276 	for (i = 0; i < bd_num - 1; i++) {
12277 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12278 		desc++;
12279 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12280 	}
12281 
12282 	desc = desc_src;
12283 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12284 	if (ret)
12285 		dev_err(&hdev->pdev->dev,
12286 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12287 			cmd, ret);
12288 
12289 	return ret;
12290 }
12291 
12292 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12293 				    void *data)
12294 {
12295 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12296 	struct hclge_desc *desc = desc_src;
12297 	u32 *reg = data;
12298 
12299 	entries_per_desc = ARRAY_SIZE(desc->data);
12300 	reg_num = entries_per_desc * bd_num;
12301 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12302 	for (i = 0; i < reg_num; i++) {
12303 		index = i % entries_per_desc;
12304 		desc_index = i / entries_per_desc;
12305 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12306 	}
12307 	for (i = 0; i < separator_num; i++)
12308 		*reg++ = SEPARATOR_VALUE;
12309 
12310 	return reg_num + separator_num;
12311 }
12312 
12313 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12314 {
12315 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12316 	int data_len_per_desc, bd_num, i;
12317 	int *bd_num_list;
12318 	u32 data_len;
12319 	int ret;
12320 
12321 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12322 	if (!bd_num_list)
12323 		return -ENOMEM;
12324 
12325 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12326 	if (ret) {
12327 		dev_err(&hdev->pdev->dev,
12328 			"Get dfx reg bd num fail, status is %d.\n", ret);
12329 		goto out;
12330 	}
12331 
12332 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12333 	*len = 0;
12334 	for (i = 0; i < dfx_reg_type_num; i++) {
12335 		bd_num = bd_num_list[i];
12336 		data_len = data_len_per_desc * bd_num;
12337 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12338 	}
12339 
12340 out:
12341 	kfree(bd_num_list);
12342 	return ret;
12343 }
12344 
12345 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12346 {
12347 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12348 	int bd_num, bd_num_max, buf_len, i;
12349 	struct hclge_desc *desc_src;
12350 	int *bd_num_list;
12351 	u32 *reg = data;
12352 	int ret;
12353 
12354 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12355 	if (!bd_num_list)
12356 		return -ENOMEM;
12357 
12358 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12359 	if (ret) {
12360 		dev_err(&hdev->pdev->dev,
12361 			"Get dfx reg bd num fail, status is %d.\n", ret);
12362 		goto out;
12363 	}
12364 
12365 	bd_num_max = bd_num_list[0];
12366 	for (i = 1; i < dfx_reg_type_num; i++)
12367 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12368 
12369 	buf_len = sizeof(*desc_src) * bd_num_max;
12370 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12371 	if (!desc_src) {
12372 		ret = -ENOMEM;
12373 		goto out;
12374 	}
12375 
12376 	for (i = 0; i < dfx_reg_type_num; i++) {
12377 		bd_num = bd_num_list[i];
12378 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12379 					     hclge_dfx_reg_opcode_list[i]);
12380 		if (ret) {
12381 			dev_err(&hdev->pdev->dev,
12382 				"Get dfx reg failed, status is %d.\n", ret);
12383 			break;
12384 		}
12385 
12386 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12387 	}
12388 
12389 	kfree(desc_src);
12390 out:
12391 	kfree(bd_num_list);
12392 	return ret;
12393 }
12394 
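/* Read the directly accessible PF registers (cmdq, common, per-ring and
 * per-vector TQP interrupt registers) into @data, appending SEPARATOR_VALUE
 * padding after each group. Returns the number of u32 entries written.
 */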
12395 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12396 			      struct hnae3_knic_private_info *kinfo)
12397 {
12398 #define HCLGE_RING_REG_OFFSET		0x200
12399 #define HCLGE_RING_INT_REG_OFFSET	0x4
12400 
12401 	int i, j, reg_num, separator_num;
12402 	int data_num_sum;
12403 	u32 *reg = data;
12404 
12405 	/* fetch per-PF register values from the PF PCIe register space */
12406 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12407 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12408 	for (i = 0; i < reg_num; i++)
12409 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12410 	for (i = 0; i < separator_num; i++)
12411 		*reg++ = SEPARATOR_VALUE;
12412 	data_num_sum = reg_num + separator_num;
12413 
12414 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12415 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12416 	for (i = 0; i < reg_num; i++)
12417 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12418 	for (i = 0; i < separator_num; i++)
12419 		*reg++ = SEPARATOR_VALUE;
12420 	data_num_sum += reg_num + separator_num;
12421 
12422 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12423 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12424 	for (j = 0; j < kinfo->num_tqps; j++) {
12425 		for (i = 0; i < reg_num; i++)
12426 			*reg++ = hclge_read_dev(&hdev->hw,
12427 						ring_reg_addr_list[i] +
12428 						HCLGE_RING_REG_OFFSET * j);
12429 		for (i = 0; i < separator_num; i++)
12430 			*reg++ = SEPARATOR_VALUE;
12431 	}
12432 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12433 
12434 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12435 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12436 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12437 		for (i = 0; i < reg_num; i++)
12438 			*reg++ = hclge_read_dev(&hdev->hw,
12439 						tqp_intr_reg_addr_list[i] +
12440 						HCLGE_RING_INT_REG_OFFSET * j);
12441 		for (i = 0; i < separator_num; i++)
12442 			*reg++ = SEPARATOR_VALUE;
12443 	}
12444 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12445 
12446 	return data_num_sum;
12447 }
12448 
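/* hnae3 .get_regs_len op: total register dump size in bytes, covering the
 * direct PF register groups, the 32/64-bit register sets reported by
 * hclge_get_regs_num() and the DFX register blocks.
 */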
12449 static int hclge_get_regs_len(struct hnae3_handle *handle)
12450 {
12451 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12452 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12453 	struct hclge_vport *vport = hclge_get_vport(handle);
12454 	struct hclge_dev *hdev = vport->back;
12455 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12456 	int regs_lines_32_bit, regs_lines_64_bit;
12457 	int ret;
12458 
12459 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12460 	if (ret) {
12461 		dev_err(&hdev->pdev->dev,
12462 			"Get register number failed, ret = %d.\n", ret);
12463 		return ret;
12464 	}
12465 
12466 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12467 	if (ret) {
12468 		dev_err(&hdev->pdev->dev,
12469 			"Get dfx reg len failed, ret = %d.\n", ret);
12470 		return ret;
12471 	}
12472 
12473 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12474 		REG_SEPARATOR_LINE;
12475 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12476 		REG_SEPARATOR_LINE;
12477 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12478 		REG_SEPARATOR_LINE;
12479 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12480 		REG_SEPARATOR_LINE;
12481 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12482 		REG_SEPARATOR_LINE;
12483 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12484 		REG_SEPARATOR_LINE;
12485 
12486 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12487 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12488 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12489 }
12490 
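/* hnae3 .get_regs op: fill @data with the register dump sized by
 * hclge_get_regs_len() and report the firmware version through @version.
 * Each register group is followed by SEPARATOR_VALUE padding.
 */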
12491 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12492 			   void *data)
12493 {
12494 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12495 	struct hclge_vport *vport = hclge_get_vport(handle);
12496 	struct hclge_dev *hdev = vport->back;
12497 	u32 regs_num_32_bit, regs_num_64_bit;
12498 	int i, reg_num, separator_num, ret;
12499 	u32 *reg = data;
12500 
12501 	*version = hdev->fw_version;
12502 
12503 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12504 	if (ret) {
12505 		dev_err(&hdev->pdev->dev,
12506 			"Get register number failed, ret = %d.\n", ret);
12507 		return;
12508 	}
12509 
12510 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12511 
12512 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12513 	if (ret) {
12514 		dev_err(&hdev->pdev->dev,
12515 			"Get 32 bit register failed, ret = %d.\n", ret);
12516 		return;
12517 	}
12518 	reg_num = regs_num_32_bit;
12519 	reg += reg_num;
12520 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12521 	for (i = 0; i < separator_num; i++)
12522 		*reg++ = SEPARATOR_VALUE;
12523 
12524 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12525 	if (ret) {
12526 		dev_err(&hdev->pdev->dev,
12527 			"Get 64 bit register failed, ret = %d.\n", ret);
12528 		return;
12529 	}
12530 	reg_num = regs_num_64_bit * 2;
12531 	reg += reg_num;
12532 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12533 	for (i = 0; i < separator_num; i++)
12534 		*reg++ = SEPARATOR_VALUE;
12535 
12536 	ret = hclge_get_dfx_reg(hdev, reg);
12537 	if (ret)
12538 		dev_err(&hdev->pdev->dev,
12539 			"Get dfx register failed, ret = %d.\n", ret);
12540 }
12541 
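/* Set the locate LED state via the HCLGE_OPC_LED_STATUS_CFG command. */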
12542 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12543 {
12544 	struct hclge_set_led_state_cmd *req;
12545 	struct hclge_desc desc;
12546 	int ret;
12547 
12548 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12549 
12550 	req = (struct hclge_set_led_state_cmd *)desc.data;
12551 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12552 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12553 
12554 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12555 	if (ret)
12556 		dev_err(&hdev->pdev->dev,
12557 			"Send set led state cmd error, ret = %d\n", ret);
12558 
12559 	return ret;
12560 }
12561 
12562 enum hclge_led_status {
12563 	HCLGE_LED_OFF,
12564 	HCLGE_LED_ON,
12565 	HCLGE_LED_NO_CHANGE = 0xFF,
12566 };
12567 
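/* hnae3 .set_led_id op: turn the locate LED on for ETHTOOL_ID_ACTIVE and
 * off for ETHTOOL_ID_INACTIVE; other states are rejected with -EINVAL.
 */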
12568 static int hclge_set_led_id(struct hnae3_handle *handle,
12569 			    enum ethtool_phys_id_state status)
12570 {
12571 	struct hclge_vport *vport = hclge_get_vport(handle);
12572 	struct hclge_dev *hdev = vport->back;
12573 
12574 	switch (status) {
12575 	case ETHTOOL_ID_ACTIVE:
12576 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12577 	case ETHTOOL_ID_INACTIVE:
12578 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12579 	default:
12580 		return -EINVAL;
12581 	}
12582 }
12583 
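/* hnae3 .get_link_mode op: copy the cached supported and advertised link
 * mode bitmaps from the MAC state into the caller's buffers.
 */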
12584 static void hclge_get_link_mode(struct hnae3_handle *handle,
12585 				unsigned long *supported,
12586 				unsigned long *advertising)
12587 {
12588 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12589 	struct hclge_vport *vport = hclge_get_vport(handle);
12590 	struct hclge_dev *hdev = vport->back;
12591 	unsigned int idx = 0;
12592 
12593 	for (; idx < size; idx++) {
12594 		supported[idx] = hdev->hw.mac.supported[idx];
12595 		advertising[idx] = hdev->hw.mac.advertising[idx];
12596 	}
12597 }
12598 
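/* hnae3 .set_gro_en op: enable or disable GRO offload for this device. */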
12599 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12600 {
12601 	struct hclge_vport *vport = hclge_get_vport(handle);
12602 	struct hclge_dev *hdev = vport->back;
12603 
12604 	return hclge_config_gro(hdev, enable);
12605 }
12606 
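/* Re-apply pending promiscuous mode configuration. For the PF (vport 0) the
 * netdev flags are combined with the overflow promiscuous flags; for VF
 * vports, unicast/multicast promiscuity is only granted to trusted VFs. On
 * failure the PROMISC_CHANGE bit stays set so the update can be retried.
 */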
12607 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12608 {
12609 	struct hclge_vport *vport = &hdev->vport[0];
12610 	struct hnae3_handle *handle = &vport->nic;
12611 	u8 tmp_flags;
12612 	int ret;
12613 	u16 i;
12614 
12615 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12616 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12617 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12618 	}
12619 
12620 	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12621 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12622 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12623 					     tmp_flags & HNAE3_MPE);
12624 		if (!ret) {
12625 			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12626 				  &vport->state);
12627 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12628 				&vport->state);
12629 		}
12630 	}
12631 
12632 	for (i = 1; i < hdev->num_alloc_vport; i++) {
12633 		bool uc_en = false;
12634 		bool mc_en = false;
12635 		bool bc_en;
12636 
12637 		vport = &hdev->vport[i];
12638 
12639 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12640 					&vport->state))
12641 			continue;
12642 
12643 		if (vport->vf_info.trusted) {
12644 			uc_en = vport->vf_info.request_uc_en > 0;
12645 			mc_en = vport->vf_info.request_mc_en > 0;
12646 		}
12647 		bc_en = vport->vf_info.request_bc_en > 0;
12648 
12649 		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12650 						 mc_en, bc_en);
12651 		if (ret) {
12652 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12653 				&vport->state);
12654 			return;
12655 		}
12656 		hclge_set_vport_vlan_fltr_change(vport);
12657 	}
12658 }
12659 
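/* Query HCLGE_OPC_GET_SFP_EXIST to check whether an SFP module is plugged
 * in; returns false if the command fails or no module is present.
 */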
12660 static bool hclge_module_existed(struct hclge_dev *hdev)
12661 {
12662 	struct hclge_desc desc;
12663 	u32 existed;
12664 	int ret;
12665 
12666 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12667 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12668 	if (ret) {
12669 		dev_err(&hdev->pdev->dev,
12670 			"failed to get SFP exist state, ret = %d\n", ret);
12671 		return false;
12672 	}
12673 
12674 	existed = le32_to_cpu(desc.data[0]);
12675 
12676 	return existed != 0;
12677 }
12678 
12679 /* need 6 bds (total 140 bytes) in one reading
12680  * return the number of bytes actually read, 0 means the read failed.
12681  */
12682 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12683 				     u32 len, u8 *data)
12684 {
12685 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12686 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12687 	u16 read_len;
12688 	u16 copy_len;
12689 	int ret;
12690 	int i;
12691 
12692 	/* setup all 6 bds to read module eeprom info. */
12693 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12694 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12695 					   true);
12696 
12697 		/* bd0~bd4 need next flag */
12698 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12699 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12700 	}
12701 
12702 	/* setup bd0; this bd contains the offset and read length. */
12703 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12704 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12705 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12706 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12707 
12708 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12709 	if (ret) {
12710 		dev_err(&hdev->pdev->dev,
12711 			"failed to get SFP eeprom info, ret = %d\n", ret);
12712 		return 0;
12713 	}
12714 
12715 	/* copy sfp info from bd0 to out buffer. */
12716 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12717 	memcpy(data, sfp_info_bd0->data, copy_len);
12718 	read_len = copy_len;
12719 
12720 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12721 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12722 		if (read_len >= len)
12723 			return read_len;
12724 
12725 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12726 		memcpy(data + read_len, desc[i].data, copy_len);
12727 		read_len += copy_len;
12728 	}
12729 
12730 	return read_len;
12731 }
12732 
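/* hnae3 .get_module_eeprom op: read @len bytes of module EEPROM data from
 * @offset, in chunks of up to HCLGE_SFP_INFO_MAX_LEN bytes. Only supported
 * on fiber media and only when a module is present.
 */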
12733 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12734 				   u32 len, u8 *data)
12735 {
12736 	struct hclge_vport *vport = hclge_get_vport(handle);
12737 	struct hclge_dev *hdev = vport->back;
12738 	u32 read_len = 0;
12739 	u16 data_len;
12740 
12741 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12742 		return -EOPNOTSUPP;
12743 
12744 	if (!hclge_module_existed(hdev))
12745 		return -ENXIO;
12746 
12747 	while (read_len < len) {
12748 		data_len = hclge_get_sfp_eeprom_info(hdev,
12749 						     offset + read_len,
12750 						     len - read_len,
12751 						     data + read_len);
12752 		if (!data_len)
12753 			return -EIO;
12754 
12755 		read_len += data_len;
12756 	}
12757 
12758 	return 0;
12759 }
12760 
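/* Operations exposed to the hnae3 framework for the PF (hclge) device. */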
12761 static const struct hnae3_ae_ops hclge_ops = {
12762 	.init_ae_dev = hclge_init_ae_dev,
12763 	.uninit_ae_dev = hclge_uninit_ae_dev,
12764 	.reset_prepare = hclge_reset_prepare_general,
12765 	.reset_done = hclge_reset_done,
12766 	.init_client_instance = hclge_init_client_instance,
12767 	.uninit_client_instance = hclge_uninit_client_instance,
12768 	.map_ring_to_vector = hclge_map_ring_to_vector,
12769 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12770 	.get_vector = hclge_get_vector,
12771 	.put_vector = hclge_put_vector,
12772 	.set_promisc_mode = hclge_set_promisc_mode,
12773 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12774 	.set_loopback = hclge_set_loopback,
12775 	.start = hclge_ae_start,
12776 	.stop = hclge_ae_stop,
12777 	.client_start = hclge_client_start,
12778 	.client_stop = hclge_client_stop,
12779 	.get_status = hclge_get_status,
12780 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12781 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12782 	.get_media_type = hclge_get_media_type,
12783 	.check_port_speed = hclge_check_port_speed,
12784 	.get_fec = hclge_get_fec,
12785 	.set_fec = hclge_set_fec,
12786 	.get_rss_key_size = hclge_get_rss_key_size,
12787 	.get_rss = hclge_get_rss,
12788 	.set_rss = hclge_set_rss,
12789 	.set_rss_tuple = hclge_set_rss_tuple,
12790 	.get_rss_tuple = hclge_get_rss_tuple,
12791 	.get_tc_size = hclge_get_tc_size,
12792 	.get_mac_addr = hclge_get_mac_addr,
12793 	.set_mac_addr = hclge_set_mac_addr,
12794 	.do_ioctl = hclge_do_ioctl,
12795 	.add_uc_addr = hclge_add_uc_addr,
12796 	.rm_uc_addr = hclge_rm_uc_addr,
12797 	.add_mc_addr = hclge_add_mc_addr,
12798 	.rm_mc_addr = hclge_rm_mc_addr,
12799 	.set_autoneg = hclge_set_autoneg,
12800 	.get_autoneg = hclge_get_autoneg,
12801 	.restart_autoneg = hclge_restart_autoneg,
12802 	.halt_autoneg = hclge_halt_autoneg,
12803 	.get_pauseparam = hclge_get_pauseparam,
12804 	.set_pauseparam = hclge_set_pauseparam,
12805 	.set_mtu = hclge_set_mtu,
12806 	.reset_queue = hclge_reset_tqp,
12807 	.get_stats = hclge_get_stats,
12808 	.get_mac_stats = hclge_get_mac_stat,
12809 	.update_stats = hclge_update_stats,
12810 	.get_strings = hclge_get_strings,
12811 	.get_sset_count = hclge_get_sset_count,
12812 	.get_fw_version = hclge_get_fw_version,
12813 	.get_mdix_mode = hclge_get_mdix_mode,
12814 	.enable_vlan_filter = hclge_enable_vlan_filter,
12815 	.set_vlan_filter = hclge_set_vlan_filter,
12816 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12817 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12818 	.reset_event = hclge_reset_event,
12819 	.get_reset_level = hclge_get_reset_level,
12820 	.set_default_reset_request = hclge_set_def_reset_request,
12821 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12822 	.set_channels = hclge_set_channels,
12823 	.get_channels = hclge_get_channels,
12824 	.get_regs_len = hclge_get_regs_len,
12825 	.get_regs = hclge_get_regs,
12826 	.set_led_id = hclge_set_led_id,
12827 	.get_link_mode = hclge_get_link_mode,
12828 	.add_fd_entry = hclge_add_fd_entry,
12829 	.del_fd_entry = hclge_del_fd_entry,
12830 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12831 	.get_fd_rule_info = hclge_get_fd_rule_info,
12832 	.get_fd_all_rules = hclge_get_all_rules,
12833 	.enable_fd = hclge_enable_fd,
12834 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
12835 	.dbg_read_cmd = hclge_dbg_read_cmd,
12836 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
12837 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
12838 	.ae_dev_resetting = hclge_ae_dev_resetting,
12839 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12840 	.set_gro_en = hclge_gro_en,
12841 	.get_global_queue_id = hclge_covert_handle_qid_global,
12842 	.set_timer_task = hclge_set_timer_task,
12843 	.mac_connect_phy = hclge_mac_connect_phy,
12844 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
12845 	.get_vf_config = hclge_get_vf_config,
12846 	.set_vf_link_state = hclge_set_vf_link_state,
12847 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
12848 	.set_vf_trust = hclge_set_vf_trust,
12849 	.set_vf_rate = hclge_set_vf_rate,
12850 	.set_vf_mac = hclge_set_vf_mac,
12851 	.get_module_eeprom = hclge_get_module_eeprom,
12852 	.get_cmdq_stat = hclge_get_cmdq_stat,
12853 	.add_cls_flower = hclge_add_cls_flower,
12854 	.del_cls_flower = hclge_del_cls_flower,
12855 	.cls_flower_active = hclge_is_cls_flower_active,
12856 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12857 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12858 };
12859 
12860 static struct hnae3_ae_algo ae_algo = {
12861 	.ops = &hclge_ops,
12862 	.pdev_id_table = ae_algo_pci_tbl,
12863 };
12864 
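/* Module init: create the hclge workqueue and register the AE algo. */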
12865 static int hclge_init(void)
12866 {
12867 	pr_info("%s is initializing\n", HCLGE_NAME);
12868 
12869 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12870 	if (!hclge_wq) {
12871 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12872 		return -ENOMEM;
12873 	}
12874 
12875 	hnae3_register_ae_algo(&ae_algo);
12876 
12877 	return 0;
12878 }
12879 
12880 static void hclge_exit(void)
12881 {
12882 	hnae3_unregister_ae_algo(&ae_algo);
12883 	destroy_workqueue(hclge_wq);
12884 }
12885 module_init(hclge_init);
12886 module_exit(hclge_exit);
12887 
12888 MODULE_LICENSE("GPL");
12889 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12890 MODULE_DESCRIPTION("HCLGE Driver");
12891 MODULE_VERSION(HCLGE_MOD_VERSION);
12892